/*	$NetBSD: sysv_shm.c,v 1.107.2.1 2008/05/10 23:49:05 wrstuden Exp $	*/

/*-
 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.107.2.1 2008/05/10 23:49:05 wrstuden Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/shm.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

int shm_nused;
struct	shmid_ds *shmsegs;

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static kmutex_t		shm_lock;
static kcondvar_t *	shm_cv;
static struct pool	shmmap_entry_pool;
static int		shm_last_free, shm_use_phys;
static size_t		shm_committed;

static kcondvar_t	shm_realloc_cv;
static bool		shm_realloc_state;
static u_int		shm_realloc_disable;

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};

#ifdef SHMDEBUG
#define SHMPRINTF(a) printf a
#else
#define SHMPRINTF(a)
#endif

static int shmrealloc(int);

/*
 * Find a shared memory segment by its identifier.
 *  => must be called with shm_lock held;
 */
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	KASSERT(mutex_owned(&shm_lock));

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode &
	    (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;

	return shmseg;
}
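
/*
 * Illustrative note (not part of the original sources): a shmid packs the
 * segment table index into its low bits and a generation sequence into its
 * high bits, which is what the IPCID_TO_IX()/IPCID_TO_SEQ() checks above
 * rely on.  A minimal sketch of the round trip, assuming the usual
 * (seq << 16) | index encoding from <sys/ipc.h>:
 *
 *	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	int ix  = shmid & 0xffff;		// IPCID_TO_IX()
 *	int seq = (shmid >> 16) & 0xffff;	// IPCID_TO_SEQ()
 *
 * Reusing a table slot bumps the sequence number, so a stale shmid fails
 * the _seq comparison above instead of silently naming a new segment.
 */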

/*
 * Free memory segment.
 *  => must be called with shm_lock held;
 */
static void
shm_free_segment(int segnum)
{
	struct shmid_ds *shmseg;
	size_t size;
	bool wanted;

	KASSERT(mutex_owned(&shm_lock));

	shmseg = &shmsegs[segnum];
	SHMPRINTF(("shm freeing key 0x%lx seq 0x%x\n",
	    shmseg->shm_perm._key, shmseg->shm_perm._seq));

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	wanted = (shmseg->shm_perm.mode & SHMSEG_WANTED);

	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_last_free = segnum;
	if (wanted == true)
		cv_broadcast(&shm_cv[segnum]);
}

/*
 * Delete entry from the shm map.
 *  => must be called with shm_lock held;
 */
static struct uvm_object *
shm_delete_mapping(struct shmmap_state *shmmap_s,
    struct shmmap_entry *shmmap_se)
{
	struct uvm_object *uobj = NULL;
	struct shmid_ds *shmseg;
	int segnum;

	KASSERT(mutex_owned(&shm_lock));

	segnum = IPCID_TO_IX(shmmap_se->shmid);
	shmseg = &shmsegs[segnum];
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		uobj = shmseg->_shm_internal;
		shm_free_segment(segnum);
	}

	return uobj;
}

/*
 * Get a non-shared shm map for the vmspace.  Note that memory
 * allocation may be performed with the lock held.
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	KASSERT(mutex_owned(&shm_lock));

	/* 1. A shm map with refcnt = 1, used only by us - return it */
	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return oshmmap_s;

	/* 2. No shm map present - create a fresh one */
	shmmap_s = kmem_zalloc(sizeof(struct shmmap_state), KM_SLEEP);
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (void *)shmmap_s;

	if (oshmmap_s == NULL)
		return shmmap_s;

	SHMPRINTF(("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	    p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs));

	/* 3. A shared shm map - copy it to a fresh one and adjust refcounts */
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;

	return shmmap_s;
}
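
/*
 * Illustrative note (not part of the original sources): the shm map is
 * reference-counted and shared across fork (see shmfork() below), then
 * privatized lazily by the function above on the first shmat(), shmdt()
 * or SHM_LOCK/SHM_UNLOCK in either process.  A minimal userland sketch,
 * assuming shmid names an existing segment:
 *
 *	void *p = shmat(shmid, NULL, 0);	// parent attaches
 *	if (fork() == 0) {
 *		// child shares the parent's shm map (nrefs == 2) until...
 *		shmdt(p);	// ...this triggers shmmap_getprivate()
 *		_exit(0);
 *	}
 */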

/*
 * Lock/unlock the memory.
 *  => must be called with shm_lock held;
 *  => called from one place, thus, inline;
 */
static inline int
shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
{
	struct proc *p = l->l_proc;
	struct shmmap_entry *shmmap_se;
	struct shmmap_state *shmmap_s;
	size_t size;
	int error;

	KASSERT(mutex_owned(&shm_lock));
	shmmap_s = shmmap_getprivate(p);

	/* Find our shared memory address by shmid */
	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
		if (shmmap_se->shmid != shmid)
			continue;

		size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

		if (cmd == SHM_LOCK &&
		    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
			/* Wire the object and map, then tag it */
			error = uobj_wirepages(shmseg->_shm_internal, 0,
			    round_page(shmseg->shm_segsz));
			if (error)
				return EIO;
			error = uvm_map_pageable(&p->p_vmspace->vm_map,
			    shmmap_se->va, shmmap_se->va + size, false, 0);
			if (error) {
				uobj_unwirepages(shmseg->_shm_internal, 0,
				    round_page(shmseg->shm_segsz));
				if (error == EFAULT)
					error = ENOMEM;
				return error;
			}
			shmseg->shm_perm.mode |= SHMSEG_WIRED;

		} else if (cmd == SHM_UNLOCK &&
		    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
			/* Unwire the object and map, then untag it */
			uobj_unwirepages(shmseg->_shm_internal, 0,
			    round_page(shmseg->shm_segsz));
			error = uvm_map_pageable(&p->p_vmspace->vm_map,
			    shmmap_se->va, shmmap_se->va + size, true, 0);
			if (error)
				return EIO;
			shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
		}
	}

	return 0;
}
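
/*
 * Illustrative note (not part of the original sources): SHM_LOCK wires the
 * segment so it cannot be paged out; SHM_UNLOCK reverses it.  A minimal
 * userland sketch (requires superuser, per the kauth check in shmctl1()
 * below):
 *
 *	if (shmctl(shmid, SHM_LOCK, NULL) == -1)
 *		err(1, "SHM_LOCK");
 *	...
 *	(void)shmctl(shmid, SHM_UNLOCK, NULL);
 */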

/*
 * Unmap shared memory.
 */
int
sys_shmdt(struct lwp *l, const struct sys_shmdt_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) shmaddr;
	} */
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s1, *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object *uobj;
	struct shmid_ds *shmseg;
	size_t size;

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmmap_s1 = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s1 == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	/* Find the map entry */
	SLIST_FOREACH(shmmap_se, &shmmap_s1->entries, next)
		if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (shmmap_se == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	shmmap_s = shmmap_getprivate(p);
	if (shmmap_s != shmmap_s1) {
		/* Map has been copied - look up the entry in the new map */
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
				break;
		if (shmmap_se == NULL) {
			mutex_exit(&shm_lock);
			return EINVAL;
		}
	}

	SHMPRINTF(("shmdt: vm %p: remove %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, shmmap_se->va));

	/* Delete the entry from the shm map */
	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
	shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	mutex_exit(&shm_lock);

	uvm_deallocate(&p->p_vmspace->vm_map, shmmap_se->va, size);
	if (uobj != NULL)
		uao_detach(uobj);
	pool_put(&shmmap_entry_pool, shmmap_se);

	return 0;
}
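
/*
 * Illustrative note (not part of the original sources): the lookup above
 * matches shmaddr against the recorded attach address exactly, so shmdt()
 * must be passed the very pointer shmat() returned:
 *
 *	void *p = shmat(shmid, NULL, 0);
 *	...
 *	if (shmdt(p) == -1)	// any other address yields EINVAL
 *		err(1, "shmdt");
 */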

/*
 * Map shared memory.
 */
int
sys_shmat(struct lwp *l, const struct sys_shmat_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */
	int error, flags = 0;
	struct proc *p = l->l_proc;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object *uobj;
	struct vmspace *vm;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	/* Allocate a new map entry and set it */
	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL) {
		error = EINVAL;
		goto err;
	}
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto err;

	vm = p->p_vmspace;
	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg) {
		error = EMFILE;
		goto err;
	}

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else {
			error = EINVAL;
			goto err;
		}
	} else {
		/* This is just a hint to uvm_mmap() about where to put it. */
		attach_va = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)vm->vm_daddr, size);
	}

	/*
	 * Create a map entry, add it to the list and increase the counters.
	 * The lock will be dropped before the mapping, so disable
	 * reallocation.
	 */
	shmmap_s = shmmap_getprivate(p);
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_nattch++;
	shm_realloc_disable++;
	mutex_exit(&shm_lock);

	/*
	 * Add a reference to the memory object, map it to the
	 * address space, and lock the memory, if needed.
	 */
	uobj = shmseg->_shm_internal;
	uao_reference(uobj);
	error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
	if (error)
		goto err_detach;
	if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
		error = uvm_map_pageable(&vm->vm_map, attach_va,
		    attach_va + size, false, 0);
		if (error) {
			if (error == EFAULT)
				error = ENOMEM;
			uvm_deallocate(&vm->vm_map, attach_va, size);
			goto err_detach;
		}
	}

	/* Set the new address, and update the time */
	mutex_enter(&shm_lock);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = SCARG(uap, shmid);
	shmseg->shm_atime = time_second;
	shm_realloc_disable--;
	retval[0] = attach_va;
	SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, attach_va));
err:
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);
	if (error && shmmap_se)
		pool_put(&shmmap_entry_pool, shmmap_se);
	return error;

err_detach:
	uao_detach(uobj);
	mutex_enter(&shm_lock);
	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
	shm_realloc_disable--;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);
	if (uobj != NULL)
		uao_detach(uobj);
	pool_put(&shmmap_entry_pool, shmmap_se);
	return error;
}
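
/*
 * Illustrative note (not part of the original sources): with SHM_RND the
 * requested address is rounded down to a SHMLBA boundary, otherwise an
 * unaligned shmaddr is rejected with EINVAL.  For example, assuming
 * SHMLBA == 4096:
 *
 *	0x20001234 & ~(4096 - 1) == 0x20001000
 *
 * A minimal userland sketch:
 *
 *	void *p = shmat(shmid, (void *)0x20001234, SHM_RND);
 *	// p == (void *)0x20001000 on success
 */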

/*
 * Shared memory control operations.
 */
int
sys___shmctl13(struct lwp *l, const struct sys___shmctl13_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);
	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return error;
	}

	error = shmctl1(l, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return error;
}

int
shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
{
	struct uvm_object *uobj = NULL;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	int error = 0;

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			break;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			break;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			break;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			uobj = shmseg->_shm_internal;
			shm_free_segment(IPCID_TO_IX(shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
		if ((error = kauth_authorize_generic(cred,
		    KAUTH_GENERIC_ISSUSER, NULL)) != 0)
			break;
		error = shm_memlock(l, shmseg, shmid, cmd);
		break;
	default:
		error = EINVAL;
	}

	mutex_exit(&shm_lock);
	if (uobj != NULL)
		uao_detach(uobj);
	return error;
}
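
/*
 * Illustrative note (not part of the original sources): IPC_RMID only marks
 * the segment SHMSEG_REMOVED while attachments remain; the backing object
 * is freed once the last process detaches (see shm_delete_mapping() above).
 * A minimal userland sketch:
 *
 *	struct shmid_ds ds;
 *	if (shmctl(shmid, IPC_STAT, &ds) == 0)
 *		printf("nattch = %u\n", (unsigned)ds.shm_nattch);
 *	(void)shmctl(shmid, IPC_RMID, NULL);	// lazy destruction
 */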

/*
 * Try to take an already existing segment.
 *  => must be called with shm_lock held;
 *  => called from one place, thus, inline;
 */
static inline int
shmget_existing(struct lwp *l, const struct sys_shmget_args *uap, int mode,
    register_t *retval)
{
	struct shmid_ds *shmseg;
	kauth_cred_t cred = l->l_cred;
	int segnum, error;
again:
	KASSERT(mutex_owned(&shm_lock));

	/* Find segment by key */
	for (segnum = 0; segnum < shminfo.shmmni; segnum++)
		if ((shmsegs[segnum].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[segnum].shm_perm._key == SCARG(uap, key))
			break;
	if (segnum == shminfo.shmmni) {
		/* Not found */
		return -1;
	}

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = cv_wait_sig(&shm_cv[segnum], &shm_lock);
		if (error)
			return error;
		goto again;
	}

	/* Check the permissions, the segment size and the appropriate flags */
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

int
sys_shmget(struct lwp *l, const struct sys_shmget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */
	struct shmid_ds *shmseg;
	kauth_cred_t cred = l->l_cred;
	key_t key = SCARG(uap, key);
	size_t size;
	int error, mode, segnum;
	bool lockmem;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

	SHMPRINTF(("shmget: key 0x%lx size 0x%x shmflg 0x%x mode 0x%x\n",
	    SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode));

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	if (key != IPC_PRIVATE) {
		error = shmget_existing(l, uap, mode, retval);
		if (error != -1) {
			mutex_exit(&shm_lock);
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0) {
			mutex_exit(&shm_lock);
			return ENOENT;
		}
	}
	error = 0;

	/*
	 * Check against the limits.
	 */
	size = SCARG(uap, size);
	if (size < shminfo.shmmin || size > shminfo.shmmax) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}
	if (shm_nused >= shminfo.shmmni) {
		mutex_exit(&shm_lock);
		return ENOSPC;
	}
	size = (size + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall) {
		mutex_exit(&shm_lock);
		return ENOMEM;
	}

	/* Find the first available segment */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni; segnum++)
			if (shmsegs[segnum].shm_perm.mode & SHMSEG_FREE)
				break;
		KASSERT(segnum < shminfo.shmmni);
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}

	/*
	 * Initialize the segment.
	 * We will drop the lock while allocating the memory, thus mark the
	 * segment as allocated but removed, so that no other thread can take
	 * it.  Also, disable reallocation while the lock is dropped.
	 */
	shmseg = &shmsegs[segnum];
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shm_committed += btoc(size);
	shm_nused++;
	lockmem = shm_use_phys;
	shm_realloc_disable++;
	mutex_exit(&shm_lock);

	/* Allocate the memory object and lock it if needed */
	shmseg->_shm_internal = uao_create(size, 0);
	if (lockmem) {
		/* Wire the pages and tag it */
		error = uobj_wirepages(shmseg->_shm_internal, 0,
		    round_page(shmseg->shm_segsz));
		if (error) {
			mutex_enter(&shm_lock);
			shm_free_segment(segnum);
			shm_realloc_disable--;
			mutex_exit(&shm_lock);
			return error;
		}
	}

	/*
	 * Note: while the segment is marked this way, there is no need to
	 * hold the lock while initializing it (except for shm_perm.mode).
	 */
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = l->l_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;

	/*
	 * The segment is initialized.
	 * Take the lock, mark it as allocated, and notify any waiters.
	 * Also, re-enable reallocation.
	 */
	mutex_enter(&shm_lock);
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS | SHMSEG_RMLINGER)) |
	    SHMSEG_ALLOCATED | (lockmem ? SHMSEG_WIRED : 0);
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		cv_broadcast(&shm_cv[segnum]);
	}
	shm_realloc_disable--;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);

	return error;
}
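
/*
 * Illustrative note (not part of the original sources): a typical full
 * lifecycle of the interfaces implemented above, as a minimal userland
 * sketch (error handling elided; the ftok() path is hypothetical):
 *
 *	#include <sys/shm.h>
 *
 *	key_t key = ftok("/some/path", 1);
 *	int id = shmget(key, 8192, IPC_CREAT | 0600);	// create or look up
 *	char *p = shmat(id, NULL, 0);			// attach
 *	p[0] = 'x';					// use the memory
 *	shmdt(p);					// detach
 *	shmctl(id, IPC_RMID, NULL);			// remove
 */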

void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	SHMPRINTF(("shmfork %p->%p\n", vm1, vm2));
	mutex_enter(&shm_lock);
	vm2->vm_shm = vm1->vm_shm;
	if (vm1->vm_shm) {
		shmmap_s = (struct shmmap_state *)vm1->vm_shm;
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
		shmmap_s->nrefs++;
	}
	mutex_exit(&shm_lock);
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object **uobj;
	size_t *size;
	u_int i, n;

	SLIST_HEAD(, shmmap_entry) tmp_entries;

	mutex_enter(&shm_lock);
	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL) {
		mutex_exit(&shm_lock);
		return;
	}

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
		SHMPRINTF(("shmexit: vm %p drop ref (%d entries), refs = %d\n",
		    vm, shmmap_s->nitems, shmmap_s->nrefs));
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		mutex_exit(&shm_lock);
		return;
	}

	KASSERT(shmmap_s->nrefs == 0);
	n = shmmap_s->nitems;
	SHMPRINTF(("shmexit: vm %p cleanup (%d entries)\n", vm, n));
	mutex_exit(&shm_lock);
	if (n == 0) {
		kmem_free(shmmap_s, sizeof(struct shmmap_state));
		return;
	}

	/* Allocate the arrays */
	SLIST_INIT(&tmp_entries);
	uobj = kmem_zalloc(n * sizeof(void *), KM_SLEEP);
	size = kmem_zalloc(n * sizeof(size_t), KM_SLEEP);

	/* Delete the entries from the shm map */
	i = 0;
	mutex_enter(&shm_lock);
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		struct shmid_ds *shmseg;

		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
		size[i] = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
		uobj[i] = shm_delete_mapping(shmmap_s, shmmap_se);
		SLIST_INSERT_HEAD(&tmp_entries, shmmap_se, next);
		i++;
	}
	mutex_exit(&shm_lock);

	/* Unmap all segments, free the entries */
	i = 0;
	while (!SLIST_EMPTY(&tmp_entries)) {
		KASSERT(i < n);
		shmmap_se = SLIST_FIRST(&tmp_entries);
		SLIST_REMOVE(&tmp_entries, shmmap_se, shmmap_entry, next);
		uvm_deallocate(&vm->vm_map, shmmap_se->va, size[i]);
		if (uobj[i] != NULL)
			uao_detach(uobj[i]);
		pool_put(&shmmap_entry_pool, shmmap_se);
		i++;
	}

	kmem_free(uobj, n * sizeof(void *));
	kmem_free(size, n * sizeof(size_t));
	kmem_free(shmmap_s, sizeof(struct shmmap_state));
}

static int
shmrealloc(int newshmni)
{
	vaddr_t v;
	struct shmid_ds *oldshmsegs, *newshmsegs;
	kcondvar_t *newshm_cv;
	size_t sz;
	int i, lsegid;

	if (newshmni < 1)
		return EINVAL;

	/* Allocate new memory area */
	sz = ALIGN(newshmni * sizeof(struct shmid_ds)) +
	    ALIGN(newshmni * sizeof(kcondvar_t));
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&shm_lock);
	while (shm_realloc_state || shm_realloc_disable)
		cv_wait(&shm_realloc_cv, &shm_lock);

	/*
	 * Find the index of the last used segment.  Fail if we would be
	 * reallocating to fewer segments than are currently in use.
	 */
	lsegid = 0;
	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_FREE) == 0)
			lsegid = i;
	if (lsegid >= newshmni) {
		mutex_exit(&shm_lock);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	shm_realloc_state = true;

	newshmsegs = (void *)v;
	newshm_cv = (void *)(ALIGN(newshmsegs) +
	    newshmni * sizeof(struct shmid_ds));

	/* Copy all memory to the new area */
	for (i = 0; i < shm_nused; i++)
		(void)memcpy(&newshmsegs[i], &shmsegs[i],
		    sizeof(newshmsegs[0]));

	/* Mark all new segments as free, if there are any */
	for (; i < newshmni; i++) {
		cv_init(&newshm_cv[i], "shmwait");
		newshmsegs[i].shm_perm.mode = SHMSEG_FREE;
		newshmsegs[i].shm_perm._seq = 0;
	}

	oldshmsegs = shmsegs;
	sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
	    ALIGN(shminfo.shmmni * sizeof(kcondvar_t));

	shminfo.shmmni = newshmni;
	shmsegs = newshmsegs;
	shm_cv = newshm_cv;

	/* Reallocation completed - notify all waiters, if any */
	shm_realloc_state = false;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);

	uvm_km_free(kernel_map, (vaddr_t)oldshmsegs, sz, UVM_KMF_WIRED);
	return 0;
}
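
/*
 * Illustrative note (not part of the original sources): shmrealloc() is
 * driven by the writable kern.ipc.shmmni sysctl registered below, so the
 * segment table can be resized at runtime, e.g.:
 *
 *	# sysctl -w kern.ipc.shmmni=2048
 *
 * Shrinking below the highest in-use slot fails with EBUSY, as enforced
 * by the lsegid check above.
 */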

void
shminit(void)
{
	vaddr_t v;
	size_t sz;
	int i;

	mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
	    "shmmp", &pool_allocator_nointr, IPL_NONE);
	cv_init(&shm_realloc_cv, "shmrealc");

	/* Allocate the wired memory for our structures */
	sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
	    ALIGN(shminfo.shmmni * sizeof(kcondvar_t));
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;
	shm_cv = (void *)(ALIGN(shmsegs) +
	    shminfo.shmmni * sizeof(struct shmid_ds));

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		cv_init(&shm_cv[i], "shmwait");
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shm_realloc_disable = 0;
	shm_realloc_state = false;
}

static int
sysctl_ipc_shmmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = shmrealloc(newsize);
	sysctl_relock();
	return error;
}

static int
sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmall;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < 1)
		return EINVAL;

	shminfo.shmall = newsize;
	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;

	return 0;
}
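
/*
 * Illustrative note (not part of the original sources): the handler above
 * derives the byte limit as shmmax = shmall * PAGE_SIZE.  For example,
 * assuming a 4 KB PAGE_SIZE:
 *
 *	# sysctl -w kern.ipc.shmmaxpgs=8192
 *
 * after which shmmax becomes 8192 * 4096 = 33554432 bytes (32 MB), and the
 * read-only kern.ipc.shmmax node below reflects the new limit.
 */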

SYSCTL_SETUP(sysctl_ipc_shm_setup, "sysctl kern.ipc subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ipc",
		SYSCTL_DESCR("SysV IPC options"),
		NULL, 0, NULL, 0,
		CTL_KERN, KERN_SYSVIPC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READONLY,
		CTLTYPE_INT, "shmmax",
		SYSCTL_DESCR("Max shared memory segment size in bytes"),
		NULL, 0, &shminfo.shmmax, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAX, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmmni",
		SYSCTL_DESCR("Max number of shared memory identifiers"),
		sysctl_ipc_shmmni, 0, &shminfo.shmmni, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMNI, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmseg",
		SYSCTL_DESCR("Max shared memory segments per process"),
		NULL, 0, &shminfo.shmseg, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMSEG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmmaxpgs",
		SYSCTL_DESCR("Max amount of shared memory in pages"),
		sysctl_ipc_shmmaxpgs, 0, &shminfo.shmall, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAXPGS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shm_use_phys",
		SYSCTL_DESCR("Enable/disable locking of shared memory in "
		    "physical memory"), NULL, 0, &shm_use_phys, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMUSEPHYS, CTL_EOL);
}
   1067