/* sysv_shm.c revision 1.21 */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);				initialization
 * shmexit(struct proc *);			cleanup
 * shmfork(struct proc *, struct proc *, int);	fork handling
 * shmsys(arg1, arg2, arg3, arg4);		shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per-proc array of 'struct shmmap_state'
 */

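/*
 * A minimal sketch of typical userland use of this interface, assuming
 * the standard <sys/ipc.h>/<sys/shm.h> API (error checks omitted for
 * brevity):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = (char *)shmat(id, NULL, 0);	attach; kernel picks the va
 *	p[0] = 'x';				visible to every attacher
 *	shmdt(p);				detach
 *	shmctl(id, IPC_RMID, NULL);		destroyed after last detach
 */
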
int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

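/*
 * The SHMSEG_* flags above are kept in shm_perm.mode together with the
 * access bits.  All four values sit above ACCESSPERMS (0777), so code
 * below can mask with ACCESSPERMS or ~ACCESSPERMS without disturbing
 * them (see the IPC_SET case in shmctl()).
 */
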
vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_offset_t kva;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

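/*
 * A shmid is presumably IXSEQ_TO_IPCID(index, perm): the array index in
 * the low bits with perm.seq above it.  Because shmget_allocate_segment()
 * bumps seq each time a slot is recycled, a stale id for a reused slot
 * fails the seq comparison above and the lookup returns NULL, e.g.:
 *
 *	id = IXSEQ_TO_IPCID(3, perm);	   seq was 5 at creation
 *	... segment destroyed, slot 3 reused, seq becomes 6 ...
 *	shm_find_segment_by_shmid(id) == NULL
 */
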
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
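	/*
	 * Round the segment size up to a click boundary, matching the
	 * allocation in shmget_allocate_segment().  If CLOFSET were
	 * 4095 (a 4096-byte click), for example, a 1-byte segment
	 * occupies one full click: (1 + 4095) & ~4095 == 4096.
	 */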
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	vm_deallocate(sysvshm_map, shm_handle->kva, size);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

struct shmdt_args {
	void *shmaddr;
};
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	if (error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	}
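	/*
	 * If SHMLBA were 0x1000, for example, SHM_RND would turn a hint
	 * of 0x12345 into 0x12000 above, while without SHM_RND the same
	 * hint is rejected with EINVAL because it is not SHMLBA-aligned.
	 */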
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, uap->shmid, 0);
	if (error)
		return error;
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *ubuf;
};
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error, segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_R))
			return error;
		if (error = copyout((caddr_t)shmseg, uap->ubuf, sizeof(inbuf)))
			return error;
		break;
	case IPC_SET:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		if (error = copyin(uap->ubuf, (caddr_t)&inbuf, sizeof(inbuf)))
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

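/*
 * A sketch of exercising IPC_STAT from userland, assuming the standard
 * shmctl() prototype and a valid id:
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size %lu nattch %d\n",
 *		    (u_long)ds.shm_segsz, (int)ds.shm_nattch);
 */
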
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		if (error =
		    tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0))
			return error;
		return EAGAIN;
	}
	if (error = ipcperm(cred, &shmseg->shm_perm, mode))
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
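
/*
 * Note the IPC_CREAT|IPC_EXCL check above: shmget(key, size,
 * IPC_CREAT | IPC_EXCL | 0600) on a key that already exists fails
 * with EEXIST rather than returning the existing segment.
 */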

static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, result, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (uap->size + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
	    VM_PROT_DEFAULT, MAP_ANON, shmid, 0);
	if (result != KERN_SUCCESS) {
		shmseg->shm_perm.mode = SHMSEG_FREE;
		shm_last_free = segnum;
		free((caddr_t)shm_handle, M_SHM);
		/* Just in case. */
		wakeup((caddr_t)shmseg);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
}

int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;
	struct shmid_ds *shmseg;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

struct shmsys_args {
	u_int	which;
};
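
/*
 * The dispatcher below relies on the syscall argument block being laid
 * out contiguously: the real shm{at,ctl,dt,get} arguments follow
 * 'which', so &uap[1] is passed as each handler's own *_args pointer.
 */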
int
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

void
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

void
shminit()
{
	int i;
	vm_offset_t garbage1, garbage2;

	/* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
	sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
				    shminfo.shmall * NBPG, TRUE);
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}