/*	$NetBSD: sysv_shm.c,v 1.42 1998/02/05 07:59:59 mrg Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#ifdef UVM
#include <uvm/uvm_extern.h>
#else
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif

struct shmid_ds *shm_find_segment_by_shmid __P((int));

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);		                 initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 * shmsys(arg1, arg2, arg3, arg4);         shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per-proc array of 'struct shmmap_state'
 */

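/*
 * Illustrative userland usage of the interface implemented below
 * (a sketch, not kernel code; error checking omitted):
 *
 *	int id = shmget(key, len, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	...read and write p[0..len-1], shared across processes...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */
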
#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
#ifdef UVM
	struct uvm_object *shm_object;
#else
	vm_object_t shm_object;
#endif
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_delete_mapping __P((struct vmspace *, struct shmmap_state *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
				int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *, struct sys_shmget_args *,
					int, register_t *));

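/*
 * Linear scan of shmsegs[] for an allocated segment whose key matches;
 * returns the segment index, or -1 if the key is not in use.
 */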
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

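/*
 * Translate a shmid into a segment pointer.  The index part of the id
 * selects the slot; the sequence number must match the slot's current
 * sequence so that stale ids referring to removed segments are rejected.
 */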
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

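/*
 * Release a segment's backing VM object, free its handle, and return
 * the slot to the free pool, updating the global usage counters.
 */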
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
#ifdef UVM
	uao_detach(shm_handle->shm_object);
#else
	vm_object_deallocate(shm_handle->shm_object);
#endif
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

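/*
 * Unmap one attached segment from an address space.  The last detach
 * of a segment already marked SHMSEG_REMOVED frees the segment itself.
 */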
static int
shm_delete_mapping(vm, shmmap_s)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
#ifdef UVM
	result = uvm_deallocate(&vm->vm_map,
				shmmap_s->va, shmmap_s->va + size);
#else
	result = vm_map_remove(&vm->vm_map,
			       shmmap_s->va, shmmap_s->va + size);
#endif
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

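/*
 * shmdt(2): find the caller's mapping whose attach address matches
 * shmaddr exactly, and remove it.
 */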
int
sys_shmdt(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(void *) shmaddr;
	} */ *uap = v;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)SCARG(uap, shmaddr))
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p->p_vmspace, shmmap_s);
}

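/*
 * shmat(2): map a segment into the caller's address space.  A private
 * per-process array of shmmap_state (allocated lazily on first attach)
 * records the attach address and id for up to shminfo.shmseg attaches.
 * An explicit shmaddr must be SHMLBA-aligned, or SHM_RND must be set
 * to round it down; otherwise the kernel picks a hint above the
 * process's maximum data region.
 */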
int
sys_shmat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vm_offset_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vm_offset_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va =
		    round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}
	shm_handle = shmseg->shm_internal;
#ifdef UVM
	uao_reference(shm_handle->shm_object);
	rv = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
		     shm_handle->shm_object, 0,
		     UVM_MAPFLAG(prot, prot, UVM_INH_SHARE,
				 UVM_ADV_RANDOM, 0));
	if (rv != KERN_SUCCESS) {
		/* Drop the reference taken above so it is not leaked. */
		uao_detach(shm_handle->shm_object);
		return ENOMEM;
	}
#else
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1);
	if (rv != KERN_SUCCESS) {
		/* Drop the reference taken above so it is not leaked. */
		vm_object_deallocate(shm_handle->shm_object);
		return ENOMEM;
	}
	vm_map_protect(&p->p_vmspace->vm_map, attach_va, attach_va + size,
		       prot, 0);
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);
#endif

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

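/*
 * shmctl(2): IPC_STAT copies the descriptor out, IPC_SET updates
 * ownership and permissions, and IPC_RMID marks the segment removed,
 * freeing it immediately if nothing is attached.  SHM_LOCK and
 * SHM_UNLOCK are not supported.
 */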
int
sys_shmctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
				sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
			       sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

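/*
 * shmget(2) helper for a key that already exists: wait out a segment
 * still being allocated (returning EAGAIN so the caller retries the
 * lookup), then check permissions, size, and IPC_CREAT|IPC_EXCL.
 */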
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

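/*
 * shmget(2) helper that creates a new segment: validate the size, find
 * a free slot, bump the sequence number, and allocate the backing VM
 * object.  The slot is marked ALLOCATED|REMOVED while we may sleep in
 * malloc() so no other process can claim the same key; anyone who
 * tried in the meantime is woken up at the end.
 */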
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
#ifndef UVM
	vm_pager_t pager;
#endif
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = SCARG(uap, key);
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

#ifdef UVM
	shm_handle->shm_object = uao_create(size, 0);
#else
	shm_handle->shm_object = vm_object_allocate(size);
	if (shm_handle->shm_object == NULL) {
		/* XXX cannot happen */
		error = ENOMEM;
		goto out;
	}
	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	pager = vm_pager_allocate(PG_DFLT, 0, size, VM_PROT_DEFAULT, 0);
	if (pager == NULL) {
		error = ENOMEM;
		goto out;
	}
	vm_object_setpager(shm_handle->shm_object, pager, 0, 0);
#endif

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

#ifndef UVM
out:
	if (error) {
		if (shm_handle->shm_object != NULL)
			vm_object_deallocate(shm_handle->shm_object);
		free(shm_handle, M_SHM);
		shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED)
		    | SHMSEG_FREE;
	} else
#endif
		*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

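/*
 * shmget(2): for IPC_PRIVATE always allocate a fresh segment;
 * otherwise look the key up and either attach to the existing
 * segment or, with IPC_CREAT, allocate a new one.
 */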
int
sys_shmget(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

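/*
 * Fork hook: the child inherits a copy of the parent's shmmap_state
 * array, so every attached segment gains one reference.
 */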
void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(vm1->vm_shm, shmmap_s, size);
	vm2->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

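/*
 * Exit hook: detach every remaining segment and free the per-process
 * shmmap_state array.
 */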
void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

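/*
 * Bootstrap: shmmax appears to be configured in pages and is converted
 * to bytes here; all segment slots start out free.
 */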
void
shminit()
{
	int i;

	shminfo.shmmax *= NBPG;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}