/*	$NetBSD: sysv_shm.c,v 1.25 1994/08/31 21:47:36 mycroft Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);				initialization
 * shmexit(struct proc *)			cleanup
 * shmfork(struct proc *, struct proc *, int)	fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
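
/*
 * For orientation, a minimal userland sequence exercising the four
 * operations implemented below (ordinary SysV shm usage, not code
 * taken from this file) might be:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 1;
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */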

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000
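
/*
 * These segment-state flags share shm_perm.mode with the nine low
 * permission bits (ACCESSPERMS == 0777), which is why they start at
 * 0x0200 and why the permission updates below mask with ACCESSPERMS.
 */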

vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_offset_t kva;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

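/*
 * A shmid packs the slot index together with a per-slot sequence
 * number; with the usual <sys/ipc.h> encoding (an assumption here,
 * those macros are not defined in this file) that is roughly:
 *
 *	shmid = (perm.seq << 16) | (segnum & 0xffff);
 *
 * so IPCID_TO_IX() recovers the index and IPCID_TO_SEQ() the
 * generation, letting stale ids for a reused slot be rejected below.
 */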
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

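/*
 * Segment sizes are rounded up to a click (VM cluster) boundary with
 * (size + CLOFSET) & ~CLOFSET throughout this file.  For example, if
 * CLBYTES were 4096 (so CLOFSET == 4095), a 5000-byte segment would
 * occupy 8192 bytes of backing store.
 */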
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	vm_deallocate(sysvshm_map, shm_handle->kva, size);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

struct shmdt_args {
	void *shmaddr;
};
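
/*
 * shmdt() identifies the mapping to detach purely by its attach
 * address, scanning the per-process shmmap_state table for a live
 * entry whose va matches shmaddr.
 */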
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
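
/*
 * Attach-address policy: a NULL shmaddr lets the VM system choose
 * (hinted past the data segment); with SHM_RND a caller-supplied
 * address is rounded down to an SHMLBA boundary; otherwise it must
 * already be SHMLBA-aligned or the call fails with EINVAL.  For
 * example, if SHMLBA were 8192, SHM_RND would turn 0x2003 into 0x2000.
 */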
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	if (error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, (caddr_t) uap->shmid, 0);
	if (error)
		return error;
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *ubuf;
};
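
/*
 * Note the IPC_RMID semantics below: the segment is only marked
 * SHMSEG_REMOVED and its key retired to IPC_PRIVATE; the backing
 * memory survives until the last attached process detaches (see
 * shm_delete_mapping() above), matching SysV behaviour.
 */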
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error, segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_R))
			return error;
		if (error = copyout((caddr_t)shmseg, uap->ubuf, sizeof(inbuf)))
			return error;
		break;
	case IPC_SET:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		if (error = copyin(uap->ubuf, (caddr_t)&inbuf, sizeof(inbuf)))
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
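
/*
 * shmget() splits into two paths: shmget_existing() when the key is
 * already present in shmsegs[], and shmget_allocate_segment() for a
 * new key (or IPC_PRIVATE, which always allocates a fresh segment).
 */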
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		if (error =
		    tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0))
			return error;
		return EAGAIN;
	}
	if (error = ipcperm(cred, &shmseg->shm_perm, mode))
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, result, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (uap->size + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
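	/*
	 * The sequence bump just above is what allows
	 * shm_find_segment_by_shmid() to reject stale shmids that
	 * referenced an earlier incarnation of this slot.
	 */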
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
	    VM_PROT_DEFAULT, MAP_ANON, (caddr_t) shmid, 0);
	if (result != KERN_SUCCESS) {
		shmseg->shm_perm.mode = SHMSEG_FREE;
		shm_last_free = segnum;
		free((caddr_t)shm_handle, M_SHM);
		/* Just in case. */
		wakeup((caddr_t)shmseg);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
}

int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;
	struct shmid_ds *shmseg;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

struct shmsys_args {
	u_int which;
};
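
/*
 * shmsys() is the single multiplexed system-call entry point: `which'
 * selects the operation (0 = shmat, 1 = shmctl, 2 = shmdt, 3 = shmget,
 * matching the order of shmcalls[] above), and the selected call's own
 * arguments sit immediately after `which', hence the &uap[1] below.
 */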
int
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

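/*
 * Duplicate the parent's attach table on fork and bump each mapped
 * segment's reference count; the caller is presumably expected to
 * invoke this only when the parent actually has a table
 * (vm_shm != NULL), since the table is copied unconditionally here.
 */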
void
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

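/*
 * shminfo.shmmax appears to be configured in pages and is scaled to
 * bytes here, so shminit() must run exactly once.
 */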
void
shminit()
{
	int i;
	vm_offset_t garbage1, garbage2;

	shminfo.shmmax *= NBPG;

	/* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
	sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
	    shminfo.shmall * NBPG, TRUE);
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}