/*	sysv_shm.c revision 1.15	*/

/*
 * Copyright (c) 1994 Charles Hannum.
 * Copyright (c) 1994 Adam Glass
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the Author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Adam Glass ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Adam Glass BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);				initialization
 * shmexit(struct proc *)			cleanup
 * shmfork(struct proc *, struct proc *, int)	fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
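
/*
 * Illustrative only (not part of this file): user programs reach the
 * shmcalls table below through the shmsys() stub, which libc exposes as
 * the usual SysV calls.  A minimal round trip looks roughly like:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 1;
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */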

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };

#define	SHMSEG_FREE		0x200
#define	SHMSEG_REMOVED		0x400
#define	SHMSEG_ALLOCATED	0x800

vm_map_t sysvshm_map;
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_offset_t kva;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

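/*
 * Scan the shmsegs table for an allocated segment whose key matches;
 * return its index, or -1 if there is none.
 */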
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

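/*
 * Translate a shmid into a pointer to its shmid_ds.  Rejects out-of-range
 * indices, slots that are free or marked removed, and stale ids whose
 * sequence number no longer matches.
 */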
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

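/*
 * Choose a user virtual address for an attach when the caller supplied
 * none: a spot roughly three quarters of the way from the end of the data
 * segment to USRSTACK.  No check is made that the range is actually free
 * (hence the XXX comment below).
 */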
static vm_offset_t
shm_find_space(p, size)
	struct proc *p;
	size_t size;
{
	vm_offset_t low_end, range, current;
	int result;

	low_end = (vm_offset_t)p->p_vmspace->vm_daddr +
	    (p->p_vmspace->vm_dsize << PGSHIFT);
	range = (USRSTACK - low_end);

	/* XXXX totally bogus */
	/* current = range * 3/4 + low_end */
	current = (range / 4) * 3 + low_end;
#if 0
	result = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &current, size,
	    TRUE);
	if (result)
		return NULL;
#endif
	return current;
}

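/*
 * Tear down a segment once nothing references it any more: unmap its
 * kernel virtual space from sysvshm_map, free the handle, credit
 * shm_committed, and mark the slot free.
 */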
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	vm_deallocate(sysvshm_map, shm_handle->kva, size);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

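/*
 * Undo one process mapping of a segment: unmap it from the process
 * address space, stamp shm_dtime, and drop the attach count.  If this was
 * the last attach and the segment is marked removed, destroy it.
 */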
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	result = vm_deallocate(&p->p_vmspace->vm_map, shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

struct shmdt_args {
	void *shmaddr;
};
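/*
 * shmdt(2): look up the caller's mapping whose attach address matches
 * shmaddr and delete it; EINVAL if no such mapping exists.
 */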
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
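/*
 * shmat(2): attach a segment to the calling process.  Allocates the
 * per-process shmmap_state array on first use, checks IPC permissions,
 * picks or validates the attach address, maps the segment with vm_mmap(),
 * and records the attach.
 */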
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmmap_s, size);
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	if (error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W))
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		attach_va = shm_find_space(p, shmseg->shm_segsz);
		if (attach_va == NULL)
			return ENOMEM;
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, uap->shmid, 0);
	if (error)
		return error;
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *ubuf;
};
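/*
 * shmctl(2): IPC_STAT copies the descriptor out to the user, IPC_SET lets
 * the owner change uid/gid/permission bits, and IPC_RMID marks the segment
 * removed so it is destroyed when the last attach goes away (immediately
 * if nothing is attached).  SHM_LOCK/SHM_UNLOCK are not implemented.
 */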
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error, segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_R))
			return error;
		if (error = copyout((caddr_t)shmseg, uap->ubuf, sizeof(inbuf)))
			return error;
		break;
	case IPC_SET:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		if (error = copyin(uap->ubuf, (caddr_t)&inbuf, sizeof(inbuf)))
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if (error = ipcperm(cred, &shmseg->shm_perm, IPC_M))
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
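/*
 * shmget(2) helper for a key that already names a segment: fail if the
 * segment is being removed, enforce permissions and the requested size,
 * honor IPC_CREAT|IPC_EXCL, and otherwise return the existing id.
 */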
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED)
		return EBUSY;
	if (error = ipcperm(cred, &shmseg->shm_perm, mode))
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

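/*
 * shmget(2) helper that creates a new segment: validate the size against
 * shmmin/shmmax and the system-wide shmall limit, find a free slot, mark
 * it allocated-but-removed while malloc() may sleep, back it with
 * anonymous memory in sysvshm_map, then fill in the descriptor.
 */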
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, result, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (uap->size + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
	    VM_PROT_DEFAULT, MAP_ANON, shmid, 0);
	if (result != KERN_SUCCESS) {
		shmseg->shm_perm.mode = SHMSEG_FREE;
		shm_last_free = segnum;
		free((caddr_t)shm_handle, M_SHM);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	*retval = shmid;
	return 0;
}

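/*
 * shmget(2): for IPC_PRIVATE always create a new segment; otherwise look
 * the key up and either return the existing segment or, if IPC_CREAT was
 * given, allocate a new one.
 */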
int
shmget(p, uap, retval)
	struct proc *p;
	struct shmget_args *uap;
	int *retval;
{
	int segnum, mode, error;
	struct shmid_ds *shmseg;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0)
			return shmget_existing(p, uap, mode, segnum, retval);
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

struct shmsys_args {
	u_int which;
};
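/*
 * shmsys(2): multiplexed entry point.  uap->which selects shmat, shmctl,
 * shmdt or shmget from the shmcalls table; the real arguments follow the
 * selector, hence the &uap[1].
 */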
int
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

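/*
 * Called at fork time: give the child its own copy of the parent's
 * shmmap_state array and bump the attach count of every segment the
 * parent has attached.  isvfork is currently ignored.
 */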
void
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

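/*
 * Called at process exit: detach every segment the process still has
 * attached and free its shmmap_state array.
 */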
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

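/*
 * Bootstrap: carve a kernel submap large enough for shmall pages out of
 * kernel_map and mark every shmsegs slot free.
 */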
void
shminit()
{
	int i;
	vm_offset_t garbage1, garbage2;

	/* actually this *should* be pageable.  SHM_{LOCK,UNLOCK} */
	sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
	    shminfo.shmall * NBPG, FALSE);
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}