sysv_msg.c revision 1.71.8.1 1 /* $NetBSD: sysv_msg.c,v 1.71.8.1 2019/02/23 06:58:14 martin Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Implementation of SVID messages
35 *
36 * Author: Daniel Boulet
37 *
38 * Copyright 1993 Daniel Boulet and RTMX Inc.
39 *
40 * This system call was implemented by Daniel Boulet under contract from RTMX.
41 *
42 * Redistribution and use in source forms, with and without modification,
43 * are permitted provided that this entire comment appears intact.
44 *
45 * Redistribution in binary form may occur without any restrictions.
46 * Obviously, it would be nice if you gave credit where credit is due
47 * but requiring it would be too onerous.
48 *
49 * This software is provided ``AS IS'' without any warranties of any kind.
50 */
51
52 #include <sys/cdefs.h>
53 __KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.71.8.1 2019/02/23 06:58:14 martin Exp $");
54
55 #ifdef _KERNEL_OPT
56 #include "opt_sysv.h"
57 #endif
58
59 #include <sys/param.h>
60 #include <sys/kernel.h>
61 #include <sys/msg.h>
62 #include <sys/sysctl.h>
63 #include <sys/mount.h> /* XXX for <sys/syscallargs.h> */
64 #include <sys/syscallargs.h>
65 #include <sys/kauth.h>
66
67 #define MSG_DEBUG
68 #undef MSG_DEBUG_OK
69
70 #ifdef MSG_DEBUG_OK
71 #define MSG_PRINTF(a) printf a
72 #else
73 #define MSG_PRINTF(a)
74 #endif
75
76 static int nfree_msgmaps; /* # of free map entries */
77 static short free_msgmaps; /* head of linked list of free map entries */
78 static struct __msg *free_msghdrs; /* list of free msg headers */
79 static char *msgpool; /* MSGMAX byte long msg buffer pool */
80 static struct msgmap *msgmaps; /* MSGSEG msgmap structures */
81 static struct __msg *msghdrs; /* MSGTQL msg headers */
82
83 kmsq_t *msqs; /* MSGMNI msqid_ds struct's */
84 kmutex_t msgmutex; /* subsystem lock */
85
86 static u_int msg_waiters = 0; /* total number of msgrcv waiters */
87 static bool msg_realloc_state;
88 static kcondvar_t msg_realloc_cv;
89
90 static void msg_freehdr(struct __msg *);
91
92 extern int kern_has_sysvmsg;
93
94 SYSCTL_SETUP_PROTO(sysctl_ipc_msg_setup);
95
96 void
97 msginit(struct sysctllog **clog)
98 {
99 int i, sz;
100 vaddr_t v;
101
102 /*
103 * msginfo.msgssz should be a power of two for efficiency reasons.
104 * It is also pretty silly if msginfo.msgssz is less than 8
105 * or greater than about 256 so ...
106 */
107
108 i = 8;
109 while (i < 1024 && i != msginfo.msgssz)
110 i <<= 1;
111 if (i != msginfo.msgssz) {
112 panic("msginfo.msgssz = %d, not a small power of 2",
113 msginfo.msgssz);
114 }
115
116 if (msginfo.msgseg > 32767) {
117 panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
118 }
119
120 /* Allocate the wired memory for our structures */
121 sz = ALIGN(msginfo.msgmax) +
122 ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
123 ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
124 ALIGN(msginfo.msgmni * sizeof(kmsq_t));
125 sz = round_page(sz);
126 v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
127 if (v == 0)
128 panic("sysv_msg: cannot allocate memory");
129 msgpool = (void *)v;
130 msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax));
131 msghdrs = (void *)((uintptr_t)msgmaps +
132 ALIGN(msginfo.msgseg * sizeof(struct msgmap)));
133 msqs = (void *)((uintptr_t)msghdrs +
134 ALIGN(msginfo.msgtql * sizeof(struct __msg)));
135
136 for (i = 0; i < (msginfo.msgseg - 1); i++)
137 msgmaps[i].next = i + 1;
138 msgmaps[msginfo.msgseg - 1].next = -1;
139
140 free_msgmaps = 0;
141 nfree_msgmaps = msginfo.msgseg;
142
143 for (i = 0; i < (msginfo.msgtql - 1); i++) {
144 msghdrs[i].msg_type = 0;
145 msghdrs[i].msg_next = &msghdrs[i + 1];
146 }
147 i = msginfo.msgtql - 1;
148 msghdrs[i].msg_type = 0;
149 msghdrs[i].msg_next = NULL;
150 free_msghdrs = &msghdrs[0];
151
152 for (i = 0; i < msginfo.msgmni; i++) {
153 cv_init(&msqs[i].msq_cv, "msgwait");
154 /* Implies entry is available */
155 msqs[i].msq_u.msg_qbytes = 0;
156 /* Reset to a known value */
157 msqs[i].msq_u.msg_perm._seq = 0;
158 }
159
160 mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
161 cv_init(&msg_realloc_cv, "msgrealc");
162 msg_realloc_state = false;
163
164 kern_has_sysvmsg = 1;
165
166 #ifdef _MODULE
167 if (clog)
168 sysctl_ipc_msg_setup(clog);
169 #endif
170 }
171
/*
 * msgfini: module-unload-time teardown of the SysV message subsystem.
 *
 * Returns 1 (preventing unload) if any message queue is still in use;
 * otherwise destroys all per-queue condvars, releases the wired memory
 * allocated by msginit(), tears down the subsystem lock and condvar,
 * clears kern_has_sysvmsg, and returns 0.
 */
int
msgfini(void)
{
	int i, sz;
	vaddr_t v = (vaddr_t)msgpool;

	mutex_enter(&msgmutex);
	for (i = 0; i < msginfo.msgmni; i++) {
		/* msg_qbytes != 0 marks an allocated queue */
		if (msqs[i].msq_u.msg_qbytes != 0) {
			mutex_exit(&msgmutex);
			return 1; /* queue not available, prevent unload! */
		}
	}
	/*
	 * Destroy all condvars and free the memory we're using
	 */
	for (i = 0; i < msginfo.msgmni; i++) {
		cv_destroy(&msqs[i].msq_cv);
	}
	/* This size computation must mirror the layout in msginit(). */
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);

	cv_destroy(&msg_realloc_cv);
	mutex_exit(&msgmutex);
	mutex_destroy(&msgmutex);

	kern_has_sysvmsg = 0;

	return 0;
}
206
207 static int
208 msgrealloc(int newmsgmni, int newmsgseg)
209 {
210 struct msgmap *new_msgmaps;
211 struct __msg *new_msghdrs, *new_free_msghdrs;
212 char *old_msgpool, *new_msgpool;
213 kmsq_t *new_msqs;
214 vaddr_t v;
215 int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
216 short new_free_msgmaps;
217
218 if (newmsgmni < 1 || newmsgseg < 1)
219 return EINVAL;
220
221 /* Allocate the wired memory for our structures */
222 newmsgmax = msginfo.msgssz * newmsgseg;
223 sz = ALIGN(newmsgmax) +
224 ALIGN(newmsgseg * sizeof(struct msgmap)) +
225 ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
226 ALIGN(newmsgmni * sizeof(kmsq_t));
227 sz = round_page(sz);
228 v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
229 if (v == 0)
230 return ENOMEM;
231
232 mutex_enter(&msgmutex);
233 if (msg_realloc_state) {
234 mutex_exit(&msgmutex);
235 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
236 return EBUSY;
237 }
238 msg_realloc_state = true;
239 if (msg_waiters) {
240 /*
241 * Mark reallocation state, wake-up all waiters,
242 * and wait while they will all exit.
243 */
244 for (i = 0; i < msginfo.msgmni; i++)
245 cv_broadcast(&msqs[i].msq_cv);
246 while (msg_waiters)
247 cv_wait(&msg_realloc_cv, &msgmutex);
248 }
249 old_msgpool = msgpool;
250
251 /* We cannot reallocate less memory than we use */
252 i = 0;
253 for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
254 struct msqid_ds *mptr;
255 kmsq_t *msq;
256
257 msq = &msqs[msqid];
258 mptr = &msq->msq_u;
259 if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
260 i = msqid;
261 }
262 if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
263 mutex_exit(&msgmutex);
264 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
265 return EBUSY;
266 }
267
268 new_msgpool = (void *)v;
269 new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax));
270 new_msghdrs = (void *)((uintptr_t)new_msgmaps +
271 ALIGN(newmsgseg * sizeof(struct msgmap)));
272 new_msqs = (void *)((uintptr_t)new_msghdrs +
273 ALIGN(msginfo.msgtql * sizeof(struct __msg)));
274
275 /* Initialize the structures */
276 for (i = 0; i < (newmsgseg - 1); i++)
277 new_msgmaps[i].next = i + 1;
278 new_msgmaps[newmsgseg - 1].next = -1;
279 new_free_msgmaps = 0;
280 new_nfree_msgmaps = newmsgseg;
281
282 for (i = 0; i < (msginfo.msgtql - 1); i++) {
283 new_msghdrs[i].msg_type = 0;
284 new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
285 }
286 i = msginfo.msgtql - 1;
287 new_msghdrs[i].msg_type = 0;
288 new_msghdrs[i].msg_next = NULL;
289 new_free_msghdrs = &new_msghdrs[0];
290
291 for (i = 0; i < newmsgmni; i++) {
292 new_msqs[i].msq_u.msg_qbytes = 0;
293 new_msqs[i].msq_u.msg_perm._seq = 0;
294 cv_init(&new_msqs[i].msq_cv, "msgwait");
295 }
296
297 /*
298 * Copy all message queue identifiers, message headers and buffer
299 * pools to the new memory location.
300 */
301 for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
302 struct __msg *nmsghdr, *msghdr, *pmsghdr;
303 struct msqid_ds *nmptr, *mptr;
304 kmsq_t *nmsq, *msq;
305
306 msq = &msqs[msqid];
307 mptr = &msq->msq_u;
308
309 if (mptr->msg_qbytes == 0 &&
310 (mptr->msg_perm.mode & MSG_LOCKED) == 0)
311 continue;
312
313 nmsq = &new_msqs[msqid];
314 nmptr = &nmsq->msq_u;
315 memcpy(nmptr, mptr, sizeof(struct msqid_ds));
316
317 /*
318 * Go through the message headers, and and copy each
319 * one by taking the new ones, and thus defragmenting.
320 */
321 nmsghdr = pmsghdr = NULL;
322 msghdr = mptr->_msg_first;
323 while (msghdr) {
324 short nnext = 0, next;
325 u_short msgsz, segcnt;
326
327 /* Take an entry from the new list of free msghdrs */
328 nmsghdr = new_free_msghdrs;
329 KASSERT(nmsghdr != NULL);
330 new_free_msghdrs = nmsghdr->msg_next;
331
332 nmsghdr->msg_next = NULL;
333 if (pmsghdr) {
334 pmsghdr->msg_next = nmsghdr;
335 } else {
336 nmptr->_msg_first = nmsghdr;
337 pmsghdr = nmsghdr;
338 }
339 nmsghdr->msg_ts = msghdr->msg_ts;
340 nmsghdr->msg_spot = -1;
341
342 /* Compute the amount of segments and reserve them */
343 msgsz = msghdr->msg_ts;
344 segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
345 if (segcnt == 0)
346 continue;
347 while (segcnt--) {
348 nnext = new_free_msgmaps;
349 new_free_msgmaps = new_msgmaps[nnext].next;
350 new_nfree_msgmaps--;
351 new_msgmaps[nnext].next = nmsghdr->msg_spot;
352 nmsghdr->msg_spot = nnext;
353 }
354
355 /* Copy all segments */
356 KASSERT(nnext == nmsghdr->msg_spot);
357 next = msghdr->msg_spot;
358 while (msgsz > 0) {
359 size_t tlen;
360
361 if (msgsz >= msginfo.msgssz) {
362 tlen = msginfo.msgssz;
363 msgsz -= msginfo.msgssz;
364 } else {
365 tlen = msgsz;
366 msgsz = 0;
367 }
368
369 /* Copy the message buffer */
370 memcpy(&new_msgpool[nnext * msginfo.msgssz],
371 &msgpool[next * msginfo.msgssz], tlen);
372
373 /* Next entry of the map */
374 nnext = msgmaps[nnext].next;
375 next = msgmaps[next].next;
376 }
377
378 /* Next message header */
379 msghdr = msghdr->msg_next;
380 }
381 nmptr->_msg_last = nmsghdr;
382 }
383 KASSERT((msginfo.msgseg - nfree_msgmaps) ==
384 (newmsgseg - new_nfree_msgmaps));
385
386 sz = ALIGN(msginfo.msgmax) +
387 ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
388 ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
389 ALIGN(msginfo.msgmni * sizeof(kmsq_t));
390 sz = round_page(sz);
391
392 for (i = 0; i < msginfo.msgmni; i++)
393 cv_destroy(&msqs[i].msq_cv);
394
395 /* Set the pointers and update the new values */
396 msgpool = new_msgpool;
397 msgmaps = new_msgmaps;
398 msghdrs = new_msghdrs;
399 msqs = new_msqs;
400
401 free_msghdrs = new_free_msghdrs;
402 free_msgmaps = new_free_msgmaps;
403 nfree_msgmaps = new_nfree_msgmaps;
404 msginfo.msgmni = newmsgmni;
405 msginfo.msgseg = newmsgseg;
406 msginfo.msgmax = newmsgmax;
407
408 /* Reallocation completed - notify all waiters, if any */
409 msg_realloc_state = false;
410 cv_broadcast(&msg_realloc_cv);
411 mutex_exit(&msgmutex);
412
413 uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
414 return 0;
415 }
416
417 static void
418 msg_freehdr(struct __msg *msghdr)
419 {
420
421 KASSERT(mutex_owned(&msgmutex));
422
423 while (msghdr->msg_ts > 0) {
424 short next;
425 KASSERT(msghdr->msg_spot >= 0);
426 KASSERT(msghdr->msg_spot < msginfo.msgseg);
427
428 next = msgmaps[msghdr->msg_spot].next;
429 msgmaps[msghdr->msg_spot].next = free_msgmaps;
430 free_msgmaps = msghdr->msg_spot;
431 nfree_msgmaps++;
432 msghdr->msg_spot = next;
433 if (msghdr->msg_ts >= msginfo.msgssz)
434 msghdr->msg_ts -= msginfo.msgssz;
435 else
436 msghdr->msg_ts = 0;
437 }
438 KASSERT(msghdr->msg_spot == -1);
439 msghdr->msg_next = free_msghdrs;
440 free_msghdrs = msghdr;
441 }
442
443 int
444 sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap,
445 register_t *retval)
446 {
447 /* {
448 syscallarg(int) msqid;
449 syscallarg(int) cmd;
450 syscallarg(struct msqid_ds *) buf;
451 } */
452 struct msqid_ds msqbuf;
453 int cmd, error;
454
455 cmd = SCARG(uap, cmd);
456
457 if (cmd == IPC_SET) {
458 error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
459 if (error)
460 return (error);
461 }
462
463 error = msgctl1(l, SCARG(uap, msqid), cmd,
464 (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);
465
466 if (error == 0 && cmd == IPC_STAT)
467 error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));
468
469 return (error);
470 }
471
/*
 * msgctl1: kernel implementation of msgctl(2), shared with the compat
 * layers.  cmd is IPC_RMID, IPC_SET or IPC_STAT; msqbuf is the
 * kernel-resident buffer (input for IPC_SET, output for IPC_STAT, may
 * be NULL otherwise).  Returns 0 or an errno.
 */
int
msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
{
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	kmsq_t *msq;
	int error = 0, ix;

	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));

	ix = IPCID_TO_IX(msqid);

	mutex_enter(&msgmutex);

	if (ix < 0 || ix >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[ix];
	msqptr = &msq->msq_u;

	/* msg_qbytes == 0 marks a free slot. */
	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such msqid\n"));
		error = EINVAL;
		goto unlock;
	}
	/* Reject stale identifiers referring to a recycled slot. */
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	switch (cmd) {
	case IPC_RMID:
	{
		struct __msg *msghdr;
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
			break;
		/* Free the message headers */
		msghdr = msqptr->_msg_first;
		while (msghdr != NULL) {
			struct __msg *msghdr_tmp;

			/* Free the segments of each message */
			msqptr->_msg_cbytes -= msghdr->msg_ts;
			msqptr->msg_qnum--;
			msghdr_tmp = msghdr;
			msghdr = msghdr->msg_next;
			msg_freehdr(msghdr_tmp);
		}
		KASSERT(msqptr->_msg_cbytes == 0);
		KASSERT(msqptr->msg_qnum == 0);

		/* Mark it as free */
		msqptr->msg_qbytes = 0;
		/* Wake all sleepers so they see the queue is gone. */
		cv_broadcast(&msq->msq_cv);
	}
	break;

	case IPC_SET:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
			break;
		/* Raising msg_qbytes above its current value is privileged. */
		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_SYSVIPC,
		    KAUTH_REQ_SYSTEM_SYSVIPC_MSGQ_OVERSIZE,
		    KAUTH_ARG(msqbuf->msg_qbytes),
		    KAUTH_ARG(msqptr->msg_qbytes), NULL) != 0) {
			error = EPERM;
			break;
		}
		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
			    "(truncating)\n", msginfo.msgmnb));
			/* silently restrict qbytes to system limit */
			msqbuf->msg_qbytes = msginfo.msgmnb;
		}
		if (msqbuf->msg_qbytes == 0) {
			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
			error = EINVAL;		/* XXX non-standard errno! */
			break;
		}
		/* Only uid/gid and the low 9 mode bits may be changed. */
		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
		    (msqbuf->msg_perm.mode & 0777);
		msqptr->msg_qbytes = msqbuf->msg_qbytes;
		msqptr->msg_ctime = time_second;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
			MSG_PRINTF(("requester doesn't have read access\n"));
			break;
		}
		/* Zero first so no kernel-internal fields leak out. */
		memset(msqbuf, 0, sizeof *msqbuf);
		msqbuf->msg_perm = msqptr->msg_perm;
		msqbuf->msg_perm.mode &= 0777;
		msqbuf->msg_qnum = msqptr->msg_qnum;
		msqbuf->msg_qbytes = msqptr->msg_qbytes;
		msqbuf->msg_lspid = msqptr->msg_lspid;
		msqbuf->msg_lrpid = msqptr->msg_lrpid;
		msqbuf->msg_stime = msqptr->msg_stime;
		msqbuf->msg_rtime = msqptr->msg_rtime;
		msqbuf->msg_ctime = msqptr->msg_ctime;
		break;

	default:
		MSG_PRINTF(("invalid command %d\n", cmd));
		error = EINVAL;
		break;
	}

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}
591
/*
 * msgget(2): look up a message queue by key, or create one.
 *
 * For a non-private key, an existing queue is returned subject to the
 * IPC_CREAT|IPC_EXCL and permission checks.  Otherwise, if creation
 * was requested, the first free and unlocked msqid_ds is initialized.
 * On success *retval holds the (index, sequence) IPC identifier.
 */
int
sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(int) msgflg;
	} */
	int msqid, error = 0;
	int key = SCARG(uap, key);
	int msgflg = SCARG(uap, msgflg);
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr = NULL;
	kmsq_t *msq;

	mutex_enter(&msgmutex);

	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));

	if (key != IPC_PRIVATE) {
		/* Search for an existing queue with this key. */
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes != 0 &&
			    msqptr->msg_perm._key == key)
				break;
		}
		if (msqid < msginfo.msgmni) {
			MSG_PRINTF(("found public key\n"));
			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
				MSG_PRINTF(("not exclusive\n"));
				error = EEXIST;
				goto unlock;
			}
			/* Requested mode bits must be permitted. */
			if ((error = ipcperm(cred, &msqptr->msg_perm,
			    msgflg & 0700 ))) {
				MSG_PRINTF(("requester doesn't have 0%o access\n",
				    msgflg & 0700));
				goto unlock;
			}
			goto found;
		}
	}

	MSG_PRINTF(("need to allocate the msqid_ds\n"));
	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			/*
			 * Look for an unallocated and unlocked msqid_ds.
			 * msqid_ds's can be locked by msgsnd or msgrcv while
			 * they are copying the message in/out.  We can't
			 * re-use the entry until they release it.
			 */
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes == 0 &&
			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
				break;
		}
		if (msqid == msginfo.msgmni) {
			MSG_PRINTF(("no more msqid_ds's available\n"));
			error = ENOSPC;
			goto unlock;
		}
		MSG_PRINTF(("msqid %d is available\n", msqid));
		/* Initialize the new queue with the caller's credentials. */
		msqptr->msg_perm._key = key;
		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
		msqptr->msg_perm.mode = (msgflg & 0777);
		/* Make sure that the returned msqid is unique */
		msqptr->msg_perm._seq++;
		msqptr->_msg_first = NULL;
		msqptr->_msg_last = NULL;
		msqptr->_msg_cbytes = 0;
		msqptr->msg_qnum = 0;
		msqptr->msg_qbytes = msginfo.msgmnb;
		msqptr->msg_lspid = 0;
		msqptr->msg_lrpid = 0;
		msqptr->msg_stime = 0;
		msqptr->msg_rtime = 0;
		msqptr->msg_ctime = time_second;
	} else {
		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
		error = ENOENT;
		goto unlock;
	}

 found:
	/* Construct the unique msqid */
	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}
688
/*
 * msgsnd(2) system call entry point: delegates to msgsnd1() with the
 * native long message-type size and copyin() as the fetch routine
 * (compat layers pass their own type size/fetch function).
 */
int
sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(const void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(int) msgflg;
	} */

	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
}
702
/*
 * msgsnd1: kernel implementation of msgsnd(2), shared with the compat
 * layers.
 *
 * user_msgp points at a user buffer holding the message type (typesz
 * bytes, fetched via fetch_type) followed by msgsz bytes of payload.
 * Sleeps for resources unless IPC_NOWAIT is set; restarts from scratch
 * if a subsystem reallocation happens while sleeping.  Returns 0 or an
 * errno.
 */
int
msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
    int msgflg, size_t typesz, copyin_t fetch_type)
{
	int segs_needed, error = 0, msqid;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr,
	    user_msgp, (long long)msgsz, msgflg));

	if ((ssize_t)msgsz < 0)
		return EINVAL;

	/* All queue state is re-validated from here after a realloc. */
 restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	/* msg_qbytes == 0 marks a free slot. */
	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	/* Reject stale identifiers referring to a recycled slot. */
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
		MSG_PRINTF(("requester doesn't have write access\n"));
		goto unlock;
	}

	/* Segments needed for the payload, rounding up. */
	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
	    (long long)msgsz, msginfo.msgssz, segs_needed));
	for (;;) {
		int need_more_resources = 0;

		/*
		 * check msgsz [cannot be negative since it is unsigned]
		 * (inside this loop in case msg_qbytes changes while we sleep)
		 */

		if (msgsz > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
			error = EINVAL;
			goto unlock;
		}

		/* Queue locked by another sender/receiver copying data? */
		if (msqptr->msg_perm.mode & MSG_LOCKED) {
			MSG_PRINTF(("msqid is locked\n"));
			need_more_resources = 1;
		}
		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
			need_more_resources = 1;
		}
		if (segs_needed > nfree_msgmaps) {
			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
			need_more_resources = 1;
		}
		if (free_msghdrs == NULL) {
			MSG_PRINTF(("no more msghdrs\n"));
			need_more_resources = 1;
		}

		if (need_more_resources) {
			int we_own_it;

			if ((msgflg & IPC_NOWAIT) != 0) {
				MSG_PRINTF(("need more resources but caller "
				    "doesn't want to wait\n"));
				error = EAGAIN;
				goto unlock;
			}

			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
				MSG_PRINTF(("we don't own the msqid_ds\n"));
				we_own_it = 0;
			} else {
				/* Force later arrivals to wait for our
				   request */
				MSG_PRINTF(("we own the msqid_ds\n"));
				msqptr->msg_perm.mode |= MSG_LOCKED;
				we_own_it = 1;
			}

			/* msg_waiters lets msgrealloc() quiesce us. */
			msg_waiters++;
			MSG_PRINTF(("goodnight\n"));
			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
			MSG_PRINTF(("good morning, error=%d\n", error));
			msg_waiters--;

			if (we_own_it)
				msqptr->msg_perm.mode &= ~MSG_LOCKED;

			/*
			 * In case of such state, notify reallocator and
			 * restart the call.
			 */
			if (msg_realloc_state) {
				cv_broadcast(&msg_realloc_cv);
				mutex_exit(&msgmutex);
				goto restart;
			}

			if (error != 0) {
				MSG_PRINTF(("msgsnd: interrupted system "
				    "call\n"));
				error = EINTR;
				goto unlock;
			}

			/*
			 * Make sure that the msq queue still exists
			 */

			if (msqptr->msg_qbytes == 0) {
				MSG_PRINTF(("msqid deleted\n"));
				error = EIDRM;
				goto unlock;
			}
		} else {
			MSG_PRINTF(("got all the resources that we need\n"));
			break;
		}
	}

	/*
	 * We have the resources that we need.
	 * Make sure!
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	KASSERT(segs_needed <= nfree_msgmaps);
	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
	KASSERT(free_msghdrs != NULL);

	/*
	 * Re-lock the msqid_ds in case we page-fault when copying in the
	 * message
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	msqptr->msg_perm.mode |= MSG_LOCKED;

	/*
	 * Allocate a message header
	 */

	msghdr = free_msghdrs;
	free_msghdrs = msghdr->msg_next;
	msghdr->msg_spot = -1;
	msghdr->msg_ts = msgsz;

	/*
	 * Allocate space for the message
	 */

	while (segs_needed > 0) {
		KASSERT(nfree_msgmaps > 0);
		KASSERT(free_msgmaps != -1);
		KASSERT(free_msgmaps < msginfo.msgseg);

		next = free_msgmaps;
		MSG_PRINTF(("allocating segment %d to message\n", next));
		free_msgmaps = msgmaps[next].next;
		nfree_msgmaps--;
		/* Chain is built in reverse: msg_spot is the last segment
		 * allocated, and links run back toward the first. */
		msgmaps[next].next = msghdr->msg_spot;
		msghdr->msg_spot = next;
		segs_needed--;
	}

	/*
	 * Copy in the message type.  The mutex is dropped around the
	 * user copy; MSG_LOCKED keeps the queue entry from being reused.
	 */
	mutex_exit(&msgmutex);
	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error %d copying the message type\n", error));
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Validate the message type
	 */

	if (msghdr->msg_type < 1) {
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
		error = EINVAL;
		goto unlock;
	}

	/*
	 * Copy in the message body, one segment at a time, again
	 * dropping the mutex around each user copy.
	 */

	next = msghdr->msg_spot;
	while (msgsz > 0) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz;
		mutex_exit(&msgmutex);
		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error %d copying in message segment\n",
			    error));
			msg_freehdr(msghdr);
			msqptr->msg_perm.mode &= ~MSG_LOCKED;
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		msgsz -= tlen;
		user_msgp += tlen;
		next = msgmaps[next].next;
	}
	KASSERT(next == -1);

	/*
	 * We've got the message.  Unlock the msqid_ds.
	 */

	msqptr->msg_perm.mode &= ~MSG_LOCKED;

	/*
	 * Make sure that the msqid_ds is still allocated.
	 */

	if (msqptr->msg_qbytes == 0) {
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		error = EIDRM;
		goto unlock;
	}

	/*
	 * Put the message into the queue
	 */

	if (msqptr->_msg_first == NULL) {
		msqptr->_msg_first = msghdr;
		msqptr->_msg_last = msghdr;
	} else {
		msqptr->_msg_last->msg_next = msghdr;
		msqptr->_msg_last = msghdr;
	}
	msqptr->_msg_last->msg_next = NULL;

	/* Bookkeeping: bytes, count, sender pid and send time. */
	msqptr->_msg_cbytes += msghdr->msg_ts;
	msqptr->msg_qnum++;
	msqptr->msg_lspid = l->l_proc->p_pid;
	msqptr->msg_stime = time_second;

	/* Wake any receivers waiting for a message. */
	cv_broadcast(&msq->msq_cv);

 unlock:
	mutex_exit(&msgmutex);
	return error;
}
995
/*
 * msgrcv(2) system call entry point: delegates to msgrcv1() with the
 * native long message-type size and copyout() as the store routine
 * (compat layers pass their own type size/store function).
 */
int
sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(long) msgtyp;
		syscallarg(int) msgflg;
	} */

	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
	    sizeof(long), copyout, retval);
}
1011
1012 int
1013 msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
1014 int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
1015 {
1016 size_t len;
1017 kauth_cred_t cred = l->l_cred;
1018 struct msqid_ds *msqptr;
1019 struct __msg *msghdr;
1020 int error = 0, msqid;
1021 kmsq_t *msq;
1022 short next;
1023
1024 MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
1025 user_msgp, (long long)msgsz, msgtyp, msgflg));
1026
1027 if ((ssize_t)msgsz < 0)
1028 return EINVAL;
1029
1030 restart:
1031 msqid = IPCID_TO_IX(msqidr);
1032
1033 mutex_enter(&msgmutex);
1034 /* In case of reallocation, we will wait for completion */
1035 while (__predict_false(msg_realloc_state))
1036 cv_wait(&msg_realloc_cv, &msgmutex);
1037
1038 if (msqid < 0 || msqid >= msginfo.msgmni) {
1039 MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
1040 msginfo.msgmni));
1041 error = EINVAL;
1042 goto unlock;
1043 }
1044
1045 msq = &msqs[msqid];
1046 msqptr = &msq->msq_u;
1047
1048 if (msqptr->msg_qbytes == 0) {
1049 MSG_PRINTF(("no such message queue id\n"));
1050 error = EINVAL;
1051 goto unlock;
1052 }
1053 if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
1054 MSG_PRINTF(("wrong sequence number\n"));
1055 error = EINVAL;
1056 goto unlock;
1057 }
1058
1059 if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
1060 MSG_PRINTF(("requester doesn't have read access\n"));
1061 goto unlock;
1062 }
1063
1064 msghdr = NULL;
1065 while (msghdr == NULL) {
1066 if (msgtyp == 0) {
1067 msghdr = msqptr->_msg_first;
1068 if (msghdr != NULL) {
1069 if (msgsz < msghdr->msg_ts &&
1070 (msgflg & MSG_NOERROR) == 0) {
1071 MSG_PRINTF(("first msg on the queue "
1072 "is too big (want %lld, got %d)\n",
1073 (long long)msgsz, msghdr->msg_ts));
1074 error = E2BIG;
1075 goto unlock;
1076 }
1077 if (msqptr->_msg_first == msqptr->_msg_last) {
1078 msqptr->_msg_first = NULL;
1079 msqptr->_msg_last = NULL;
1080 } else {
1081 msqptr->_msg_first = msghdr->msg_next;
1082 KASSERT(msqptr->_msg_first != NULL);
1083 }
1084 }
1085 } else {
1086 struct __msg *previous;
1087 struct __msg **prev;
1088
1089 for (previous = NULL, prev = &msqptr->_msg_first;
1090 (msghdr = *prev) != NULL;
1091 previous = msghdr, prev = &msghdr->msg_next) {
1092 /*
1093 * Is this message's type an exact match or is
1094 * this message's type less than or equal to
1095 * the absolute value of a negative msgtyp?
1096 * Note that the second half of this test can
1097 * NEVER be true if msgtyp is positive since
1098 * msg_type is always positive!
1099 */
1100
1101 if (msgtyp != msghdr->msg_type &&
1102 msghdr->msg_type > -msgtyp)
1103 continue;
1104
1105 MSG_PRINTF(("found message type %ld, requested %ld\n",
1106 msghdr->msg_type, msgtyp));
1107 if (msgsz < msghdr->msg_ts &&
1108 (msgflg & MSG_NOERROR) == 0) {
1109 MSG_PRINTF(("requested message on the queue "
1110 "is too big (want %lld, got %d)\n",
1111 (long long)msgsz, msghdr->msg_ts));
1112 error = E2BIG;
1113 goto unlock;
1114 }
1115 *prev = msghdr->msg_next;
1116 if (msghdr != msqptr->_msg_last)
1117 break;
1118 if (previous == NULL) {
1119 KASSERT(prev == &msqptr->_msg_first);
1120 msqptr->_msg_first = NULL;
1121 msqptr->_msg_last = NULL;
1122 } else {
1123 KASSERT(prev != &msqptr->_msg_first);
1124 msqptr->_msg_last = previous;
1125 }
1126 break;
1127 }
1128 }
1129
1130 /*
1131 * We've either extracted the msghdr for the appropriate
1132 * message or there isn't one.
1133 * If there is one then bail out of this loop.
1134 */
1135 if (msghdr != NULL)
1136 break;
1137
1138 /*
1139 * Hmph! No message found. Does the user want to wait?
1140 */
1141
1142 if ((msgflg & IPC_NOWAIT) != 0) {
1143 MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
1144 msgtyp));
1145 error = ENOMSG;
1146 goto unlock;
1147 }
1148
1149 /*
1150 * Wait for something to happen
1151 */
1152
1153 msg_waiters++;
1154 MSG_PRINTF(("msgrcv: goodnight\n"));
1155 error = cv_wait_sig(&msq->msq_cv, &msgmutex);
1156 MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
1157 msg_waiters--;
1158
1159 /*
1160 * In case of such state, notify reallocator and
1161 * restart the call.
1162 */
1163 if (msg_realloc_state) {
1164 cv_broadcast(&msg_realloc_cv);
1165 mutex_exit(&msgmutex);
1166 goto restart;
1167 }
1168
1169 if (error != 0) {
1170 MSG_PRINTF(("msgsnd: interrupted system call\n"));
1171 error = EINTR;
1172 goto unlock;
1173 }
1174
1175 /*
1176 * Make sure that the msq queue still exists
1177 */
1178
1179 if (msqptr->msg_qbytes == 0 ||
1180 msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
1181 MSG_PRINTF(("msqid deleted\n"));
1182 error = EIDRM;
1183 goto unlock;
1184 }
1185 }
1186
1187 /*
1188 * Return the message to the user.
1189 *
1190 * First, do the bookkeeping (before we risk being interrupted).
1191 */
1192
1193 msqptr->_msg_cbytes -= msghdr->msg_ts;
1194 msqptr->msg_qnum--;
1195 msqptr->msg_lrpid = l->l_proc->p_pid;
1196 msqptr->msg_rtime = time_second;
1197
1198 /*
1199 * Make msgsz the actual amount that we'll be returning.
1200 * Note that this effectively truncates the message if it is too long
1201 * (since msgsz is never increased).
1202 */
1203
1204 MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
1205 (long long)msgsz, msghdr->msg_ts));
1206 if (msgsz > msghdr->msg_ts)
1207 msgsz = msghdr->msg_ts;
1208
1209 /*
1210 * Return the type to the user.
1211 */
1212 mutex_exit(&msgmutex);
1213 error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
1214 mutex_enter(&msgmutex);
1215 if (error != 0) {
1216 MSG_PRINTF(("error (%d) copying out message type\n", error));
1217 msg_freehdr(msghdr);
1218 cv_broadcast(&msq->msq_cv);
1219 goto unlock;
1220 }
1221 user_msgp += typesz;
1222
1223 /*
1224 * Return the segments to the user
1225 */
1226
1227 next = msghdr->msg_spot;
1228 for (len = 0; len < msgsz; len += msginfo.msgssz) {
1229 size_t tlen;
1230 KASSERT(next > -1);
1231 KASSERT(next < msginfo.msgseg);
1232
1233 if (msgsz - len > msginfo.msgssz)
1234 tlen = msginfo.msgssz;
1235 else
1236 tlen = msgsz - len;
1237 mutex_exit(&msgmutex);
1238 error = copyout(&msgpool[next * msginfo.msgssz],
1239 user_msgp, tlen);
1240 mutex_enter(&msgmutex);
1241 if (error != 0) {
1242 MSG_PRINTF(("error (%d) copying out message segment\n",
1243 error));
1244 msg_freehdr(msghdr);
1245 cv_broadcast(&msq->msq_cv);
1246 goto unlock;
1247 }
1248 user_msgp += tlen;
1249 next = msgmaps[next].next;
1250 }
1251
1252 /*
1253 * Done, return the actual number of bytes copied out.
1254 */
1255
1256 msg_freehdr(msghdr);
1257 cv_broadcast(&msq->msq_cv);
1258 *retval = msgsz;
1259
1260 unlock:
1261 mutex_exit(&msgmutex);
1262 return error;
1263 }
1264
1265 /*
1266 * Sysctl initialization and nodes.
1267 */
1268
1269 static int
1270 sysctl_ipc_msgmni(SYSCTLFN_ARGS)
1271 {
1272 int newsize, error;
1273 struct sysctlnode node;
1274 node = *rnode;
1275 node.sysctl_data = &newsize;
1276
1277 newsize = msginfo.msgmni;
1278 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1279 if (error || newp == NULL)
1280 return error;
1281
1282 sysctl_unlock();
1283 error = msgrealloc(newsize, msginfo.msgseg);
1284 sysctl_relock();
1285 return error;
1286 }
1287
1288 static int
1289 sysctl_ipc_msgseg(SYSCTLFN_ARGS)
1290 {
1291 int newsize, error;
1292 struct sysctlnode node;
1293 node = *rnode;
1294 node.sysctl_data = &newsize;
1295
1296 newsize = msginfo.msgseg;
1297 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1298 if (error || newp == NULL)
1299 return error;
1300
1301 sysctl_unlock();
1302 error = msgrealloc(msginfo.msgmni, newsize);
1303 sysctl_relock();
1304 return error;
1305 }
1306
1307 SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
1308 {
1309 const struct sysctlnode *node = NULL;
1310
1311 sysctl_createv(clog, 0, NULL, &node,
1312 CTLFLAG_PERMANENT,
1313 CTLTYPE_NODE, "ipc",
1314 SYSCTL_DESCR("SysV IPC options"),
1315 NULL, 0, NULL, 0,
1316 CTL_KERN, KERN_SYSVIPC, CTL_EOL);
1317
1318 if (node == NULL)
1319 return;
1320
1321 sysctl_createv(clog, 0, &node, NULL,
1322 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1323 CTLTYPE_INT, "msgmni",
1324 SYSCTL_DESCR("Max number of message queue identifiers"),
1325 sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
1326 CTL_CREATE, CTL_EOL);
1327 sysctl_createv(clog, 0, &node, NULL,
1328 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1329 CTLTYPE_INT, "msgseg",
1330 SYSCTL_DESCR("Max number of number of message segments"),
1331 sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
1332 CTL_CREATE, CTL_EOL);
1333 }
1334