/*	$NetBSD: sysv_msg.c,v 1.66.6.2 2015/12/27 12:10:05 skrll Exp $	*/

/*-
 * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implementation of SVID messages
 *
 * Author: Daniel Boulet
 *
 * Copyright 1993 Daniel Boulet and RTMX Inc.
 *
 * This system call was implemented by Daniel Boulet under contract from RTMX.
 *
 * Redistribution and use in source forms, with and without modification,
 * are permitted provided that this entire comment appears intact.
 *
 * Redistribution in binary form may occur without any restrictions.
 * Obviously, it would be nice if you gave credit where credit is due
 * but requiring it would be too onerous.
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.66.6.2 2015/12/27 12:10:05 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_sysv.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/msg.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#define MSG_DEBUG
#undef MSG_DEBUG_OK

#ifdef MSG_DEBUG_OK
#define MSG_PRINTF(a)	printf a
#else
#define MSG_PRINTF(a)
#endif

static int	nfree_msgmaps;		/* # of free map entries */
static short	free_msgmaps;		/* head of linked list of free map entries */
static struct __msg *free_msghdrs;	/* list of free msg headers */
static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
static struct msgmap *msgmaps;		/* MSGSEG msgmap structures */
static struct __msg *msghdrs;		/* MSGTQL msg headers */

kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
kmutex_t msgmutex;			/* subsystem lock */

static u_int	msg_waiters = 0;	/* total number of msgsnd/msgrcv waiters */
static bool	msg_realloc_state;
static kcondvar_t msg_realloc_cv;

static void msg_freehdr(struct __msg *);

extern int kern_has_sysvmsg;

SYSCTL_SETUP_PROTO(sysctl_ipc_msg_setup);

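/*
 * msginit:
 *
 *	Initialize the SysV message subsystem: check that msginfo.msgssz
 *	is a power of two between 8 and 1024 and that msginfo.msgseg fits
 *	in a short, allocate the wired pool/map/header/queue arrays in one
 *	block, build the free lists, and set up the subsystem lock and
 *	condition variables.
 */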
void
msginit(struct sysctllog **clog)
{
	int i, sz;
	vaddr_t v;

	/*
	 * msginfo.msgssz should be a power of two for efficiency reasons.
	 * It is also pretty silly if msginfo.msgssz is less than 8
	 * or greater than about 256 so ...
	 */

	i = 8;
	while (i < 1024 && i != msginfo.msgssz)
		i <<= 1;
	if (i != msginfo.msgssz) {
		panic("msginfo.msgssz = %d, not a small power of 2",
		    msginfo.msgssz);
	}

	if (msginfo.msgseg > 32767) {
		panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
	}

	/* Allocate the wired memory for our structures */
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_msg: cannot allocate memory");
	msgpool = (void *)v;
	msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax));
	msghdrs = (void *)((uintptr_t)msgmaps +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)));
	msqs = (void *)((uintptr_t)msghdrs +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));

	for (i = 0; i < (msginfo.msgseg - 1); i++)
		msgmaps[i].next = i + 1;
	msgmaps[msginfo.msgseg - 1].next = -1;

	free_msgmaps = 0;
	nfree_msgmaps = msginfo.msgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		msghdrs[i].msg_type = 0;
		msghdrs[i].msg_next = &msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	msghdrs[i].msg_type = 0;
	msghdrs[i].msg_next = NULL;
	free_msghdrs = &msghdrs[0];

	for (i = 0; i < msginfo.msgmni; i++) {
		cv_init(&msqs[i].msq_cv, "msgwait");
		/* Implies entry is available */
		msqs[i].msq_u.msg_qbytes = 0;
		/* Reset to a known value */
		msqs[i].msq_u.msg_perm._seq = 0;
	}

	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&msg_realloc_cv, "msgrealc");
	msg_realloc_state = false;

	kern_has_sysvmsg = 1;

#ifdef _MODULE
	if (clog)
		sysctl_ipc_msg_setup(clog);
#endif
}

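/*
 * msgfini:
 *
 *	Tear the subsystem down on module unload.  Returns 1 (and does
 *	nothing) if any queue is still allocated; otherwise destroys the
 *	condition variables and the subsystem lock, frees the wired
 *	memory, and returns 0.
 */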
int
msgfini(void)
{
	int i, sz;
	vaddr_t v = (vaddr_t)msgpool;

	mutex_enter(&msgmutex);
	for (i = 0; i < msginfo.msgmni; i++) {
		if (msqs[i].msq_u.msg_qbytes != 0) {
			mutex_exit(&msgmutex);
			return 1; /* queue not available, prevent unload! */
		}
	}
	/*
	 * Destroy all condvars and free the memory we're using
	 */
	for (i = 0; i < msginfo.msgmni; i++) {
		cv_destroy(&msqs[i].msq_cv);
	}
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);

	mutex_exit(&msgmutex);
	mutex_destroy(&msgmutex);

	kern_has_sysvmsg = 0;

	return 0;
}

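/*
 * msgrealloc:
 *
 *	Resize the subsystem to the given msgmni/msgseg values (used by
 *	the sysctl handlers below).  Allocates a new wired area, waits for
 *	all msgsnd()/msgrcv() waiters to leave, checks that the queues in
 *	use still fit, copies every active queue and its messages into the
 *	new pools (defragmenting the segments in the process), then frees
 *	the old area.  Returns EBUSY if a reallocation is already in
 *	progress or the new sizes cannot hold the current contents.
 */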
static int
msgrealloc(int newmsgmni, int newmsgseg)
{
	struct msgmap *new_msgmaps;
	struct __msg *new_msghdrs, *new_free_msghdrs;
	char *old_msgpool, *new_msgpool;
	kmsq_t *new_msqs;
	vaddr_t v;
	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
	short new_free_msgmaps;

	if (newmsgmni < 1 || newmsgseg < 1)
		return EINVAL;

	/* Allocate the wired memory for our structures */
	newmsgmax = msginfo.msgssz * newmsgseg;
	sz = ALIGN(newmsgmax) +
	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(newmsgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&msgmutex);
	if (msg_realloc_state) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	msg_realloc_state = true;
	if (msg_waiters) {
		/*
		 * The reallocation state is now set; wake up all waiters
		 * and wait until they have all left their wait loops.
		 */
		for (i = 0; i < msginfo.msgmni; i++)
			cv_broadcast(&msqs[i].msq_cv);
		while (msg_waiters)
			cv_wait(&msg_realloc_cv, &msgmutex);
	}
	old_msgpool = msgpool;

	/* We cannot reallocate less memory than we use */
	i = 0;
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct msqid_ds *mptr;
		kmsq_t *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;
		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
			i = msqid;
	}
	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}

	new_msgpool = (void *)v;
	new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax));
	new_msghdrs = (void *)((uintptr_t)new_msgmaps +
	    ALIGN(newmsgseg * sizeof(struct msgmap)));
	new_msqs = (void *)((uintptr_t)new_msghdrs +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));

	/* Initialize the structures */
	for (i = 0; i < (newmsgseg - 1); i++)
		new_msgmaps[i].next = i + 1;
	new_msgmaps[newmsgseg - 1].next = -1;
	new_free_msgmaps = 0;
	new_nfree_msgmaps = newmsgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		new_msghdrs[i].msg_type = 0;
		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	new_msghdrs[i].msg_type = 0;
	new_msghdrs[i].msg_next = NULL;
	new_free_msghdrs = &new_msghdrs[0];

	for (i = 0; i < newmsgmni; i++) {
		new_msqs[i].msq_u.msg_qbytes = 0;
		new_msqs[i].msq_u.msg_perm._seq = 0;
		cv_init(&new_msqs[i].msq_cv, "msgwait");
	}

	/*
	 * Copy all message queue identifiers, message headers and buffer
	 * pools to the new memory location.
	 */
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct __msg *nmsghdr, *msghdr, *pmsghdr;
		struct msqid_ds *nmptr, *mptr;
		kmsq_t *nmsq, *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;

		if (mptr->msg_qbytes == 0 &&
		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
			continue;

		nmsq = &new_msqs[msqid];
		nmptr = &nmsq->msq_u;
		memcpy(nmptr, mptr, sizeof(struct msqid_ds));

		/*
		 * Go through the message headers and copy each one into a
		 * newly taken header, thus defragmenting as we go.
		 */
		nmsghdr = pmsghdr = NULL;
		msghdr = mptr->_msg_first;
		while (msghdr) {
			short nnext = 0, next;
			u_short msgsz, segcnt;

			/* Take an entry from the new list of free msghdrs */
			nmsghdr = new_free_msghdrs;
			KASSERT(nmsghdr != NULL);
			new_free_msghdrs = nmsghdr->msg_next;

			nmsghdr->msg_next = NULL;
			if (pmsghdr) {
				pmsghdr->msg_next = nmsghdr;
			} else {
				nmptr->_msg_first = nmsghdr;
			}
			pmsghdr = nmsghdr;
			nmsghdr->msg_ts = msghdr->msg_ts;
			nmsghdr->msg_spot = -1;

			/* Compute the number of segments and reserve them */
			msgsz = msghdr->msg_ts;
			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
			if (segcnt == 0) {
				/* Zero-length message: nothing to copy */
				msghdr = msghdr->msg_next;
				continue;
			}
			while (segcnt--) {
				nnext = new_free_msgmaps;
				new_free_msgmaps = new_msgmaps[nnext].next;
				new_nfree_msgmaps--;
				new_msgmaps[nnext].next = nmsghdr->msg_spot;
				nmsghdr->msg_spot = nnext;
			}

			/* Copy all segments */
			KASSERT(nnext == nmsghdr->msg_spot);
			next = msghdr->msg_spot;
			while (msgsz > 0) {
				size_t tlen;

				if (msgsz >= msginfo.msgssz) {
					tlen = msginfo.msgssz;
					msgsz -= msginfo.msgssz;
				} else {
					tlen = msgsz;
					msgsz = 0;
				}

				/* Copy the message buffer */
				memcpy(&new_msgpool[nnext * msginfo.msgssz],
				    &msgpool[next * msginfo.msgssz], tlen);

				/* Next entry of the map */
				nnext = new_msgmaps[nnext].next;
				next = msgmaps[next].next;
			}

			/* Next message header */
			msghdr = msghdr->msg_next;
		}
		nmptr->_msg_last = nmsghdr;
	}
	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
	    (newmsgseg - new_nfree_msgmaps));

	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);

	for (i = 0; i < msginfo.msgmni; i++)
		cv_destroy(&msqs[i].msq_cv);

	/* Set the pointers and update the new values */
	msgpool = new_msgpool;
	msgmaps = new_msgmaps;
	msghdrs = new_msghdrs;
	msqs = new_msqs;

	free_msghdrs = new_free_msghdrs;
	free_msgmaps = new_free_msgmaps;
	nfree_msgmaps = new_nfree_msgmaps;
	msginfo.msgmni = newmsgmni;
	msginfo.msgseg = newmsgseg;
	msginfo.msgmax = newmsgmax;

	/* Reallocation completed - notify all waiters, if any */
	msg_realloc_state = false;
	cv_broadcast(&msg_realloc_cv);
	mutex_exit(&msgmutex);

	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
	return 0;
}

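/*
 * msg_freehdr:
 *
 *	Release a message: return its segments to the free map list and
 *	the header itself to the free header list.  Called with msgmutex
 *	held.
 */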
static void
msg_freehdr(struct __msg *msghdr)
{

	KASSERT(mutex_owned(&msgmutex));

	while (msghdr->msg_ts > 0) {
		short next;
		KASSERT(msghdr->msg_spot >= 0);
		KASSERT(msghdr->msg_spot < msginfo.msgseg);

		next = msgmaps[msghdr->msg_spot].next;
		msgmaps[msghdr->msg_spot].next = free_msgmaps;
		free_msgmaps = msghdr->msg_spot;
		nfree_msgmaps++;
		msghdr->msg_spot = next;
		if (msghdr->msg_ts >= msginfo.msgssz)
			msghdr->msg_ts -= msginfo.msgssz;
		else
			msghdr->msg_ts = 0;
	}
	KASSERT(msghdr->msg_spot == -1);
	msghdr->msg_next = free_msghdrs;
	free_msghdrs = msghdr;
}

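/*
 * sys___msgctl50:
 *
 *	msgctl(2) system call: copy the msqid_ds in for IPC_SET, let
 *	msgctl1() do the work, and copy the result out for IPC_STAT.
 */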
int
sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(int) cmd;
		syscallarg(struct msqid_ds *) buf;
	} */
	struct msqid_ds msqbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
		if (error)
			return (error);
	}

	error = msgctl1(l, SCARG(uap, msqid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));

	return (error);
}

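/*
 * msgctl1:
 *
 *	Guts of msgctl().  Validates the queue identifier and performs
 *	IPC_RMID, IPC_SET or IPC_STAT with the appropriate permission
 *	checks, all under msgmutex.
 */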
int
msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
{
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	kmsq_t *msq;
	int error = 0, ix;

	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));

	ix = IPCID_TO_IX(msqid);

	mutex_enter(&msgmutex);

	if (ix < 0 || ix >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[ix];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such msqid\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	switch (cmd) {
	case IPC_RMID:
	{
		struct __msg *msghdr;
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
			break;
		/* Free the message headers */
		msghdr = msqptr->_msg_first;
		while (msghdr != NULL) {
			struct __msg *msghdr_tmp;

			/* Free the segments of each message */
			msqptr->_msg_cbytes -= msghdr->msg_ts;
			msqptr->msg_qnum--;
			msghdr_tmp = msghdr;
			msghdr = msghdr->msg_next;
			msg_freehdr(msghdr_tmp);
		}
		KASSERT(msqptr->_msg_cbytes == 0);
		KASSERT(msqptr->msg_qnum == 0);

		/* Mark it as free */
		msqptr->msg_qbytes = 0;
		cv_broadcast(&msq->msq_cv);
	}
		break;

	case IPC_SET:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
			break;
		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_SYSVIPC,
		    KAUTH_REQ_SYSTEM_SYSVIPC_MSGQ_OVERSIZE,
		    KAUTH_ARG(msqbuf->msg_qbytes),
		    KAUTH_ARG(msqptr->msg_qbytes), NULL) != 0) {
			error = EPERM;
			break;
		}
		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
			    "(truncating)\n", msginfo.msgmnb));
			/* silently restrict qbytes to system limit */
			msqbuf->msg_qbytes = msginfo.msgmnb;
		}
		if (msqbuf->msg_qbytes == 0) {
			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
			error = EINVAL;		/* XXX non-standard errno! */
			break;
		}
		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
		    (msqbuf->msg_perm.mode & 0777);
		msqptr->msg_qbytes = msqbuf->msg_qbytes;
		msqptr->msg_ctime = time_second;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
			MSG_PRINTF(("requester doesn't have read access\n"));
			break;
		}
		memcpy(msqbuf, msqptr, sizeof(struct msqid_ds));
		break;

	default:
		MSG_PRINTF(("invalid command %d\n", cmd));
		error = EINVAL;
		break;
	}

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}

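/*
 * sys_msgget:
 *
 *	msgget(2) system call: look up an existing queue by key or, with
 *	IPC_CREAT (or IPC_PRIVATE), claim a free msqid_ds and initialize
 *	it.  Returns the IPC identifier built from the slot index and the
 *	sequence number.
 */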
int
sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(int) msgflg;
	} */
	int msqid, error = 0;
	int key = SCARG(uap, key);
	int msgflg = SCARG(uap, msgflg);
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr = NULL;
	kmsq_t *msq;

	mutex_enter(&msgmutex);

	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));

	if (key != IPC_PRIVATE) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes != 0 &&
			    msqptr->msg_perm._key == key)
				break;
		}
		if (msqid < msginfo.msgmni) {
			MSG_PRINTF(("found public key\n"));
			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
				MSG_PRINTF(("not exclusive\n"));
				error = EEXIST;
				goto unlock;
			}
			if ((error = ipcperm(cred, &msqptr->msg_perm,
			    msgflg & 0700))) {
				MSG_PRINTF(("requester doesn't have 0%o access\n",
				    msgflg & 0700));
				goto unlock;
			}
			goto found;
		}
	}

	MSG_PRINTF(("need to allocate the msqid_ds\n"));
	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			/*
			 * Look for an unallocated and unlocked msqid_ds.
			 * msqid_ds's can be locked by msgsnd or msgrcv while
			 * they are copying the message in/out.  We can't
			 * re-use the entry until they release it.
			 */
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes == 0 &&
			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
				break;
		}
		if (msqid == msginfo.msgmni) {
			MSG_PRINTF(("no more msqid_ds's available\n"));
			error = ENOSPC;
			goto unlock;
		}
		MSG_PRINTF(("msqid %d is available\n", msqid));
		msqptr->msg_perm._key = key;
		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
		msqptr->msg_perm.mode = (msgflg & 0777);
		/* Make sure that the returned msqid is unique */
		msqptr->msg_perm._seq++;
		msqptr->_msg_first = NULL;
		msqptr->_msg_last = NULL;
		msqptr->_msg_cbytes = 0;
		msqptr->msg_qnum = 0;
		msqptr->msg_qbytes = msginfo.msgmnb;
		msqptr->msg_lspid = 0;
		msqptr->msg_lrpid = 0;
		msqptr->msg_stime = 0;
		msqptr->msg_rtime = 0;
		msqptr->msg_ctime = time_second;
	} else {
		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
		error = ENOENT;
		goto unlock;
	}

 found:
	/* Construct the unique msqid */
	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}

int
sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(const void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(int) msgflg;
	} */

	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
}

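/*
 * msgsnd1:
 *
 *	Guts of msgsnd(), parameterized by the size of the message type
 *	and the function used to copy it in (sys_msgsnd passes
 *	sizeof(long) and copyin).  Waits, unless IPC_NOWAIT is set, until
 *	the queue is unlocked and a free header, enough free segments and
 *	enough queue space are available, holds the queue with MSG_LOCKED
 *	while copying the message from user space, and appends it to the
 *	queue.
 */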
int
msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
    int msgflg, size_t typesz, copyin_t fetch_type)
{
	int segs_needed, error = 0, msqid;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr,
	    user_msgp, (long long)msgsz, msgflg));

	if ((ssize_t)msgsz < 0)
		return EINVAL;

 restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
		MSG_PRINTF(("requester doesn't have write access\n"));
		goto unlock;
	}

	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
	    (long long)msgsz, msginfo.msgssz, segs_needed));
	for (;;) {
		int need_more_resources = 0;

		/*
		 * check msgsz [cannot be negative since it is unsigned]
		 * (inside this loop in case msg_qbytes changes while we sleep)
		 */

		if (msgsz > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
			error = EINVAL;
			goto unlock;
		}

		if (msqptr->msg_perm.mode & MSG_LOCKED) {
			MSG_PRINTF(("msqid is locked\n"));
			need_more_resources = 1;
		}
		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
			need_more_resources = 1;
		}
		if (segs_needed > nfree_msgmaps) {
			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
			need_more_resources = 1;
		}
		if (free_msghdrs == NULL) {
			MSG_PRINTF(("no more msghdrs\n"));
			need_more_resources = 1;
		}

		if (need_more_resources) {
			int we_own_it;

			if ((msgflg & IPC_NOWAIT) != 0) {
				MSG_PRINTF(("need more resources but caller "
				    "doesn't want to wait\n"));
				error = EAGAIN;
				goto unlock;
			}

			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
				MSG_PRINTF(("we don't own the msqid_ds\n"));
				we_own_it = 0;
			} else {
				/*
				 * Force later arrivals to wait for our
				 * request.
				 */
				MSG_PRINTF(("we own the msqid_ds\n"));
				msqptr->msg_perm.mode |= MSG_LOCKED;
				we_own_it = 1;
			}

			msg_waiters++;
			MSG_PRINTF(("goodnight\n"));
			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
			MSG_PRINTF(("good morning, error=%d\n", error));
			msg_waiters--;

			if (we_own_it)
				msqptr->msg_perm.mode &= ~MSG_LOCKED;

			/*
			 * If a reallocation started while we slept, notify
			 * the reallocator and restart the call.
			 */
			if (msg_realloc_state) {
				cv_broadcast(&msg_realloc_cv);
				mutex_exit(&msgmutex);
				goto restart;
			}

			if (error != 0) {
				MSG_PRINTF(("msgsnd: interrupted system "
				    "call\n"));
				error = EINTR;
				goto unlock;
			}

			/*
			 * Make sure that the msq queue still exists
			 */

			if (msqptr->msg_qbytes == 0) {
				MSG_PRINTF(("msqid deleted\n"));
				error = EIDRM;
				goto unlock;
			}
		} else {
			MSG_PRINTF(("got all the resources that we need\n"));
			break;
		}
	}

	/*
	 * We have the resources that we need.
	 * Make sure!
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	KASSERT(segs_needed <= nfree_msgmaps);
	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
	KASSERT(free_msghdrs != NULL);

	/*
	 * Re-lock the msqid_ds in case we page-fault when copying in the
	 * message
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	msqptr->msg_perm.mode |= MSG_LOCKED;

	/*
	 * Allocate a message header
	 */

	msghdr = free_msghdrs;
	free_msghdrs = msghdr->msg_next;
	msghdr->msg_spot = -1;
	msghdr->msg_ts = msgsz;

	/*
	 * Allocate space for the message
	 */

	while (segs_needed > 0) {
		KASSERT(nfree_msgmaps > 0);
		KASSERT(free_msgmaps != -1);
		KASSERT(free_msgmaps < msginfo.msgseg);

		next = free_msgmaps;
		MSG_PRINTF(("allocating segment %d to message\n", next));
		free_msgmaps = msgmaps[next].next;
		nfree_msgmaps--;
		msgmaps[next].next = msghdr->msg_spot;
		msghdr->msg_spot = next;
		segs_needed--;
	}

	/*
	 * Copy in the message type
	 */
	mutex_exit(&msgmutex);
	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error %d copying the message type\n", error));
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Validate the message type
	 */

	if (msghdr->msg_type < 1) {
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
		error = EINVAL;
		goto unlock;
	}

	/*
	 * Copy in the message body
	 */

	next = msghdr->msg_spot;
	while (msgsz > 0) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz;
		mutex_exit(&msgmutex);
		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error %d copying in message segment\n",
			    error));
			msg_freehdr(msghdr);
			msqptr->msg_perm.mode &= ~MSG_LOCKED;
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		msgsz -= tlen;
		user_msgp += tlen;
		next = msgmaps[next].next;
	}
	KASSERT(next == -1);

	/*
	 * We've got the message.  Unlock the msqid_ds.
	 */

	msqptr->msg_perm.mode &= ~MSG_LOCKED;

	/*
	 * Make sure that the msqid_ds is still allocated.
	 */

	if (msqptr->msg_qbytes == 0) {
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		error = EIDRM;
		goto unlock;
	}

	/*
	 * Put the message into the queue
	 */

	if (msqptr->_msg_first == NULL) {
		msqptr->_msg_first = msghdr;
		msqptr->_msg_last = msghdr;
	} else {
		msqptr->_msg_last->msg_next = msghdr;
		msqptr->_msg_last = msghdr;
	}
	msqptr->_msg_last->msg_next = NULL;

	msqptr->_msg_cbytes += msghdr->msg_ts;
	msqptr->msg_qnum++;
	msqptr->msg_lspid = l->l_proc->p_pid;
	msqptr->msg_stime = time_second;

	cv_broadcast(&msq->msq_cv);

 unlock:
	mutex_exit(&msgmutex);
	return error;
}

int
sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(long) msgtyp;
		syscallarg(int) msgflg;
	} */

	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
	    sizeof(long), copyout, retval);
}

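/*
 * msgrcv1:
 *
 *	Guts of msgrcv(), parameterized by the size of the message type
 *	and the function used to copy it out.  Picks the first message
 *	(msgtyp == 0), the first message of exactly that type
 *	(msgtyp > 0), or the first message with type <= -msgtyp
 *	(msgtyp < 0), waiting unless IPC_NOWAIT is set, then copies it
 *	out, frees its resources and returns its size in *retval.
 */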
int
msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
    int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
{
	size_t len;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	int error = 0, msqid;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
	    user_msgp, (long long)msgsz, msgtyp, msgflg));

	if ((ssize_t)msgsz < 0)
		return EINVAL;

 restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
		MSG_PRINTF(("requester doesn't have read access\n"));
		goto unlock;
	}

	msghdr = NULL;
	while (msghdr == NULL) {
		if (msgtyp == 0) {
			msghdr = msqptr->_msg_first;
			if (msghdr != NULL) {
				if (msgsz < msghdr->msg_ts &&
				    (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("first msg on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				if (msqptr->_msg_first == msqptr->_msg_last) {
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					msqptr->_msg_first = msghdr->msg_next;
					KASSERT(msqptr->_msg_first != NULL);
				}
			}
		} else {
			struct __msg *previous;
			struct __msg **prev;

			for (previous = NULL, prev = &msqptr->_msg_first;
			    (msghdr = *prev) != NULL;
			    previous = msghdr, prev = &msghdr->msg_next) {
				/*
				 * Is this message's type an exact match or is
				 * this message's type less than or equal to
				 * the absolute value of a negative msgtyp?
				 * Note that the second half of this test can
				 * NEVER be true if msgtyp is positive since
				 * msg_type is always positive!
				 */

				if (msgtyp != msghdr->msg_type &&
				    msghdr->msg_type > -msgtyp)
					continue;

				MSG_PRINTF(("found message type %ld, requested %ld\n",
				    msghdr->msg_type, msgtyp));
				if (msgsz < msghdr->msg_ts &&
				    (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("requested message on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				*prev = msghdr->msg_next;
				if (msghdr != msqptr->_msg_last)
					break;
				if (previous == NULL) {
					KASSERT(prev == &msqptr->_msg_first);
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					KASSERT(prev != &msqptr->_msg_first);
					msqptr->_msg_last = previous;
				}
				break;
			}
		}

		/*
		 * We've either extracted the msghdr for the appropriate
		 * message or there isn't one.
		 * If there is one then bail out of this loop.
		 */
		if (msghdr != NULL)
			break;

		/*
		 * Hmph!  No message found.  Does the user want to wait?
		 */

		if ((msgflg & IPC_NOWAIT) != 0) {
			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
			    msgtyp));
			error = ENOMSG;
			goto unlock;
		}

		/*
		 * Wait for something to happen
		 */

		msg_waiters++;
		MSG_PRINTF(("msgrcv: goodnight\n"));
		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
		msg_waiters--;

		/*
		 * If a reallocation started while we slept, notify the
		 * reallocator and restart the call.
		 */
		if (msg_realloc_state) {
			cv_broadcast(&msg_realloc_cv);
			mutex_exit(&msgmutex);
			goto restart;
		}

		if (error != 0) {
			MSG_PRINTF(("msgrcv: interrupted system call\n"));
			error = EINTR;
			goto unlock;
		}

		/*
		 * Make sure that the msq queue still exists
		 */

		if (msqptr->msg_qbytes == 0 ||
		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
			MSG_PRINTF(("msqid deleted\n"));
			error = EIDRM;
			goto unlock;
		}
	}

	/*
	 * Return the message to the user.
	 *
	 * First, do the bookkeeping (before we risk being interrupted).
	 */

	msqptr->_msg_cbytes -= msghdr->msg_ts;
	msqptr->msg_qnum--;
	msqptr->msg_lrpid = l->l_proc->p_pid;
	msqptr->msg_rtime = time_second;

	/*
	 * Make msgsz the actual amount that we'll be returning.
	 * Note that this effectively truncates the message if it is too long
	 * (since msgsz is never increased).
	 */

	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
	    (long long)msgsz, msghdr->msg_ts));
	if (msgsz > msghdr->msg_ts)
		msgsz = msghdr->msg_ts;

	/*
	 * Return the type to the user.
	 */
	mutex_exit(&msgmutex);
	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error (%d) copying out message type\n", error));
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Return the segments to the user
	 */

	next = msghdr->msg_spot;
	for (len = 0; len < msgsz; len += msginfo.msgssz) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz - len > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz - len;
		mutex_exit(&msgmutex);
		error = copyout(&msgpool[next * msginfo.msgssz],
		    user_msgp, tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error (%d) copying out message segment\n",
			    error));
			msg_freehdr(msghdr);
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		user_msgp += tlen;
		next = msgmaps[next].next;
	}

	/*
	 * Done, return the actual number of bytes copied out.
	 */

	msg_freehdr(msghdr);
	cv_broadcast(&msq->msq_cv);
	*retval = msgsz;

 unlock:
	mutex_exit(&msgmutex);
	return error;
}

/*
 * Sysctl initialization and nodes.
 */

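/*
 * The msgmni and msgseg handlers validate the new value via
 * sysctl_lookup() and then resize the whole subsystem with msgrealloc().
 */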
static int
sysctl_ipc_msgmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = msgrealloc(newsize, msginfo.msgseg);
	sysctl_relock();
	return error;
}

static int
sysctl_ipc_msgseg(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgseg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = msgrealloc(msginfo.msgmni, newsize);
	sysctl_relock();
	return error;
}

SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "ipc",
	    SYSCTL_DESCR("SysV IPC options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_SYSVIPC, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "msgmni",
	    SYSCTL_DESCR("Max number of message queue identifiers"),
	    sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "msgseg",
	    SYSCTL_DESCR("Max number of message segments"),
	    sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
	    CTL_CREATE, CTL_EOL);
}