/*	$NetBSD: sys_mqueue.c,v 1.26 2009/11/01 21:46:09 rmind Exp $	*/

/*
 * Copyright (c) 2007-2009 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of POSIX message queues.
 * Defined in the Base Definitions volume of IEEE Std 1003.1-2001.
 *
 * Locking
 *
 * The global list of message queues (mqueue_head) and the
 * proc_t::p_mqueue_cnt counter are protected by the mqlist_mtx lock.
 * Each message queue and its members are protected by mqueue::mq_mtx.
 *
 * Lock order:
 *	mqlist_mtx ->
 *		mqueue::mq_mtx
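 *
 * Note: mqueue_destroy() is called with no locks held, and only after the
 * message queue has been removed from the global list (or was never
 * inserted into it).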
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_mqueue.c,v 1.26 2009/11/01 21:46:09 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mqueue.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syscall.h>
#include <sys/syscallvar.h>
#include <sys/syscallargs.h>
#include <sys/systm.h>
#include <sys/unistd.h>

#include <miscfs/genfs/genfs.h>

MODULE(MODULE_CLASS_MISC, mqueue, NULL);

/* System-wide limits. */
static u_int mq_open_max = MQ_OPEN_MAX;
static u_int mq_prio_max = MQ_PRIO_MAX;
static u_int mq_max_msgsize = 16 * MQ_DEF_MSGSIZE;
static u_int mq_def_maxmsg = 32;

static kmutex_t mqlist_mtx;
static pool_cache_t mqmsg_cache;
static LIST_HEAD(, mqueue) mqueue_head;

static int mqueue_sysinit(void);
static int mqueue_sysfini(bool);
static int mq_poll_fop(file_t *, int);
static int mq_stat_fop(file_t *, struct stat *);
static int mq_close_fop(file_t *);

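/*
 * Note: read/write/ioctl on a message queue descriptor use the generic
 * "bad operation" stubs (fbadop_*) and thus fail; queues are accessed
 * through the mq_*() system calls instead.  fcntl, kqfilter and drain
 * use the generic null operations (fnullop_*).
 */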
static const struct fileops mqops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = fbadop_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = mq_poll_fop,
	.fo_stat = mq_stat_fop,
	.fo_close = mq_close_fop,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_drain = fnullop_drain,
};

static const struct syscall_package mqueue_syscalls[] = {
	{ SYS_mq_open, 0, (sy_call_t *)sys_mq_open },
	{ SYS_mq_close, 0, (sy_call_t *)sys_mq_close },
	{ SYS_mq_unlink, 0, (sy_call_t *)sys_mq_unlink },
	{ SYS_mq_getattr, 0, (sy_call_t *)sys_mq_getattr },
	{ SYS_mq_setattr, 0, (sy_call_t *)sys_mq_setattr },
	{ SYS_mq_notify, 0, (sy_call_t *)sys_mq_notify },
	{ SYS_mq_send, 0, (sy_call_t *)sys_mq_send },
	{ SYS_mq_receive, 0, (sy_call_t *)sys_mq_receive },
	{ SYS___mq_timedsend50, 0, (sy_call_t *)sys___mq_timedsend50 },
	{ SYS___mq_timedreceive50, 0, (sy_call_t *)sys___mq_timedreceive50 },
	{ 0, 0, NULL }
};

/*
 * Initialisation and unloading of POSIX message queue subsystem.
 */

static int
mqueue_sysinit(void)
{
	int error;

	mqmsg_cache = pool_cache_init(MQ_DEF_MSGSIZE, coherency_unit,
	    0, 0, "mqmsgpl", NULL, IPL_NONE, NULL, NULL, NULL);
	mutex_init(&mqlist_mtx, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&mqueue_head);

	error = syscall_establish(NULL, mqueue_syscalls);
	if (error) {
		(void)mqueue_sysfini(false);
	}
	return error;
}

static int
mqueue_sysfini(bool interface)
{

	if (interface) {
		int error;
		bool inuse;

		/* Stop syscall activity. */
		error = syscall_disestablish(NULL, mqueue_syscalls);
		if (error)
			return error;
		/*
		 * Check if there are any message queues in use.
		 * TODO: We shall support forced unload.
		 */
		mutex_enter(&mqlist_mtx);
		inuse = !LIST_EMPTY(&mqueue_head);
		mutex_exit(&mqlist_mtx);
		if (inuse) {
			error = syscall_establish(NULL, mqueue_syscalls);
			KASSERT(error == 0);
			return EBUSY;
		}
	}
	mutex_destroy(&mqlist_mtx);
	pool_cache_destroy(mqmsg_cache);
	return 0;
}

/*
 * Module interface.
 */
static int
mqueue_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return mqueue_sysinit();
	case MODULE_CMD_FINI:
		return mqueue_sysfini(true);
	default:
		return ENOTTY;
	}
}

/*
 * Free the message.
 */
static void
mqueue_freemsg(struct mq_msg *msg, const size_t size)
{

	if (size > MQ_DEF_MSGSIZE) {
		kmem_free(msg, size);
	} else {
		pool_cache_put(mqmsg_cache, msg);
	}
}

/*
 * Destroy the message queue.
 */
static void
mqueue_destroy(struct mqueue *mq)
{
	struct mq_msg *msg;
	size_t msz;
	u_int i;

	/* Note: all MQ_PQSIZE + 1 queues, including the reserved one. */
	for (i = 0; i <= MQ_PQSIZE; i++) {
		while ((msg = TAILQ_FIRST(&mq->mq_head[i])) != NULL) {
			TAILQ_REMOVE(&mq->mq_head[i], msg, msg_queue);
			msz = sizeof(struct mq_msg) + msg->msg_len;
			mqueue_freemsg(msg, msz);
		}
	}
	seldestroy(&mq->mq_rsel);
	seldestroy(&mq->mq_wsel);
	cv_destroy(&mq->mq_send_cv);
	cv_destroy(&mq->mq_recv_cv);
	mutex_destroy(&mq->mq_mtx);
	kmem_free(mq, sizeof(struct mqueue));
}

/*
 * mqueue_lookup: look up a message queue by name in the global list.
 * => locks the message queue, if found.
 */
static void *
mqueue_lookup(char *name)
{
	struct mqueue *mq;
	KASSERT(mutex_owned(&mqlist_mtx));

	LIST_FOREACH(mq, &mqueue_head, mq_list) {
		if (strncmp(mq->mq_name, name, MQ_NAMELEN) == 0) {
			mutex_enter(&mq->mq_mtx);
			return mq;
		}
	}

	return NULL;
}

/*
 * mqueue_get: get the mqueue from the descriptor.
 * => locks the message queue, if found.
 * => holds a reference on the file descriptor.
 */
static int
mqueue_get(mqd_t mqd, file_t **fpr)
{
	struct mqueue *mq;
	file_t *fp;

	fp = fd_getfile((int)mqd);
	if (__predict_false(fp == NULL)) {
		return EBADF;
	}
	if (__predict_false(fp->f_type != DTYPE_MQUEUE)) {
		fd_putfile((int)mqd);
		return EBADF;
	}
	mq = fp->f_data;
	mutex_enter(&mq->mq_mtx);

	*fpr = fp;
	return 0;
}

/*
 * mqueue_linear_insert: perform a linear insertion of the message into
 * the reserved queue (MQ_PQRESQ) according to its priority.  The reserved
 * queue is a sorted list used only when mq_prio_max is increased via sysctl.
 */
static inline void
mqueue_linear_insert(struct mqueue *mq, struct mq_msg *msg)
{
	struct mq_msg *mit;

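	/*
	 * The list is kept sorted in descending priority order: insert
	 * before the first message with a lower priority, or at the tail
	 * if there is none.
	 */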
	TAILQ_FOREACH(mit, &mq->mq_head[MQ_PQRESQ], msg_queue) {
		if (msg->msg_prio > mit->msg_prio)
			break;
	}
	if (mit == NULL) {
		TAILQ_INSERT_TAIL(&mq->mq_head[MQ_PQRESQ], msg, msg_queue);
	} else {
		TAILQ_INSERT_BEFORE(mit, msg, msg_queue);
	}
}

static int
mq_stat_fop(file_t *fp, struct stat *st)
{
	struct mqueue *mq = fp->f_data;

	memset(st, 0, sizeof(*st));

	mutex_enter(&mq->mq_mtx);
	st->st_mode = mq->mq_mode;
	st->st_uid = mq->mq_euid;
	st->st_gid = mq->mq_egid;
	st->st_atimespec = mq->mq_atime;
	st->st_mtimespec = mq->mq_mtime;
	st->st_ctimespec = st->st_birthtimespec = mq->mq_btime;
	mutex_exit(&mq->mq_mtx);

	return 0;
}

static int
mq_poll_fop(file_t *fp, int events)
{
	struct mqueue *mq = fp->f_data;
	struct mq_attr *mqattr;
	int revents = 0;

	mutex_enter(&mq->mq_mtx);
	mqattr = &mq->mq_attrib;
	if (events & (POLLIN | POLLRDNORM)) {
		/* Ready for receiving, if there are messages in the queue */
		if (mqattr->mq_curmsgs)
			revents |= (POLLIN | POLLRDNORM);
		else
			selrecord(curlwp, &mq->mq_rsel);
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		/* Ready for sending, if the message queue is not full */
		if (mqattr->mq_curmsgs < mqattr->mq_maxmsg)
			revents |= (POLLOUT | POLLWRNORM);
		else
			selrecord(curlwp, &mq->mq_wsel);
	}
	mutex_exit(&mq->mq_mtx);

	return revents;
}

static int
mq_close_fop(file_t *fp)
{
	struct proc *p = curproc;
	struct mqueue *mq = fp->f_data;
	bool destroy;

	mutex_enter(&mqlist_mtx);
	mutex_enter(&mq->mq_mtx);

	/* Decrease the counters */
	p->p_mqueue_cnt--;
	mq->mq_refcnt--;

	/* Remove notification if registered for this process */
	if (mq->mq_notify_proc == p)
		mq->mq_notify_proc = NULL;

	/*
	 * If this is the last reference and mqueue is marked for unlink,
	 * remove and later destroy the message queue.
	 */
	if (mq->mq_refcnt == 0 && (mq->mq_attrib.mq_flags & MQ_UNLINK)) {
		LIST_REMOVE(mq, mq_list);
		destroy = true;
	} else
		destroy = false;

	mutex_exit(&mq->mq_mtx);
	mutex_exit(&mqlist_mtx);

	if (destroy)
		mqueue_destroy(mq);

	return 0;
}

static int
mqueue_access(struct mqueue *mq, mode_t mode, kauth_cred_t cred)
{

	if (genfs_can_access(VNON, mq->mq_mode, mq->mq_euid,
	    mq->mq_egid, mode, cred)) {
		return EACCES;
	}

	return 0;
}

/*
 * General mqueue system calls.
 */

int
sys_mq_open(struct lwp *l, const struct sys_mq_open_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const char *) name;
		syscallarg(int) oflag;
		syscallarg(mode_t) mode;
		syscallarg(struct mq_attr) attr;
	} */
	struct proc *p = l->l_proc;
	struct mqueue *mq, *mq_new = NULL;
	file_t *fp;
	char *name;
	int mqd, error, oflag;

	oflag = SCARG(uap, oflag);

	/* Get the name from user-space */
	name = kmem_zalloc(MQ_NAMELEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
	if (error) {
		kmem_free(name, MQ_NAMELEN);
		return error;
	}

	if (oflag & O_CREAT) {
		struct cwdinfo *cwdi = p->p_cwdi;
		struct mq_attr attr;
		u_int i;

		/* Check the limit */
		if (p->p_mqueue_cnt == mq_open_max) {
			kmem_free(name, MQ_NAMELEN);
			return EMFILE;
		}

		/* Empty name is invalid */
		if (name[0] == '\0') {
			kmem_free(name, MQ_NAMELEN);
			return EINVAL;
		}

		/* Check for mqueue attributes */
		if (SCARG(uap, attr)) {
			error = copyin(SCARG(uap, attr), &attr,
			    sizeof(struct mq_attr));
			if (error) {
				kmem_free(name, MQ_NAMELEN);
				return error;
			}
			if (attr.mq_maxmsg <= 0 || attr.mq_msgsize <= 0 ||
			    attr.mq_msgsize > mq_max_msgsize) {
				kmem_free(name, MQ_NAMELEN);
				return EINVAL;
			}
			attr.mq_curmsgs = 0;
		} else {
			memset(&attr, 0, sizeof(struct mq_attr));
			attr.mq_maxmsg = mq_def_maxmsg;
			attr.mq_msgsize =
			    MQ_DEF_MSGSIZE - sizeof(struct mq_msg);
		}

		/*
		 * Allocate new mqueue, initialize data structures,
		 * copy the name, attributes and set the flag.
		 */
		mq_new = kmem_zalloc(sizeof(struct mqueue), KM_SLEEP);

		mutex_init(&mq_new->mq_mtx, MUTEX_DEFAULT, IPL_NONE);
		cv_init(&mq_new->mq_send_cv, "mqsendcv");
		cv_init(&mq_new->mq_recv_cv, "mqrecvcv");
		for (i = 0; i < (MQ_PQSIZE + 1); i++) {
			TAILQ_INIT(&mq_new->mq_head[i]);
		}
		selinit(&mq_new->mq_rsel);
		selinit(&mq_new->mq_wsel);

		strlcpy(mq_new->mq_name, name, MQ_NAMELEN);
		memcpy(&mq_new->mq_attrib, &attr, sizeof(struct mq_attr));

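		/*
		 * The internal MQ_UNLINK and MQ_RECEIVE flags are kept in
		 * mq_flags together with the open flags, so they must not
		 * overlap with the O_MASK bits.
		 */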
		CTASSERT((O_MASK & (MQ_UNLINK | MQ_RECEIVE)) == 0);
		mq_new->mq_attrib.mq_flags = (O_MASK & oflag);

		/* Store the mode and the effective UID and GID */
		mq_new->mq_mode = ((SCARG(uap, mode) &
		    ~cwdi->cwdi_cmask) & ALLPERMS) & ~S_ISTXT;
		mq_new->mq_euid = kauth_cred_geteuid(l->l_cred);
		mq_new->mq_egid = kauth_cred_getegid(l->l_cred);
	}

	/* Allocate file structure and descriptor */
	error = fd_allocfile(&fp, &mqd);
	if (error) {
		if (mq_new)
			mqueue_destroy(mq_new);
		kmem_free(name, MQ_NAMELEN);
		return error;
	}
	fp->f_type = DTYPE_MQUEUE;
	fp->f_flag = FFLAGS(oflag) & (FREAD | FWRITE);
	fp->f_ops = &mqops;

	/* Look up the mqueue with this name */
	mutex_enter(&mqlist_mtx);
	mq = mqueue_lookup(name);
	if (mq) {
		mode_t acc_mode;

		KASSERT(mutex_owned(&mq->mq_mtx));

		/* Check if mqueue is not marked as unlinking */
		if (mq->mq_attrib.mq_flags & MQ_UNLINK) {
			error = EACCES;
			goto exit;
		}

		/* Fail if O_EXCL is set, and mqueue already exists */
		if ((oflag & O_CREAT) && (oflag & O_EXCL)) {
			error = EEXIST;
			goto exit;
		}

		/*
		 * Check the permissions. Note the difference between
		 * VREAD/VWRITE and FREAD/FWRITE.
		 */
		acc_mode = 0;
		if (fp->f_flag & FREAD) {
			acc_mode |= VREAD;
		}
		if (fp->f_flag & FWRITE) {
			acc_mode |= VWRITE;
		}
		if (mqueue_access(mq, acc_mode, l->l_cred) != 0) {
			error = EACCES;
			goto exit;
		}
	} else {
		/* Fail if the mqueue does not exist and we are not creating it */
		if ((oflag & O_CREAT) == 0) {
			mutex_exit(&mqlist_mtx);
			KASSERT(mq_new == NULL);
			fd_abort(p, fp, mqd);
			kmem_free(name, MQ_NAMELEN);
			return ENOENT;
		}

		/* Check the limit */
		if (p->p_mqueue_cnt == mq_open_max) {
			error = EMFILE;
			goto exit;
		}

		/* Insert the queue to the list */
		mq = mq_new;
		mutex_enter(&mq->mq_mtx);
		LIST_INSERT_HEAD(&mqueue_head, mq, mq_list);
		mq_new = NULL;
		getnanotime(&mq->mq_btime);
		mq->mq_atime = mq->mq_mtime = mq->mq_btime;
	}

	/* Increase the counters, and make descriptor ready */
	p->p_mqueue_cnt++;
	mq->mq_refcnt++;
	fp->f_data = mq;
exit:
	mutex_exit(&mq->mq_mtx);
	mutex_exit(&mqlist_mtx);

	if (mq_new)
		mqueue_destroy(mq_new);
	if (error) {
		fd_abort(p, fp, mqd);
	} else {
		fd_affix(p, fp, mqd);
		*retval = mqd;
	}
	kmem_free(name, MQ_NAMELEN);

	return error;
}

int
sys_mq_close(struct lwp *l, const struct sys_mq_close_args *uap,
    register_t *retval)
{

	return sys_close(l, (const void *)uap, retval);
}

/*
 * Primary mq_recv1() function.
 */
int
mq_recv1(mqd_t mqdes, void *msg_ptr, size_t msg_len, u_int *msg_prio,
    struct timespec *ts, ssize_t *mlen)
{
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_msg *msg = NULL;
	struct mq_attr *mqattr;
	u_int idx;
	int error;

	/* Get the message queue */
	error = mqueue_get(mqdes, &fp);
	if (error) {
		return error;
	}
	mq = fp->f_data;
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto error;
	}
	getnanotime(&mq->mq_atime);
	mqattr = &mq->mq_attrib;

	/* Check the message size limits */
	if (msg_len < mqattr->mq_msgsize) {
		error = EMSGSIZE;
		goto error;
	}

	/* Check if queue is empty */
	while (mqattr->mq_curmsgs == 0) {
		int t;

		if (mqattr->mq_flags & O_NONBLOCK) {
			error = EAGAIN;
			goto error;
		}
		error = abstimeout2timo(ts, &t);
		if (error) {
			goto error;
		}
		/*
		 * Block until someone sends the message.
		 * While doing this, notification should not be sent.
		 */
		mqattr->mq_flags |= MQ_RECEIVE;
		error = cv_timedwait_sig(&mq->mq_send_cv, &mq->mq_mtx, t);
		mqattr->mq_flags &= ~MQ_RECEIVE;
		if (error || (mqattr->mq_flags & MQ_UNLINK)) {
			error = (error == EWOULDBLOCK) ? ETIMEDOUT : EINTR;
			goto error;
		}
	}

	/*
	 * Find the highest priority message, and remove it from the queue.
	 * The reserved queue is checked first, then the bitmap.
	 */
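	/*
	 * Note: ffs() returns a one-based bit index, which is exactly the
	 * mq_head[] index used by mq_send1(): bit (idx - 1) is set when
	 * mq_head[idx] is non-empty, and the lowest set bit corresponds
	 * to the highest priority.
	 */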
	msg = TAILQ_FIRST(&mq->mq_head[MQ_PQRESQ]);
	if (__predict_true(msg == NULL)) {
		idx = ffs(mq->mq_bitmap);
		msg = TAILQ_FIRST(&mq->mq_head[idx]);
		KASSERT(msg != NULL);
	} else {
		idx = MQ_PQRESQ;
	}
	TAILQ_REMOVE(&mq->mq_head[idx], msg, msg_queue);

	/* Unmark the bit, if last message. */
	if (__predict_true(idx) && TAILQ_EMPTY(&mq->mq_head[idx])) {
		KASSERT((MQ_PQSIZE - idx) == msg->msg_prio);
		mq->mq_bitmap &= ~(1 << --idx);
	}

	/* Decrement the counter and signal waiter, if any */
	mqattr->mq_curmsgs--;
	cv_signal(&mq->mq_recv_cv);

	/* Ready for sending now */
	selnotify(&mq->mq_wsel, POLLOUT | POLLWRNORM, 0);
error:
	mutex_exit(&mq->mq_mtx);
	fd_putfile((int)mqdes);
	if (error)
		return error;

	/*
	 * Copy the data to user-space.
	 * Note: POSIX requires that the message not be removed from the
	 * queue on failure; if the copyout() fails here, that requirement
	 * is violated.
	 */
	*mlen = msg->msg_len;
	error = copyout(msg->msg_ptr, msg_ptr, msg->msg_len);
	if (error == 0 && msg_prio)
		error = copyout(&msg->msg_prio, msg_prio, sizeof(unsigned));
	mqueue_freemsg(msg, sizeof(struct mq_msg) + msg->msg_len);

	return error;
}

int
sys_mq_receive(struct lwp *l, const struct sys_mq_receive_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned *) msg_prio;
	} */
	ssize_t mlen;
	int error;

	error = mq_recv1(SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), NULL, &mlen);
	if (error == 0)
		*retval = mlen;

	return error;
}

int
sys___mq_timedreceive50(struct lwp *l,
    const struct sys___mq_timedreceive50_args *uap, register_t *retval)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned *) msg_prio;
		syscallarg(const struct timespec *) abs_timeout;
	} */
	struct timespec ts, *tsp;
	ssize_t mlen;
	int error;

	/* Get and convert time value */
	if (SCARG(uap, abs_timeout)) {
		error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
		if (error)
			return error;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	error = mq_recv1(SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp, &mlen);
	if (error == 0)
		*retval = mlen;

	return error;
}

/*
 * Primary mq_send1() function.
 */
int
mq_send1(mqd_t mqdes, const char *msg_ptr, size_t msg_len, u_int msg_prio,
    struct timespec *ts)
{
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_msg *msg;
	struct mq_attr *mqattr;
	struct proc *notify = NULL;
	ksiginfo_t ksi;
	size_t size;
	int error;

	/* Check the priority range */
	if (msg_prio >= mq_prio_max)
		return EINVAL;

	/* Allocate a new message */
	size = sizeof(struct mq_msg) + msg_len;
	if (size > mq_max_msgsize)
		return EMSGSIZE;

	if (size > MQ_DEF_MSGSIZE) {
		msg = kmem_alloc(size, KM_SLEEP);
	} else {
		msg = pool_cache_get(mqmsg_cache, PR_WAITOK);
	}

	/* Get the data from user-space */
	error = copyin(msg_ptr, msg->msg_ptr, msg_len);
	if (error) {
		mqueue_freemsg(msg, size);
		return error;
	}
	msg->msg_len = msg_len;
	msg->msg_prio = msg_prio;

	/* Get the mqueue */
	error = mqueue_get(mqdes, &fp);
	if (error) {
		mqueue_freemsg(msg, size);
		return error;
	}
	mq = fp->f_data;
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto error;
	}
	getnanotime(&mq->mq_mtime);
	mqattr = &mq->mq_attrib;

	/* Check the message size limit */
	if (msg_len <= 0 || msg_len > mqattr->mq_msgsize) {
		error = EMSGSIZE;
		goto error;
	}

	/* Check if queue is full */
	while (mqattr->mq_curmsgs >= mqattr->mq_maxmsg) {
		int t;

		if (mqattr->mq_flags & O_NONBLOCK) {
			error = EAGAIN;
			goto error;
		}
		error = abstimeout2timo(ts, &t);
		if (error) {
			goto error;
		}
		/* Block until queue becomes available */
		error = cv_timedwait_sig(&mq->mq_recv_cv, &mq->mq_mtx, t);
		if (error || (mqattr->mq_flags & MQ_UNLINK)) {
			error = (error == EWOULDBLOCK) ? ETIMEDOUT : error;
			goto error;
		}
	}
	KASSERT(mqattr->mq_curmsgs < mqattr->mq_maxmsg);

	/*
	 * Insert message into the queue, according to the priority.
	 * Note the difference between index and priority.
	 */
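	/*
	 * Priority p (p < MQ_PQSIZE) maps to queue index MQ_PQSIZE - p;
	 * bit (idx - 1) in mq_bitmap marks mq_head[idx] as non-empty.
	 * Priorities of MQ_PQSIZE and above go to the sorted reserved queue.
	 */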
	if (__predict_true(msg_prio < MQ_PQSIZE)) {
		u_int idx = MQ_PQSIZE - msg_prio;

		KASSERT(idx != MQ_PQRESQ);
		TAILQ_INSERT_TAIL(&mq->mq_head[idx], msg, msg_queue);
		mq->mq_bitmap |= (1 << --idx);
	} else {
		mqueue_linear_insert(mq, msg);
	}

	/*
	 * Check for the notify: the notification is sent only when a message
	 * arrives on an empty queue and no thread is blocked in mq_receive().
	 */
	if (mqattr->mq_curmsgs == 0 && mq->mq_notify_proc &&
	    (mqattr->mq_flags & MQ_RECEIVE) == 0) {
		/* Initialize the signal */
		KSI_INIT(&ksi);
		ksi.ksi_signo = mq->mq_sig_notify.sigev_signo;
		ksi.ksi_code = SI_MESGQ;
		ksi.ksi_value = mq->mq_sig_notify.sigev_value;
		/* Unregister the process */
		notify = mq->mq_notify_proc;
		mq->mq_notify_proc = NULL;
	}

	/* Increment the counter and signal waiter, if any */
	mqattr->mq_curmsgs++;
	cv_signal(&mq->mq_send_cv);

	/* Ready for receiving now */
	selnotify(&mq->mq_rsel, POLLIN | POLLRDNORM, 0);
error:
	mutex_exit(&mq->mq_mtx);
	fd_putfile((int)mqdes);

	if (error) {
		mqueue_freemsg(msg, size);
	} else if (notify) {
		/* Send the notify, if needed */
		mutex_enter(proc_lock);
		kpsignal(notify, &ksi, NULL);
		mutex_exit(proc_lock);
	}
	return error;
}

int
sys_mq_send(struct lwp *l, const struct sys_mq_send_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned) msg_prio;
	} */

	return mq_send1(SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), NULL);
}

int
sys___mq_timedsend50(struct lwp *l, const struct sys___mq_timedsend50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned) msg_prio;
		syscallarg(const struct timespec *) abs_timeout;
	} */
	struct timespec ts, *tsp;
	int error;

	/* Get and convert time value */
	if (SCARG(uap, abs_timeout)) {
		error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
		if (error)
			return error;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	return mq_send1(SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp);
}

int
sys_mq_notify(struct lwp *l, const struct sys_mq_notify_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const struct sigevent *) notification;
	} */
	file_t *fp = NULL;
	struct mqueue *mq;
	struct sigevent sig;
	int error;

	if (SCARG(uap, notification)) {
		/* Get the signal from user-space */
		error = copyin(SCARG(uap, notification), &sig,
		    sizeof(struct sigevent));
		if (error)
			return error;
	}

	error = mqueue_get(SCARG(uap, mqdes), &fp);
	if (error)
		return error;
	mq = fp->f_data;

	if (SCARG(uap, notification)) {
		/* Register notification: set the signal and target process */
		if (mq->mq_notify_proc == NULL) {
			memcpy(&mq->mq_sig_notify, &sig,
			    sizeof(struct sigevent));
			mq->mq_notify_proc = l->l_proc;
		} else {
			/* Fail if someone else already registered */
			error = EBUSY;
		}
	} else {
		/* Unregister the notification */
		mq->mq_notify_proc = NULL;
	}
	mutex_exit(&mq->mq_mtx);
	fd_putfile((int)SCARG(uap, mqdes));

	return error;
}

int
sys_mq_getattr(struct lwp *l, const struct sys_mq_getattr_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(struct mq_attr *) mqstat;
	} */
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_attr attr;
	int error;

	/* Get the message queue */
	error = mqueue_get(SCARG(uap, mqdes), &fp);
	if (error)
		return error;
	mq = fp->f_data;
	memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
	mutex_exit(&mq->mq_mtx);
	fd_putfile((int)SCARG(uap, mqdes));

	return copyout(&attr, SCARG(uap, mqstat), sizeof(struct mq_attr));
}

int
sys_mq_setattr(struct lwp *l, const struct sys_mq_setattr_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const struct mq_attr *) mqstat;
		syscallarg(struct mq_attr *) omqstat;
	} */
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_attr attr;
	int error, nonblock;

	error = copyin(SCARG(uap, mqstat), &attr, sizeof(struct mq_attr));
	if (error)
		return error;
	nonblock = (attr.mq_flags & O_NONBLOCK);

	/* Get the message queue */
	error = mqueue_get(SCARG(uap, mqdes), &fp);
	if (error)
		return error;
	mq = fp->f_data;

	/* Copy the old attributes, if needed */
	if (SCARG(uap, omqstat)) {
		memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
	}

	/* Ignore everything, except O_NONBLOCK */
	if (nonblock)
		mq->mq_attrib.mq_flags |= O_NONBLOCK;
	else
		mq->mq_attrib.mq_flags &= ~O_NONBLOCK;

	mutex_exit(&mq->mq_mtx);
	fd_putfile((int)SCARG(uap, mqdes));

	/*
	 * Copy the data to user-space.
	 * Note: POSIX requires that the new attributes not be set on failure;
	 * if the copyout() fails here, that requirement is violated.
	 */
	if (SCARG(uap, omqstat))
		error = copyout(&attr, SCARG(uap, omqstat),
		    sizeof(struct mq_attr));

	return error;
}

int
sys_mq_unlink(struct lwp *l, const struct sys_mq_unlink_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const char *) name;
	} */
	struct mqueue *mq;
	char *name;
	int error, refcnt = 0;

	/* Get the name from user-space */
	name = kmem_zalloc(MQ_NAMELEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
	if (error) {
		kmem_free(name, MQ_NAMELEN);
		return error;
	}

	/* Look up the message queue by name */
	mutex_enter(&mqlist_mtx);
	mq = mqueue_lookup(name);
	if (mq == NULL) {
		error = ENOENT;
		goto error;
	}

	/* Check the permissions */
	if (kauth_cred_geteuid(l->l_cred) != mq->mq_euid &&
	    kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
		mutex_exit(&mq->mq_mtx);
		error = EACCES;
		goto error;
	}

	/* Mark the message queue as unlinking, before dropping the locks */
	mq->mq_attrib.mq_flags |= MQ_UNLINK;

	/* Wake up all waiters, if any */
	cv_broadcast(&mq->mq_send_cv);
	cv_broadcast(&mq->mq_recv_cv);

	selnotify(&mq->mq_rsel, POLLHUP, 0);
	selnotify(&mq->mq_wsel, POLLHUP, 0);

	refcnt = mq->mq_refcnt;
	if (refcnt == 0)
		LIST_REMOVE(mq, mq_list);

	mutex_exit(&mq->mq_mtx);
error:
	mutex_exit(&mqlist_mtx);

	/*
	 * If there are no references, destroy the message queue;
	 * otherwise, the last mq_close() will do that.
	 */
	if (error == 0 && refcnt == 0)
		mqueue_destroy(mq);

	kmem_free(name, MQ_NAMELEN);
	return error;
}

/*
 * System control nodes.
 */

SYSCTL_SETUP(sysctl_mqueue_setup, "sysctl mqueue setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		CTLTYPE_INT, "posix_msg",
		SYSCTL_DESCR("Version of IEEE Std 1003.1 and its "
		    "Message Passing option to which the "
		    "system attempts to conform"),
		NULL, _POSIX_MESSAGE_PASSING, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "mqueue",
		SYSCTL_DESCR("Message queue options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mq_open_max",
		SYSCTL_DESCR("Maximal number of message queue descriptors "
		    "that a process can open"),
		NULL, 0, &mq_open_max, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mq_prio_max",
		SYSCTL_DESCR("Maximal priority of the message"),
		NULL, 0, &mq_prio_max, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mq_max_msgsize",
		SYSCTL_DESCR("Maximal allowed size of the message"),
		NULL, 0, &mq_max_msgsize, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mq_def_maxmsg",
		SYSCTL_DESCR("Default maximal message count"),
		NULL, 0, &mq_def_maxmsg, 0,
		CTL_CREATE, CTL_EOL);
}

/*
 * Debugging.
 */
#if defined(DDB)

void
mqueue_print_list(void (*pr)(const char *, ...))
{
	struct mqueue *mq;

	(*pr)("Global list of the message queues:\n");
	(*pr)("%20s %10s %8s %8s %3s %4s %4s %4s\n",
	    "Name", "Ptr", "Mode", "Flags", "Ref",
	    "MaxMsg", "MsgSze", "CurMsg");
	LIST_FOREACH(mq, &mqueue_head, mq_list) {
		(*pr)("%20s %10p %8x %8x %3u %6lu %6lu %6lu\n",
		    mq->mq_name, mq, mq->mq_mode,
		    mq->mq_attrib.mq_flags, mq->mq_refcnt,
		    mq->mq_attrib.mq_maxmsg, mq->mq_attrib.mq_msgsize,
		    mq->mq_attrib.mq_curmsgs);
	}
}

#endif /* defined(DDB) */