/*	$NetBSD: uipc_socket.c,v 1.261 2018/03/19 16:32:30 roy Exp $	*/

/*-
 * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004 Robert Watson
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.6 (Berkeley) 5/2/95
 */

/*
 * Socket operation routines.
 *
 * These routines are called by the routines in sys_socket.c or from a
 * system process, and implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.261 2018/03/19 16:32:30 roy Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_sock_counters.h"
#include "opt_sosend_loan.h"
#include "opt_mbuftrace.h"
#include "opt_somaxkva.h"
#include "opt_multiprocessor.h"	/* XXX */
#include "opt_sctp.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/uidinfo.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/kthread.h>

#ifdef COMPAT_50
#include <compat/sys/time.h>
#include <compat/sys/socket.h>
#endif

#include <uvm/uvm_extern.h>
#include <uvm/uvm_loan.h>
#include <uvm/uvm_page.h>

MALLOC_DEFINE(M_SONAME, "soname", "socket name");

extern const struct fileops socketops;

extern int	somaxconn;	/* patchable (XXX sysctl) */
int		somaxconn = SOMAXCONN;
kmutex_t	*softnet_lock;

#ifdef SOSEND_COUNTERS
#include <sys/device.h>

static struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "loan big");
static struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy big");
static struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy small");
static struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "kva limit");

#define	SOSEND_COUNTER_INCR(ev)		(ev)->ev_count++

EVCNT_ATTACH_STATIC(sosend_loan_big);
EVCNT_ATTACH_STATIC(sosend_copy_big);
EVCNT_ATTACH_STATIC(sosend_copy_small);
EVCNT_ATTACH_STATIC(sosend_kvalimit);
#else

#define	SOSEND_COUNTER_INCR(ev)		/* nothing */

#endif /* SOSEND_COUNTERS */

#if defined(SOSEND_NO_LOAN) || defined(MULTIPROCESSOR)
int sock_loan_thresh = -1;
#else
int sock_loan_thresh = 4096;
#endif

static kmutex_t so_pendfree_lock;
static struct mbuf *so_pendfree = NULL;

#ifndef SOMAXKVA
#define	SOMAXKVA (16 * 1024 * 1024)
#endif
int somaxkva = SOMAXKVA;
static int socurkva;
static kcondvar_t socurkva_cv;

static kauth_listener_t socket_listener;

#define	SOCK_LOAN_CHUNK		65536

static void sopendfree_thread(void *);
static kcondvar_t pendfree_thread_cv;
static lwp_t *sopendfree_lwp;

static void sysctl_kern_socket_setup(void);
static struct sysctllog *socket_sysctllog;

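/*
 * sokvareserve: reserve kva for a loan, waiting (interruptibly) while
 * the somaxkva limit is exceeded.  Returns the reserved length, or 0
 * if the wait was interrupted by a signal.
 */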
static vsize_t
sokvareserve(struct socket *so, vsize_t len)
{
	int error;

	mutex_enter(&so_pendfree_lock);
	while (socurkva + len > somaxkva) {
		SOSEND_COUNTER_INCR(&sosend_kvalimit);
		error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock);
		if (error) {
			len = 0;
			break;
		}
	}
	socurkva += len;
	mutex_exit(&so_pendfree_lock);
	return len;
}

static void
sokvaunreserve(vsize_t len)
{

	mutex_enter(&so_pendfree_lock);
	socurkva -= len;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);
}

/*
 * sokvaalloc: allocate kva for loan.
 */

vaddr_t
sokvaalloc(vaddr_t sva, vsize_t len, struct socket *so)
{
	vaddr_t lva;

	/*
	 * reserve kva.
	 */

	if (sokvareserve(so, len) == 0)
		return 0;

	/*
	 * allocate kva.
	 */

	lva = uvm_km_alloc(kernel_map, len, atop(sva) & uvmexp.colormask,
	    UVM_KMF_COLORMATCH | UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (lva == 0) {
		sokvaunreserve(len);
		return (0);
	}

	return lva;
}

/*
 * sokvafree: free kva for loan.
 */

void
sokvafree(vaddr_t sva, vsize_t len)
{

	/*
	 * free kva.
	 */

	uvm_km_free(kernel_map, sva, len, UVM_KMF_VAONLY);

	/*
	 * unreserve kva.
	 */

	sokvaunreserve(len);
}

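/*
 * sodoloanfree: unmap and unloan the pages backing a loaned transmit
 * buffer, then release the kva that was mapping them.
 */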
static void
sodoloanfree(struct vm_page **pgs, void *buf, size_t size)
{
	vaddr_t sva, eva;
	vsize_t len;
	int npgs;

	KASSERT(pgs != NULL);

	eva = round_page((vaddr_t) buf + size);
	sva = trunc_page((vaddr_t) buf);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	pmap_kremove(sva, len);
	pmap_update(pmap_kernel());
	uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE);
	sokvafree(sva, len);
}

/*
 * sopendfree_thread: free mbufs on "pendfree" list.
 * unlock and relock so_pendfree_lock when freeing mbufs.
 */

static void
sopendfree_thread(void *v)
{
	struct mbuf *m, *next;
	size_t rv;

	mutex_enter(&so_pendfree_lock);

	for (;;) {
		rv = 0;
		while (so_pendfree != NULL) {
			m = so_pendfree;
			so_pendfree = NULL;
			mutex_exit(&so_pendfree_lock);

			for (; m != NULL; m = next) {
				next = m->m_next;
				KASSERT((~m->m_flags & (M_EXT|M_EXT_PAGES)) ==
				    0);
				KASSERT(m->m_ext.ext_refcnt == 0);

				rv += m->m_ext.ext_size;
				sodoloanfree(m->m_ext.ext_pgs, m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				pool_cache_put(mb_cache, m);
			}

			mutex_enter(&so_pendfree_lock);
		}
		if (rv)
			cv_broadcast(&socurkva_cv);
		cv_wait(&pendfree_thread_cv, &so_pendfree_lock);
	}
	panic("sopendfree_thread");
	/* NOTREACHED */
}

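/*
 * soloanfree: mbuf external-storage free callback for loaned pages.
 * Queues the mbuf for the pendfree thread (above), since the kva
 * cannot be returned to kernel_map from interrupt context.
 */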
void
soloanfree(struct mbuf *m, void *buf, size_t size, void *arg)
{

	KASSERT(m != NULL);

	/*
	 * postpone freeing mbuf.
	 *
	 * we can't do it in interrupt context
	 * because we need to put kva back to kernel_map.
	 */

	mutex_enter(&so_pendfree_lock);
	m->m_next = so_pendfree;
	so_pendfree = m;
	cv_signal(&pendfree_thread_cv);
	mutex_exit(&so_pendfree_lock);
}

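/*
 * sosend_loan: zero-copy transmit path.  Loan up to SOCK_LOAN_CHUNK
 * bytes of the user's buffer into kernel kva (mapped read-only)
 * instead of copying, attaching the mapping to 'm' as external
 * storage.  Returns the number of bytes loaned, or 0 to make the
 * caller fall back to copying.
 */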
static long
sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space)
{
	struct iovec *iov = uio->uio_iov;
	vaddr_t sva, eva;
	vsize_t len;
	vaddr_t lva;
	int npgs, error;
	vaddr_t va;
	int i;

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace))
		return (0);

	if (iov->iov_len < (size_t) space)
		space = iov->iov_len;
	if (space > SOCK_LOAN_CHUNK)
		space = SOCK_LOAN_CHUNK;

	eva = round_page((vaddr_t) iov->iov_base + space);
	sva = trunc_page((vaddr_t) iov->iov_base);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	KASSERT(npgs <= M_EXT_MAXPAGES);

	lva = sokvaalloc(sva, len, so);
	if (lva == 0)
		return 0;

	error = uvm_loan(&uio->uio_vmspace->vm_map, sva, len,
	    m->m_ext.ext_pgs, UVM_LOAN_TOPAGE);
	if (error) {
		sokvafree(lva, len);
		return (0);
	}

	for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE)
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]),
		    VM_PROT_READ, 0);
	pmap_update(pmap_kernel());

	lva += (vaddr_t) iov->iov_base & PAGE_MASK;

	MEXTADD(m, (void *) lva, space, M_MBUF, soloanfree, so);
	m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP;

	uio->uio_resid -= space;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + space;
	uio->uio_iov->iov_len -= space;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (space);
}

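/*
 * getsombuf: allocate an mbuf of the given type and charge it to the
 * socket's mbuf owner (for MBUFTRACE accounting).
 */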
struct mbuf *
getsombuf(struct socket *so, int type)
{
	struct mbuf *m;

	m = m_get(M_WAIT, type);
	MCLAIM(m, so->so_mowner);
	return m;
}

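/*
 * socket_listener_cb: kauth(9) listener for the network scope.  Decides
 * whether socket-related requests (open, bind, drop, cansee) are allowed
 * for unprivileged credentials, deferring everything else to the
 * secmodel.
 */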
static int
socket_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if ((action != KAUTH_NETWORK_SOCKET) &&
	    (action != KAUTH_NETWORK_BIND))
		return result;

	switch (req) {
	case KAUTH_REQ_NETWORK_BIND_PORT:
		result = KAUTH_RESULT_ALLOW;
		break;

	case KAUTH_REQ_NETWORK_SOCKET_DROP: {
		/* Normal users can only drop their own connections. */
		struct socket *so = (struct socket *)arg1;

		if (so->so_cred && proc_uidmatch(cred, so->so_cred) == 0)
			result = KAUTH_RESULT_ALLOW;

		break;
	}

	case KAUTH_REQ_NETWORK_SOCKET_OPEN:
		/* We allow "raw" routing/bluetooth sockets to anyone. */
		switch ((u_long)arg1) {
		case PF_ROUTE:
		case PF_OROUTE:
		case PF_BLUETOOTH:
		case PF_CAN:
			result = KAUTH_RESULT_ALLOW;
			break;
		default:
			/* Privileged, let secmodel handle this. */
			if ((u_long)arg2 == SOCK_RAW)
				break;
			result = KAUTH_RESULT_ALLOW;
			break;
		}
		break;

	case KAUTH_REQ_NETWORK_SOCKET_CANSEE:
		result = KAUTH_RESULT_ALLOW;

		break;

	default:
		break;
	}

	return result;
}

void
soinit(void)
{

	sysctl_kern_socket_setup();

	mutex_init(&so_pendfree_lock, MUTEX_DEFAULT, IPL_VM);
	softnet_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	cv_init(&socurkva_cv, "sokva");
	cv_init(&pendfree_thread_cv, "sopendfr");
	soinit2();

	/* Set the initial adjusted socket buffer size. */
	if (sb_max_set(sb_max))
		panic("bad initial sb_max value: %lu", sb_max);

	socket_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    socket_listener_cb, NULL);
}

void
soinit1(void)
{
	int error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    sopendfree_thread, NULL, &sopendfree_lwp, "sopendfree");
	if (error)
		panic("soinit1 %d", error);
}

/*
 * socreate: create a new socket of the specified type and the protocol.
 *
 * => Caller may specify another socket for lock sharing (must not be held).
 * => Returns the new socket without lock held.
 */
int
socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l,
    struct socket *lockso)
{
	const struct protosw *prp;
	struct socket *so;
	uid_t uid;
	int error;
	kmutex_t *lock;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET,
	    KAUTH_REQ_NETWORK_SOCKET_OPEN, KAUTH_ARG(dom), KAUTH_ARG(type),
	    KAUTH_ARG(proto));
	if (error != 0)
		return error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL) {
		/* no support for domain */
		if (pffinddomain(dom) == 0)
			return EAFNOSUPPORT;
		/* no support for socket type */
		if (proto == 0 && type != 0)
			return EPROTOTYPE;
		return EPROTONOSUPPORT;
	}
	if (prp->pr_usrreqs == NULL)
		return EPROTONOSUPPORT;
	if (prp->pr_type != type)
		return EPROTOTYPE;

	so = soget(true);
	so->so_type = type;
	so->so_proto = prp;
	so->so_send = sosend;
	so->so_receive = soreceive;
#ifdef MBUFTRACE
	so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_mowner = &prp->pr_domain->dom_mowner;
#endif
	uid = kauth_cred_geteuid(l->l_cred);
	so->so_uidinfo = uid_find(uid);
	so->so_cpid = l->l_proc->p_pid;

	/*
	 * Lock assigned and taken during PCB attach, unless we share
	 * the lock with another socket, e.g. socketpair(2) case.
	 */
	if (lockso) {
		lock = lockso->so_lock;
		so->so_lock = lock;
		mutex_obj_hold(lock);
		mutex_enter(lock);
	}

	/* Attach the PCB (returns with the socket lock held). */
	error = (*prp->pr_usrreqs->pr_attach)(so, proto);
	KASSERT(solocked(so));

	if (error) {
		KASSERT(so->so_pcb == NULL);
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return error;
	}
	so->so_cred = kauth_cred_dup(l->l_cred);
	sounlock(so);

	*aso = so;
	return 0;
}

/*
 * fsocreate: create a socket and a file descriptor associated with it.
 *
 * => On success, write file descriptor to fdout and return zero.
 * => On failure, return non-zero; *fdout will be undefined.
 */
int
fsocreate(int domain, struct socket **sop, int type, int proto, int *fdout)
{
	lwp_t *l = curlwp;
	int error, fd, flags;
	struct socket *so;
	struct file *fp;

	if ((error = fd_allocfile(&fp, &fd)) != 0) {
		return error;
	}
	flags = type & SOCK_FLAGS_MASK;
	fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0);
	fp->f_flag = FREAD|FWRITE|((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0)|
	    ((flags & SOCK_NOSIGPIPE) ? FNOSIGPIPE : 0);
	fp->f_type = DTYPE_SOCKET;
	fp->f_ops = &socketops;

	type &= ~SOCK_FLAGS_MASK;
	error = socreate(domain, &so, type, proto, l, NULL);
	if (error) {
		fd_abort(curproc, fp, fd);
		return error;
	}
	if (flags & SOCK_NONBLOCK) {
		so->so_state |= SS_NBIO;
	}
	fp->f_socket = so;
	fd_affix(curproc, fp, fd);

	if (sop != NULL) {
		*sop = so;
	}
	*fdout = fd;
	return error;
}

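/*
 * sofamily: return the address family of the socket's protocol, or
 * AF_UNSPEC if it cannot be determined.
 */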
int
sofamily(const struct socket *so)
{
	const struct protosw *pr;
	const struct domain *dom;

	if ((pr = so->so_proto) == NULL)
		return AF_UNSPEC;
	if ((dom = pr->pr_domain) == NULL)
		return AF_UNSPEC;
	return dom->dom_family;
}

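/*
 * sobind: bind the socket to the given local name, after checking that
 * the name's address family matches the socket's domain.
 */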
int
sobind(struct socket *so, struct sockaddr *nam, struct lwp *l)
{
	int error;

	solock(so);
	if (nam->sa_family != so->so_proto->pr_domain->dom_family) {
		sounlock(so);
		return EAFNOSUPPORT;
	}
	error = (*so->so_proto->pr_usrreqs->pr_bind)(so, nam, l);
	sounlock(so);
	return error;
}

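/*
 * solisten: mark the socket as accepting connections and set its queue
 * limit, rolling both back if the protocol's listen routine fails.
 */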
int
solisten(struct socket *so, int backlog, struct lwp *l)
{
	int error;
	short oldopt, oldqlimit;

	solock(so);
	if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) != 0) {
		sounlock(so);
		return EINVAL;
	}
	oldopt = so->so_options;
	oldqlimit = so->so_qlimit;
	if (TAILQ_EMPTY(&so->so_q))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, somaxconn);

	error = (*so->so_proto->pr_usrreqs->pr_listen)(so, l);
	if (error != 0) {
		so->so_options = oldopt;
		so->so_qlimit = oldqlimit;
		sounlock(so);
		return error;
	}
	sounlock(so);
	return 0;
}

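/*
 * sofree: tear down a socket that holds no PCB and no file descriptor
 * reference.  Called with the socket locked; releases the lock, and
 * frees the socket unless an abort is still in progress.
 */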
void
sofree(struct socket *so)
{
	u_int refs;

	KASSERT(solocked(so));

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		sounlock(so);
		return;
	}
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0)) {
			sounlock(so);
			return;
		}
	}
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_rcv.sb_hiwat, 0,
		    RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_snd.sb_hiwat, 0,
		    RLIM_INFINITY);
	sbrelease(&so->so_snd, so);
	KASSERT(!cv_has_waiters(&so->so_cv));
	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
	sorflush(so);
	refs = so->so_aborting;	/* XXX */
	/* Remove accept filter if one is present. */
	if (so->so_accf != NULL)
		(void)accept_filt_clear(so);
	sounlock(so);
	if (refs == 0)		/* XXX */
		soput(so);
}

/*
 * soclose: close a socket on last file table reference removal.
 * Initiate disconnect if connected.  Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	struct socket *so2;
	int error = 0;

	solock(so);
	if (so->so_options & SO_ACCEPTCONN) {
		for (;;) {
			if ((so2 = TAILQ_FIRST(&so->so_q0)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 0);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			if ((so2 = TAILQ_FIRST(&so->so_q)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 1);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			break;
		}
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & (SS_ISDISCONNECTING|SS_NBIO)) ==
			    (SS_ISDISCONNECTING|SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sowait(so, true, so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
 drop:
	if (so->so_pcb) {
		KASSERT(solocked(so));
		(*so->so_proto->pr_usrreqs->pr_detach)(so);
	}
 discard:
	KASSERT((so->so_state & SS_NOFDREF) == 0);
	kauth_cred_free(so->so_cred);
	so->so_state |= SS_NOFDREF;
	sofree(so);
	return error;
}

/*
 * Must be called with the socket locked.  Will return with it unlocked.
 */
int
soabort(struct socket *so)
{
	u_int refs;
	int error;

	KASSERT(solocked(so));
	KASSERT(so->so_head == NULL);

	so->so_aborting++;		/* XXX */
	error = (*so->so_proto->pr_usrreqs->pr_abort)(so);
	refs = --so->so_aborting;	/* XXX */
	if (error || (refs == 0)) {
		sofree(so);
	} else {
		sounlock(so);
	}
	return error;
}

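/*
 * soaccept: hand a queued connection to the new file descriptor.
 * Fails with ECONNABORTED if the socket was disconnected while queued
 * and the protocol has PR_ABRTACPTDIS set.
 */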
int
soaccept(struct socket *so, struct sockaddr *nam)
{
	int error;

	KASSERT(solocked(so));
	KASSERT((so->so_state & SS_NOFDREF) != 0);

	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreqs->pr_accept)(so, nam);
	else
		error = ECONNABORTED;

	return error;
}

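/*
 * soconnect: initiate a connection to the given address.  Called with
 * the socket locked; returns with it still locked.
 */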
int
soconnect(struct socket *so, struct sockaddr *nam, struct lwp *l)
{
	int error;

	KASSERT(solocked(so));

	if (so->so_options & SO_ACCEPTCONN)
		return EOPNOTSUPP;
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		if (nam->sa_family != so->so_proto->pr_domain->dom_family) {
			return EAFNOSUPPORT;
		}
		error = (*so->so_proto->pr_usrreqs->pr_connect)(so, nam, l);
	}

	return error;
}

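/*
 * soconnect2: connect two sockets to each other (socketpair(2) case).
 * Both sockets must share the same lock, and it must be held.
 */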
int
soconnect2(struct socket *so1, struct socket *so2)
{
	KASSERT(solocked2(so1, so2));

	return (*so1->so_proto->pr_usrreqs->pr_connect2)(so1, so2);
}

int
sodisconnect(struct socket *so)
{
	int error;

	KASSERT(solocked(so));

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
	} else if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
	} else {
		error = (*so->so_proto->pr_usrreqs->pr_disconnect)(so);
	}
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct lwp *l)
{
	struct mbuf **mp, *m;
	long space, len, resid, clen, mlen;
	int error, s, dontroute, atomic;
	short wakeup_state = 0;

	clen = 0;

	/*
	 * solock() provides atomicity of access.  splsoftnet() prevents
	 * protocol processing soft interrupts from interrupting us and
	 * blocking (expensive).
	 */
	s = splsoftnet();
	solock(so);
	atomic = sosendallatonce(so) || top;
	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	l->l_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (resid || clen == 0) {
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				error = EWOULDBLOCK;
				goto release;
			}
			sbunlock(&so->so_snd);
			if (wakeup_state & SS_RESTARTSYS) {
				error = ERESTART;
				goto out;
			}
			error = sbwait(&so->so_snd);
			if (error)
				goto out;
			wakeup_state = so->so_state;
			goto restart;
		}
		wakeup_state = 0;
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				sounlock(so);
				splx(s);
				if (top == NULL) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m_reset_rcvif(m);
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				MCLAIM(m, so->so_snd.sb_mowner);
				if (sock_loan_thresh >= 0 &&
				    uio->uio_iov->iov_len >= sock_loan_thresh &&
				    space >= sock_loan_thresh &&
				    (len = sosend_loan(so, uio, m,
				    space)) != 0) {
					SOSEND_COUNTER_INCR(&sosend_loan_big);
					space -= len;
					goto have_data;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					SOSEND_COUNTER_INCR(&sosend_copy_big);
					m_clget(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
					space -= len;
				} else {
 nopages:
					SOSEND_COUNTER_INCR(&sosend_copy_small);
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, void *), (int)len, uio);
 have_data:
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				s = splsoftnet();
				solock(so);
				if (error != 0)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				goto release;
			}
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			if (flags & MSG_OOB) {
				error = (*so->so_proto->pr_usrreqs->pr_sendoob)(
				    so, top, control);
			} else {
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    top, addr, control, l);
			}
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error != 0)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	sounlock(so);
	splx(s);
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
static void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	KASSERT(solocked(sb->sb_so));

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct lwp *l = curlwp;
	struct mbuf *m, **mp, *mt;
	size_t len, offset, moff, orig_resid;
	int atomic, flags, error, s, type;
	const struct protosw *pr;
	struct mbuf *nextrecord;
	int mbuf_removed = 0;
	const struct domain *dom;
	short wakeup_state = 0;

	pr = so->so_proto;
	atomic = pr->pr_flags & PR_ATOMIC;
	dom = pr->pr_domain;
	mp = mp0;
	type = 0;
	orig_resid = uio->uio_resid;

	if (paddr != NULL)
		*paddr = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;

	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = (*pr->pr_usrreqs->pr_recvoob)(so, m, flags & MSG_PEEK);
		sounlock(so);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, void *),
			    MIN(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid > 0 && error == 0 && m);
 bad:
		if (m != NULL)
			m_freem(m);
		return error;
	}
	if (mp != NULL)
		*mp = NULL;

	/*
	 * solock() provides atomicity of access.  splsoftnet() prevents
	 * protocol processing soft interrupts from interrupting us and
	 * blocking (expensive).
	 */
	s = splsoftnet();
	solock(so);
 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so);
		splx(s);
		return error;
	}

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL ||
	    ((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) &&
	    uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && !atomic)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m != NULL)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(&so->so_rcv);
		if (wakeup_state & SS_RESTARTSYS)
			error = ERESTART;
		else
			error = sbwait(&so->so_rcv);
		if (error != 0) {
			sounlock(so);
			splx(s);
			return error;
		}
		wakeup_state = so->so_state;
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket lock, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	if (l != NULL)
		l->l_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			mbuf_removed = 1;
			if (paddr != NULL) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				m = so->so_rcv.sb_mb = m_free(m);
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	if (pr->pr_flags & PR_ADDR_OPT) {
		/*
		 * For SCTP we may be getting a
		 * whole message OR a partial delivery.
		 */
		if (m->m_type == MT_SONAME) {
			orig_resid = 0;
			if (flags & MSG_PEEK) {
				if (paddr)
					*paddr = m_copy(m, 0, m->m_len);
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				if (paddr) {
					*paddr = m;
					so->so_rcv.sb_mb = m->m_next;
					m->m_next = 0;
					m = so->so_rcv.sb_mb;
				} else {
					m = so->so_rcv.sb_mb = m_free(m);
				}
			}
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (__predict_false(m != NULL && m->m_type == MT_CONTROL)) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sbsync(&so->so_rcv, nextrecord);
		for (; cm != NULL; cm = cmn) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			type = mtod(cm, struct cmsghdr *)->cmsg_type;
			if (controlp != NULL) {
				if (dom->dom_externalize != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					splx(s);
					error = (*dom->dom_externalize)(cm, l,
					    (flags & MSG_CMSG_CLOEXEC) ?
					    O_CLOEXEC : 0);
					s = splsoftnet();
					solock(so);
				}
				*controlp = cm;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (dom->dom_dispose != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					(*dom->dom_dispose)(cm);
					solock(so);
				}
				m_freem(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}

	/* If m is non-NULL, we have some data to read. */
	if (__predict_true(m != NULL)) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		wakeup_state = 0;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be consistent here (sb_mb points to
		 * the current mbuf, its m_nextpkt to the next record)
		 * when we drop priority; we must note any additions to
		 * the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			sounlock(so);
			splx(s);
			error = uiomove(mtod(m, char *) + moff, len, uio);
			s = splsoftnet();
			solock(so);
			if (error != 0) {
				/*
				 * If any part of the record has been removed
				 * (such as the MT_SONAME mbuf, which will
				 * happen when PR_ADDR, and thus also
				 * PR_ATOMIC, is set), then drop the entire
				 * record to maintain the atomicity of the
				 * receive operation.
				 *
				 * This avoids a later panic("receive 1a")
				 * when compiled with DIAGNOSTIC.
				 */
				if (m && mbuf_removed && atomic)
					(void) sbdroprecord(&so->so_rcv);

				goto release;
			}
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
#ifdef SCTP
			if (m->m_flags & M_NOTIFICATION)
				flags |= MSG_NOTIFICATION;
#endif /* SCTP */
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					m = so->so_rcv.sb_mb = m_free(m);
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else if (flags & MSG_PEEK)
			moff += len;
		else {
			if (mp != NULL) {
				mt = m_copym(m, 0, len, M_NOWAIT);
				if (__predict_false(mt == NULL)) {
					sounlock(so);
					mt = m_copym(m, 0, len, M_WAIT);
					solock(so);
				}
				*mp = mt;
			}
			m->m_data += len;
			m->m_len -= len;
			so->so_rcv.sb_cc -= len;
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			if (wakeup_state & SS_RESTARTSYS)
				error = ERESTART;
			else
				error = sbwait(&so->so_rcv);
			if (error != 0) {
				sbunlock(&so->so_rcv);
				sounlock(so);
				splx(s);
				return 0;
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
			wakeup_state = so->so_state;
		}
	}

	if (m && atomic) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	sounlock(so);
	splx(s);
	return error;
}

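/*
 * soshutdown: shut down part of a full-duplex connection.  SHUT_RD
 * flushes the receive side, SHUT_WR asks the protocol to stop sending,
 * and SHUT_RDWR does both.
 */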
int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr;
	int error;

	KASSERT(solocked(so));

	pr = so->so_proto;
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how == SHUT_RD || how == SHUT_RDWR) {
		sorflush(so);
		error = 0;
	}
	if (how == SHUT_WR || how == SHUT_RDWR)
		error = (*pr->pr_usrreqs->pr_shutdown)(so);

	return error;
}

void
sorestart(struct socket *so)
{
	/*
	 * An application has called close() on an fd on which another
	 * of its threads has called a socket system call.
	 * Mark this and wake everyone up, and code that would block again
	 * instead returns ERESTART.
	 * On system call re-entry the fd is validated and EBADF returned.
	 * Any other fd will block again on the 2nd syscall.
	 */
	solock(so);
	so->so_state |= SS_RESTARTSYS;
	cv_broadcast(&so->so_cv);
	cv_broadcast(&so->so_snd.sb_cv);
	cv_broadcast(&so->so_rcv.sb_cv);
	sounlock(so);
}

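/*
 * sorflush: mark the receive side closed and discard anything queued
 * on it, disposing of in-flight SCM_RIGHTS (passed descriptors) via
 * the domain's dispose routine.
 */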
void
sorflush(struct socket *so)
{
	struct sockbuf *sb, asb;
	const struct protosw *pr;

	KASSERT(solocked(so));

	sb = &so->so_rcv;
	pr = so->so_proto;
	socantrcvmore(so);
	sb->sb_flags |= SB_NOINTR;
	(void)sblock(sb, M_WAITOK);
	sbunlock(sb);
	asb = *sb;
	/*
	 * Clear most of the sockbuf structure, but leave some of the
	 * fields valid.
	 */
	memset(&sb->sb_startzero, 0,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) {
		sounlock(so);
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
		solock(so);
	}
	sbrelease(&asb, so);
}

/*
 * internal set SOL_SOCKET options
 */
static int
sosetopt1(struct socket *so, const struct sockopt *sopt)
{
	int error = EINVAL, opt;
	int optval = 0; /* XXX: gcc */
	struct linger l;
	struct timeval tv;

	switch ((opt = sopt->sopt_name)) {

	case SO_ACCEPTFILTER:
		error = accept_filt_setopt(so, sopt);
		KASSERT(solocked(so));
		break;

	case SO_LINGER:
		error = sockopt_get(sopt, &l, sizeof(l));
		solock(so);
		if (error)
			break;
		if (l.l_linger < 0 || l.l_linger > USHRT_MAX ||
		    l.l_linger > (INT_MAX / hz)) {
			error = EDOM;
			break;
		}
		so->so_linger = l.l_linger;
		if (l.l_onoff)
			so->so_options |= SO_LINGER;
		else
			so->so_options &= ~SO_LINGER;
		break;

	case SO_DEBUG:
	case SO_KEEPALIVE:
	case SO_DONTROUTE:
	case SO_USELOOPBACK:
	case SO_BROADCAST:
	case SO_REUSEADDR:
	case SO_REUSEPORT:
	case SO_OOBINLINE:
	case SO_TIMESTAMP:
	case SO_NOSIGPIPE:
#ifdef SO_OTIMESTAMP
	case SO_OTIMESTAMP:
#endif
		error = sockopt_getint(sopt, &optval);
		solock(so);
		if (error)
			break;
		if (optval)
			so->so_options |= opt;
		else
			so->so_options &= ~opt;
		break;

	case SO_SNDBUF:
	case SO_RCVBUF:
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		error = sockopt_getint(sopt, &optval);
		solock(so);
		if (error)
			break;

		/*
		 * Values < 1 make no sense for any of these
		 * options, so disallow them.
		 */
		if (optval < 1) {
			error = EINVAL;
			break;
		}

		switch (opt) {
		case SO_SNDBUF:
			if (sbreserve(&so->so_snd, (u_long)optval, so) == 0) {
				error = ENOBUFS;
				break;
			}
			so->so_snd.sb_flags &= ~SB_AUTOSIZE;
			break;

		case SO_RCVBUF:
			if (sbreserve(&so->so_rcv, (u_long)optval, so) == 0) {
				error = ENOBUFS;
				break;
			}
			so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
			break;

		/*
		 * Make sure the low-water is never greater than
		 * the high-water.
		 */
		case SO_SNDLOWAT:
			if (optval > so->so_snd.sb_hiwat)
				optval = so->so_snd.sb_hiwat;

			so->so_snd.sb_lowat = optval;
			break;

		case SO_RCVLOWAT:
			if (optval > so->so_rcv.sb_hiwat)
				optval = so->so_rcv.sb_hiwat;

			so->so_rcv.sb_lowat = optval;
			break;
		}
		break;

#ifdef COMPAT_50
	case SO_OSNDTIMEO:
	case SO_ORCVTIMEO: {
		struct timeval50 otv;
		error = sockopt_get(sopt, &otv, sizeof(otv));
		if (error) {
			solock(so);
			break;
		}
		timeval50_to_timeval(&otv, &tv);
		opt = opt == SO_OSNDTIMEO ? SO_SNDTIMEO : SO_RCVTIMEO;
		error = 0;
		/*FALLTHROUGH*/
	}
#endif /* COMPAT_50 */

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		if (error)
			error = sockopt_get(sopt, &tv, sizeof(tv));
		solock(so);
		if (error)
			break;

		if (tv.tv_sec > (INT_MAX - tv.tv_usec / tick) / hz) {
			error = EDOM;
			break;
		}

		optval = tv.tv_sec * hz + tv.tv_usec / tick;
		if (optval == 0 && tv.tv_usec != 0)
			optval = 1;

		switch (opt) {
		case SO_SNDTIMEO:
			so->so_snd.sb_timeo = optval;
			break;
		case SO_RCVTIMEO:
			so->so_rcv.sb_timeo = optval;
			break;
		}
		break;

	default:
		solock(so);
		error = ENOPROTOOPT;
		break;
	}
	KASSERT(solocked(so));
	return error;
}

int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, prerr;

	if (sopt->sopt_level == SOL_SOCKET) {
		error = sosetopt1(so, sopt);
		KASSERT(solocked(so));
	} else {
		error = ENOPROTOOPT;
		solock(so);
	}

	if ((error == 0 || error == ENOPROTOOPT) &&
	    so->so_proto != NULL && so->so_proto->pr_ctloutput != NULL) {
		/* give the protocol stack a shot */
		prerr = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, sopt);
		if (prerr == 0)
			error = 0;
		else if (prerr != ENOPROTOOPT)
			error = prerr;
	}
	sounlock(so);
	return error;
}

/*
 * so_setsockopt() is a wrapper providing a sockopt structure for sosetopt()
 */
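/*
 * For example (illustrative only), an in-kernel caller could enable
 * keep-alives on 'so' with:
 *
 *	int one = 1;
 *	error = so_setsockopt(curlwp, so, SOL_SOCKET, SO_KEEPALIVE,
 *	    &one, sizeof(one));
 */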
int
so_setsockopt(struct lwp *l, struct socket *so, int level, int name,
    const void *val, size_t valsize)
{
	struct sockopt sopt;
	int error;

	KASSERT(valsize == 0 || val != NULL);

	sockopt_init(&sopt, level, name, valsize);
	sockopt_set(&sopt, val, valsize);

	error = sosetopt(so, &sopt);

	sockopt_destroy(&sopt);

	return error;
}

/*
 * internal get SOL_SOCKET options
 */
static int
sogetopt1(struct socket *so, struct sockopt *sopt)
{
	int error, optval, opt;
	struct linger l;
	struct timeval tv;

	switch ((opt = sopt->sopt_name)) {

	case SO_ACCEPTFILTER:
		error = accept_filt_getopt(so, sopt);
		break;

	case SO_LINGER:
		l.l_onoff = (so->so_options & SO_LINGER) ? 1 : 0;
		l.l_linger = so->so_linger;

		error = sockopt_set(sopt, &l, sizeof(l));
		break;

	case SO_USELOOPBACK:
	case SO_DONTROUTE:
	case SO_DEBUG:
	case SO_KEEPALIVE:
	case SO_REUSEADDR:
	case SO_REUSEPORT:
	case SO_BROADCAST:
	case SO_OOBINLINE:
	case SO_TIMESTAMP:
	case SO_NOSIGPIPE:
#ifdef SO_OTIMESTAMP
	case SO_OTIMESTAMP:
#endif
	case SO_ACCEPTCONN:
		error = sockopt_setint(sopt, (so->so_options & opt) ? 1 : 0);
		break;

	case SO_TYPE:
		error = sockopt_setint(sopt, so->so_type);
		break;

	case SO_ERROR:
		error = sockopt_setint(sopt, so->so_error);
		so->so_error = 0;
		break;

	case SO_SNDBUF:
		error = sockopt_setint(sopt, so->so_snd.sb_hiwat);
		break;

	case SO_RCVBUF:
		error = sockopt_setint(sopt, so->so_rcv.sb_hiwat);
		break;

	case SO_SNDLOWAT:
		error = sockopt_setint(sopt, so->so_snd.sb_lowat);
		break;

	case SO_RCVLOWAT:
		error = sockopt_setint(sopt, so->so_rcv.sb_lowat);
		break;

#ifdef COMPAT_50
	case SO_OSNDTIMEO:
	case SO_ORCVTIMEO: {
		struct timeval50 otv;

		optval = (opt == SO_OSNDTIMEO ?
		    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

		otv.tv_sec = optval / hz;
		otv.tv_usec = (optval % hz) * tick;

		error = sockopt_set(sopt, &otv, sizeof(otv));
		break;
	}
#endif /* COMPAT_50 */

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		optval = (opt == SO_SNDTIMEO ?
		    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

		tv.tv_sec = optval / hz;
		tv.tv_usec = (optval % hz) * tick;

		error = sockopt_set(sopt, &tv, sizeof(tv));
		break;

	case SO_OVERFLOWED:
		error = sockopt_setint(sopt, so->so_rcv.sb_overflowed);
		break;

	default:
		error = ENOPROTOOPT;
		break;
	}

	return (error);
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error;

	solock(so);
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			error = ((*so->so_proto->pr_ctloutput)
			    (PRCO_GETOPT, so, sopt));
		} else
			error = (ENOPROTOOPT);
	} else {
		error = sogetopt1(so, sopt);
	}
	sounlock(so);
	return (error);
}

/*
 * alloc sockopt data buffer
 * - will be released at destroy
 */
2049 static int
2050 sockopt_alloc(struct sockopt *sopt, size_t len, km_flag_t kmflag)
2051 {
2052
2053 KASSERT(sopt->sopt_size == 0);
2054
2055 if (len > sizeof(sopt->sopt_buf)) {
2056 sopt->sopt_data = kmem_zalloc(len, kmflag);
2057 if (sopt->sopt_data == NULL)
2058 return ENOMEM;
2059 } else
2060 sopt->sopt_data = sopt->sopt_buf;
2061
2062 sopt->sopt_size = len;
2063 return 0;
2064 }
2065
2066 /*
2067 * initialise sockopt storage
2068 * - MAY sleep during allocation
2069 */
2070 void
2071 sockopt_init(struct sockopt *sopt, int level, int name, size_t size)
2072 {
2073
2074 memset(sopt, 0, sizeof(*sopt));
2075
2076 sopt->sopt_level = level;
2077 sopt->sopt_name = name;
2078 (void)sockopt_alloc(sopt, size, KM_SLEEP);
2079 }
2080
2081 /*
2082 * destroy sockopt storage
2083 * - will release any held memory references
2084 */
2085 void
2086 sockopt_destroy(struct sockopt *sopt)
2087 {
2088
2089 if (sopt->sopt_data != sopt->sopt_buf)
2090 kmem_free(sopt->sopt_data, sopt->sopt_size);
2091
2092 memset(sopt, 0, sizeof(*sopt));
2093 }
2094
2095 /*
2096 * set sockopt value
2097 * - value is copied into sockopt
2098 * - memory is allocated when necessary, will not sleep
2099 */
2100 int
2101 sockopt_set(struct sockopt *sopt, const void *buf, size_t len)
2102 {
2103 int error;
2104
2105 if (sopt->sopt_size == 0) {
2106 error = sockopt_alloc(sopt, len, KM_NOSLEEP);
2107 if (error)
2108 return error;
2109 }
2110
2111 if (sopt->sopt_size < len)
2112 return EINVAL;
2113
2114 memcpy(sopt->sopt_data, buf, len);
2115 sopt->sopt_retsize = len;
2116
2117 return 0;
2118 }
2119
2120 /*
2121 * common case of set sockopt integer value
2122 */
2123 int
2124 sockopt_setint(struct sockopt *sopt, int val)
2125 {
2126
2127 return sockopt_set(sopt, &val, sizeof(int));
2128 }
2129
2130 /*
2131 * get sockopt value
2132 * - correct size must be given
2133 */
2134 int
2135 sockopt_get(const struct sockopt *sopt, void *buf, size_t len)
2136 {
2137
2138 if (sopt->sopt_size != len)
2139 return EINVAL;
2140
2141 memcpy(buf, sopt->sopt_data, len);
2142 return 0;
2143 }
2144
2145 /*
2146 * common case of get sockopt integer value
2147 */
2148 int
2149 sockopt_getint(const struct sockopt *sopt, int *valp)
2150 {
2151
2152 return sockopt_get(sopt, valp, sizeof(int));
2153 }

/*
 * set sockopt value from mbuf
 * - ONLY for legacy code
 * - mbuf is released by sockopt
 * - will not sleep
 */
int
sockopt_setmbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t len;
	int error;

	len = m_length(m);

	if (sopt->sopt_size == 0) {
		error = sockopt_alloc(sopt, len, KM_NOSLEEP);
		if (error)
			return error;
	}

	if (sopt->sopt_size < len)
		return EINVAL;

	m_copydata(m, 0, len, sopt->sopt_data);
	m_freem(m);
	sopt->sopt_retsize = len;

	return 0;
}

/*
 * get sockopt value into mbuf
 * - ONLY for legacy code
 * - mbuf to be released by the caller
 * - will not sleep
 */
struct mbuf *
sockopt_getmbuf(const struct sockopt *sopt)
{
	struct mbuf *m;

	if (sopt->sopt_size > MCLBYTES)
		return NULL;

	m = m_get(M_DONTWAIT, MT_SOOPTS);
	if (m == NULL)
		return NULL;

	if (sopt->sopt_size > MLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return NULL;
		}
	}

	memcpy(mtod(m, void *), sopt->sopt_data, sopt->sopt_size);
	m->m_len = sopt->sopt_size;

	return m;
}
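
/*
 * Sketch (legacy bridge, illustrative): pre-sockopt protocol code can
 * convert in either direction; note the asymmetric ownership rules
 * documented above.
 *
 *	error = sockopt_setmbuf(sopt, m);	(mbuf consumed)
 *	...
 *	m = sockopt_getmbuf(sopt);		(caller must m_freem(m))
 */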

void
sohasoutofband(struct socket *so)
{

	fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so);
	selnotify(&so->so_rcv.sb_sel, POLLPRI | POLLRDBAND, NOTE_SUBMIT);
}
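
/*
 * Userland view (illustrative assumption): a process that has claimed
 * ownership of the socket receives the SIGURG raised above when
 * out-of-band data arrives; "urg_handler" is hypothetical.
 *
 *	fcntl(s, F_SETOWN, getpid());
 *	signal(SIGURG, urg_handler);
 */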

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_socket;
	solock(so);
	SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	sounlock(so);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;
	int rv;

	so = ((file_t *)kn->kn_obj)->f_socket;
	if (hint != NOTE_SUBMIT)
		solock(so);
	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error)
		rv = 1;
	else if (kn->kn_sfflags & NOTE_LOWAT)
		rv = (kn->kn_data >= kn->kn_sdata);
	else
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	if (hint != NOTE_SUBMIT)
		sounlock(so);
	return rv;
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_socket;
	solock(so);
	SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	sounlock(so);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;
	int rv;

	so = ((file_t *)kn->kn_obj)->f_socket;
	if (hint != NOTE_SUBMIT)
		solock(so);
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error)
		rv = 1;
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		rv = 0;
	else if (kn->kn_sfflags & NOTE_LOWAT)
		rv = (kn->kn_data >= kn->kn_sdata);
	else
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	if (hint != NOTE_SUBMIT)
		sounlock(so);
	return rv;
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so;
	int rv;

	so = ((file_t *)kn->kn_obj)->f_socket;

	/*
	 * Set kn_data to number of incoming connections, not
	 * counting partial (incomplete) connections.
	 */
	if (hint != NOTE_SUBMIT)
		solock(so);
	kn->kn_data = so->so_qlen;
	rv = (kn->kn_data > 0);
	if (hint != NOTE_SUBMIT)
		sounlock(so);
	return rv;
}

static const struct filterops solisten_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_sordetach,
	.f_event = filt_solisten,
};

static const struct filterops soread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};

static const struct filterops sowrite_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so;
	struct sockbuf *sb;

	so = ((file_t *)kn->kn_obj)->f_socket;
	solock(so);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		sounlock(so);
		return (EINVAL);
	}
	SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	sounlock(so);
	return (0);
}
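
/*
 * Userland sketch (assumption, not part of this file): registering
 * EVFILT_READ on a listening socket selects solisten_filtops above,
 * so kevent(2) reports the completed-connection count in kev.data.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */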

static int
sodopoll(struct socket *so, int events)
{
	int revents;

	revents = 0;

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowritable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	return revents;
}

int
sopoll(struct socket *so, int events)
{
	int revents = 0;

#ifndef DIAGNOSTIC
	/*
	 * Do a quick, unlocked check in expectation that the socket
	 * will be ready for I/O.  Don't do this check if DIAGNOSTIC,
	 * as the solocked() assertions will fail.
	 */
	if ((revents = sodopoll(so, events)) != 0)
		return revents;
#endif

	solock(so);
	if ((revents = sodopoll(so, events)) == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(curlwp, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_NOTIFY;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curlwp, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_NOTIFY;
		}
	}
	sounlock(so);

	return revents;
}
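
/*
 * Userland sketch (assumption): a poll(2) on a socket descriptor
 * reaches sopoll() through the file-ops layer; if nothing is ready,
 * the calling lwp is recorded on the matching sockbuf for wakeup.
 * "timeout_ms" below is hypothetical.
 *
 *	struct pollfd pfd = { .fd = s, .events = POLLIN };
 *
 *	poll(&pfd, 1, timeout_ms);
 */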

struct mbuf **
sbsavetimestamp(int opt, struct mbuf *m, struct mbuf **mp)
{
	struct timeval tv;
	microtime(&tv);

#ifdef SO_OTIMESTAMP
	if (opt & SO_OTIMESTAMP) {
		struct timeval50 tv50;

		timeval_to_timeval50(&tv, &tv50);
		*mp = sbcreatecontrol(&tv50, sizeof(tv50),
		    SCM_OTIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	} else
#endif

	if (opt & SO_TIMESTAMP) {
		*mp = sbcreatecontrol(&tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	return mp;
}
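
/*
 * Userland sketch (assumption): with the SO_TIMESTAMP option set, the
 * control mbuf built above arrives as an SCM_TIMESTAMP cmsg holding a
 * struct timeval.
 *
 *	int on = 1;
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(s, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
 *	...
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_SOCKET &&
 *		    cmsg->cmsg_type == SCM_TIMESTAMP)
 *			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
 */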

#include <sys/sysctl.h>

static int sysctl_kern_somaxkva(SYSCTLFN_PROTO);
static int sysctl_kern_sbmax(SYSCTLFN_PROTO);

/*
 * sysctl helper routine for kern.somaxkva.  Ensures that the given
 * value is not too small.
 * (XXX should we maybe make sure it's not too large as well?)
 */
static int
sysctl_kern_somaxkva(SYSCTLFN_ARGS)
{
	int error, new_somaxkva;
	struct sysctlnode node;

	new_somaxkva = somaxkva;
	node = *rnode;
	node.sysctl_data = &new_somaxkva;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (new_somaxkva < (16 * 1024 * 1024)) /* sanity */
		return (EINVAL);

	mutex_enter(&so_pendfree_lock);
	somaxkva = new_somaxkva;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);

	return (error);
}

/*
 * sysctl helper routine for kern.sbmax.  Basically just ensures that
 * any new value is not too small.
 */
static int
sysctl_kern_sbmax(SYSCTLFN_ARGS)
{
	int error, new_sbmax;
	struct sysctlnode node;

	new_sbmax = sb_max;
	node = *rnode;
	node.sysctl_data = &new_sbmax;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	KERNEL_LOCK(1, NULL);
	error = sb_max_set(new_sbmax);
	KERNEL_UNLOCK_ONE(NULL);

	return (error);
}

static void
sysctl_kern_socket_setup(void)
{

	KASSERT(socket_sysctllog == NULL);

	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "somaxkva",
	    SYSCTL_DESCR("Maximum amount of kernel memory to be "
		"used for socket buffers"),
	    sysctl_kern_somaxkva, 0, NULL, 0,
	    CTL_KERN, KERN_SOMAXKVA, CTL_EOL);

	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "sbmax",
	    SYSCTL_DESCR("Maximum socket buffer size"),
	    sysctl_kern_sbmax, 0, NULL, 0,
	    CTL_KERN, KERN_SBMAX, CTL_EOL);
}
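
/*
 * Usage note (illustrative): the nodes registered above surface as
 * kern.somaxkva and kern.sbmax, e.g.
 *
 *	sysctl -w kern.somaxkva=33554432
 *
 * subject to the 16MB floor enforced by sysctl_kern_somaxkva().
 */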