/*	$NetBSD: uipc_socket.c,v 1.272 2019/03/31 19:54:36 maxv Exp $	*/

/*
 * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004 Robert Watson
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.6 (Berkeley) 5/2/95
 */

/*
 * Socket operation routines.
 *
 * These routines are called by the routines in sys_socket.c or from a
 * system process, and implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.272 2019/03/31 19:54:36 maxv Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_sock_counters.h"
#include "opt_sosend_loan.h"
#include "opt_mbuftrace.h"
#include "opt_somaxkva.h"
#include "opt_multiprocessor.h"	/* XXX */
#include "opt_sctp.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/uidinfo.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/kthread.h>

#ifdef COMPAT_50
#include <compat/sys/time.h>
#include <compat/sys/socket.h>
#endif

#include <uvm/uvm_extern.h>
#include <uvm/uvm_loan.h>
#include <uvm/uvm_page.h>

MALLOC_DEFINE(M_SONAME, "soname", "socket name");

extern const struct fileops socketops;

static int		sooptions;
extern int		somaxconn;	/* patchable (XXX sysctl) */
int			somaxconn = SOMAXCONN;
kmutex_t		*softnet_lock;

#ifdef SOSEND_COUNTERS
#include <sys/device.h>

static struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "loan big");
static struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy big");
static struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy small");
static struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "kva limit");

#define	SOSEND_COUNTER_INCR(ev)		(ev)->ev_count++

EVCNT_ATTACH_STATIC(sosend_loan_big);
EVCNT_ATTACH_STATIC(sosend_copy_big);
EVCNT_ATTACH_STATIC(sosend_copy_small);
EVCNT_ATTACH_STATIC(sosend_kvalimit);
#else

#define	SOSEND_COUNTER_INCR(ev)		/* nothing */

#endif /* SOSEND_COUNTERS */

#if defined(SOSEND_NO_LOAN) || defined(MULTIPROCESSOR)
int sock_loan_thresh = -1;
#else
int sock_loan_thresh = 4096;
#endif

static kmutex_t so_pendfree_lock;
static struct mbuf *so_pendfree = NULL;

#ifndef SOMAXKVA
#define	SOMAXKVA (16 * 1024 * 1024)
#endif
int somaxkva = SOMAXKVA;
static int socurkva;
static kcondvar_t socurkva_cv;

static kauth_listener_t socket_listener;

#define	SOCK_LOAN_CHUNK		65536

static void sopendfree_thread(void *);
static kcondvar_t pendfree_thread_cv;
static lwp_t *sopendfree_lwp;

static void sysctl_kern_socket_setup(void);
static struct sysctllog *socket_sysctllog;

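/*
 * sokvareserve: reserve "len" bytes of loan KVA against the somaxkva
 * limit, sleeping interruptibly while over the limit.  Returns the
 * reserved length, or 0 if interrupted by a signal.
 */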
static vsize_t
sokvareserve(struct socket *so, vsize_t len)
{
	int error;

	mutex_enter(&so_pendfree_lock);
	while (socurkva + len > somaxkva) {
		SOSEND_COUNTER_INCR(&sosend_kvalimit);
		error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock);
		if (error) {
			len = 0;
			break;
		}
	}
	socurkva += len;
	mutex_exit(&so_pendfree_lock);
	return len;
}

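/*
 * sokvaunreserve: release a loan KVA reservation and wake any waiters.
 */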
static void
sokvaunreserve(vsize_t len)
{

	mutex_enter(&so_pendfree_lock);
	socurkva -= len;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);
}

/*
 * sokvaalloc: allocate kva for loan.
 */
vaddr_t
sokvaalloc(vaddr_t sva, vsize_t len, struct socket *so)
{
	vaddr_t lva;

	if (sokvareserve(so, len) == 0)
		return 0;

	lva = uvm_km_alloc(kernel_map, len, atop(sva) & uvmexp.colormask,
	    UVM_KMF_COLORMATCH | UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (lva == 0) {
		sokvaunreserve(len);
		return 0;
	}

	return lva;
}

/*
 * sokvafree: free kva for loan.
 */
void
sokvafree(vaddr_t sva, vsize_t len)
{

	uvm_km_free(kernel_map, sva, len, UVM_KMF_VAONLY);
	sokvaunreserve(len);
}

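/*
 * sodoloanfree: unmap and unloan the pages backing a loaned buffer,
 * then return the KVA to kernel_map.
 */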
static void
sodoloanfree(struct vm_page **pgs, void *buf, size_t size)
{
	vaddr_t sva, eva;
	vsize_t len;
	int npgs;

	KASSERT(pgs != NULL);

	eva = round_page((vaddr_t) buf + size);
	sva = trunc_page((vaddr_t) buf);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	pmap_kremove(sva, len);
	pmap_update(pmap_kernel());
	uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE);
	sokvafree(sva, len);
}

/*
 * sopendfree_thread: free mbufs on "pendfree" list.  Unlock and relock
 * so_pendfree_lock when freeing mbufs.
 */
static void
sopendfree_thread(void *v)
{
	struct mbuf *m, *next;
	size_t rv;

	mutex_enter(&so_pendfree_lock);

	for (;;) {
		rv = 0;
		while (so_pendfree != NULL) {
			m = so_pendfree;
			so_pendfree = NULL;
			mutex_exit(&so_pendfree_lock);

			for (; m != NULL; m = next) {
				next = m->m_next;
				KASSERT((~m->m_flags & (M_EXT|M_EXT_PAGES)) ==
				    0);
				KASSERT(m->m_ext.ext_refcnt == 0);

				rv += m->m_ext.ext_size;
				sodoloanfree(m->m_ext.ext_pgs, m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				pool_cache_put(mb_cache, m);
			}

			mutex_enter(&so_pendfree_lock);
		}
		if (rv)
			cv_broadcast(&socurkva_cv);
		cv_wait(&pendfree_thread_cv, &so_pendfree_lock);
	}
	panic("sopendfree_thread");
	/* NOTREACHED */
}

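/*
 * soloanfree: free callback for loaned mbuf storage; see the MEXTADD
 * call in sosend_loan.
 */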
void
soloanfree(struct mbuf *m, void *buf, size_t size, void *arg)
{

	KASSERT(m != NULL);

	/*
	 * postpone freeing mbuf.
	 *
	 * we can't do it in interrupt context
	 * because we need to put kva back to kernel_map.
	 */

	mutex_enter(&so_pendfree_lock);
	m->m_next = so_pendfree;
	so_pendfree = m;
	cv_signal(&pendfree_thread_cv);
	mutex_exit(&so_pendfree_lock);
}

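/*
 * sosend_loan: loan up to SOCK_LOAN_CHUNK bytes of the user's buffer
 * into "m" instead of copying.  Returns the number of bytes loaned,
 * or 0 if the data cannot be loaned.
 */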
static long
sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space)
{
	struct iovec *iov = uio->uio_iov;
	vaddr_t sva, eva;
	vsize_t len;
	vaddr_t lva;
	int npgs, error;
	vaddr_t va;
	int i;

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace))
		return 0;

	if (iov->iov_len < (size_t) space)
		space = iov->iov_len;
	if (space > SOCK_LOAN_CHUNK)
		space = SOCK_LOAN_CHUNK;

	eva = round_page((vaddr_t) iov->iov_base + space);
	sva = trunc_page((vaddr_t) iov->iov_base);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	KASSERT(npgs <= M_EXT_MAXPAGES);

	lva = sokvaalloc(sva, len, so);
	if (lva == 0)
		return 0;

	error = uvm_loan(&uio->uio_vmspace->vm_map, sva, len,
	    m->m_ext.ext_pgs, UVM_LOAN_TOPAGE);
	if (error) {
		sokvafree(lva, len);
		return 0;
	}

	for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE)
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]),
		    VM_PROT_READ, 0);
	pmap_update(pmap_kernel());

	lva += (vaddr_t) iov->iov_base & PAGE_MASK;

	MEXTADD(m, (void *) lva, space, M_MBUF, soloanfree, so);
	m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP;

	uio->uio_resid -= space;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + space;
	uio->uio_iov->iov_len -= space;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return space;
}

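/*
 * socket_listener_cb: kauth(9) listener for the network scope; decides
 * socket open/drop/cansee and bind requests for ordinary processes.
 */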
static int
socket_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)(uintptr_t)arg0;

	if ((action != KAUTH_NETWORK_SOCKET) &&
	    (action != KAUTH_NETWORK_BIND))
		return result;

	switch (req) {
	case KAUTH_REQ_NETWORK_BIND_PORT:
		result = KAUTH_RESULT_ALLOW;
		break;

	case KAUTH_REQ_NETWORK_SOCKET_DROP: {
		/* Normal users can only drop their own connections. */
		struct socket *so = (struct socket *)arg1;

		if (so->so_cred && proc_uidmatch(cred, so->so_cred) == 0)
			result = KAUTH_RESULT_ALLOW;

		break;
	}

	case KAUTH_REQ_NETWORK_SOCKET_OPEN:
		/* We allow "raw" routing/bluetooth sockets to anyone. */
		switch ((u_long)arg1) {
		case PF_ROUTE:
		case PF_OROUTE:
		case PF_BLUETOOTH:
		case PF_CAN:
			result = KAUTH_RESULT_ALLOW;
			break;
		default:
			/* Privileged, let secmodel handle this. */
			if ((u_long)arg2 == SOCK_RAW)
				break;
			result = KAUTH_RESULT_ALLOW;
			break;
		}
		break;

	case KAUTH_REQ_NETWORK_SOCKET_CANSEE:
		result = KAUTH_RESULT_ALLOW;

		break;

	default:
		break;
	}

	return result;
}

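/*
 * soinit: initialise the locks, condition variables and sysctl nodes
 * used by this module, and register the kauth network listener.
 */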
void
soinit(void)
{

	sysctl_kern_socket_setup();

	mutex_init(&so_pendfree_lock, MUTEX_DEFAULT, IPL_VM);
	softnet_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	cv_init(&socurkva_cv, "sokva");
	cv_init(&pendfree_thread_cv, "sopendfr");
	soinit2();

	/* Set the initial adjusted socket buffer size. */
	if (sb_max_set(sb_max))
		panic("bad initial sb_max value: %lu", sb_max);

	socket_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    socket_listener_cb, NULL);
}

void
soinit1(void)
{
	int error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    sopendfree_thread, NULL, &sopendfree_lwp, "sopendfree");
	if (error)
		panic("soinit1 %d", error);
}

/*
 * socreate: create a new socket of the specified type and the protocol.
 *
 * => Caller may specify another socket for lock sharing (must not be held).
 * => Returns the new socket without lock held.
 */
int
socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l,
    struct socket *lockso)
{
	const struct protosw *prp;
	struct socket *so;
	uid_t uid;
	int error;
	kmutex_t *lock;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET,
	    KAUTH_REQ_NETWORK_SOCKET_OPEN, KAUTH_ARG(dom), KAUTH_ARG(type),
	    KAUTH_ARG(proto));
	if (error != 0)
		return error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL) {
		/* no support for domain */
		if (pffinddomain(dom) == 0)
			return EAFNOSUPPORT;
		/* no support for socket type */
		if (proto == 0 && type != 0)
			return EPROTOTYPE;
		return EPROTONOSUPPORT;
	}
	if (prp->pr_usrreqs == NULL)
		return EPROTONOSUPPORT;
	if (prp->pr_type != type)
		return EPROTOTYPE;

	so = soget(true);
	so->so_type = type;
	so->so_proto = prp;
	so->so_send = sosend;
	so->so_receive = soreceive;
	so->so_options = sooptions;
#ifdef MBUFTRACE
	so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_mowner = &prp->pr_domain->dom_mowner;
#endif
	uid = kauth_cred_geteuid(l->l_cred);
	so->so_uidinfo = uid_find(uid);
	so->so_cpid = l->l_proc->p_pid;

	/*
	 * Lock assigned and taken during PCB attach, unless we share
	 * the lock with another socket, e.g. socketpair(2) case.
	 */
	if (lockso) {
		lock = lockso->so_lock;
		so->so_lock = lock;
		mutex_obj_hold(lock);
		mutex_enter(lock);
	}

	/* Attach the PCB (returns with the socket lock held). */
	error = (*prp->pr_usrreqs->pr_attach)(so, proto);
	KASSERT(solocked(so));

	if (error) {
		KASSERT(so->so_pcb == NULL);
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return error;
	}
	so->so_cred = kauth_cred_dup(l->l_cred);
	sounlock(so);

	*aso = so;
	return 0;
}

/*
 * fsocreate: create a socket and a file descriptor associated with it.
 *
 * => On success, write file descriptor to fdout and return zero.
 * => On failure, return non-zero; *fdout will be undefined.
 */
int
fsocreate(int domain, struct socket **sop, int type, int proto, int *fdout)
{
	lwp_t *l = curlwp;
	int error, fd, flags;
	struct socket *so;
	struct file *fp;

	if ((error = fd_allocfile(&fp, &fd)) != 0) {
		return error;
	}
	flags = type & SOCK_FLAGS_MASK;
	fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0);
	fp->f_flag = FREAD|FWRITE|((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0)|
	    ((flags & SOCK_NOSIGPIPE) ? FNOSIGPIPE : 0);
	fp->f_type = DTYPE_SOCKET;
	fp->f_ops = &socketops;

	type &= ~SOCK_FLAGS_MASK;
	error = socreate(domain, &so, type, proto, l, NULL);
	if (error) {
		fd_abort(curproc, fp, fd);
		return error;
	}
	if (flags & SOCK_NONBLOCK) {
		so->so_state |= SS_NBIO;
	}
	fp->f_socket = so;
	fd_affix(curproc, fp, fd);

	if (sop != NULL) {
		*sop = so;
	}
	*fdout = fd;
	return error;
}

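/*
 * sofamily: return the address family of a socket, or AF_UNSPEC.
 */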
int
sofamily(const struct socket *so)
{
	const struct protosw *pr;
	const struct domain *dom;

	if ((pr = so->so_proto) == NULL)
		return AF_UNSPEC;
	if ((dom = pr->pr_domain) == NULL)
		return AF_UNSPEC;
	return dom->dom_family;
}

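/*
 * sobind: bind a socket to a local address via the protocol's pr_bind.
 */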
int
sobind(struct socket *so, struct sockaddr *nam, struct lwp *l)
{
	int error;

	solock(so);
	if (nam->sa_family != so->so_proto->pr_domain->dom_family) {
		sounlock(so);
		return EAFNOSUPPORT;
	}
	error = (*so->so_proto->pr_usrreqs->pr_bind)(so, nam, l);
	sounlock(so);
	return error;
}

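/*
 * solisten: switch a socket into listening state, clamping the backlog
 * to somaxconn.  Rolls the state back if the protocol rejects it.
 */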
int
solisten(struct socket *so, int backlog, struct lwp *l)
{
	int error;
	short oldopt, oldqlimit;

	solock(so);
	if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) != 0) {
		sounlock(so);
		return EINVAL;
	}
	oldopt = so->so_options;
	oldqlimit = so->so_qlimit;
	if (TAILQ_EMPTY(&so->so_q))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = uimin(backlog, somaxconn);

	error = (*so->so_proto->pr_usrreqs->pr_listen)(so, l);
	if (error != 0) {
		so->so_options = oldopt;
		so->so_qlimit = oldqlimit;
		sounlock(so);
		return error;
	}
	sounlock(so);
	return 0;
}

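/*
 * sofree: release a socket that has no PCB and no file reference left.
 * Called with the socket locked; returns with it unlocked.
 */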
void
sofree(struct socket *so)
{
	u_int refs;

	KASSERT(solocked(so));

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		sounlock(so);
		return;
	}
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0)) {
			sounlock(so);
			return;
		}
	}
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_rcv.sb_hiwat, 0,
		    RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_snd.sb_hiwat, 0,
		    RLIM_INFINITY);
	sbrelease(&so->so_snd, so);
	KASSERT(!cv_has_waiters(&so->so_cv));
	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
	sorflush(so);
	refs = so->so_aborting;	/* XXX */
	/* Remove accept filter if one is present. */
	if (so->so_accf != NULL)
		(void)accept_filt_clear(so);
	sounlock(so);
	if (refs == 0)		/* XXX */
		soput(so);
}

/*
 * soclose: close a socket on last file table reference removal.
 * Initiate disconnect if connected.  Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	struct socket *so2;
	int error = 0;

	solock(so);
	if (so->so_options & SO_ACCEPTCONN) {
		for (;;) {
			if ((so2 = TAILQ_FIRST(&so->so_q0)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 0);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			if ((so2 = TAILQ_FIRST(&so->so_q)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 1);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			break;
		}
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & (SS_ISDISCONNECTING|SS_NBIO)) ==
			    (SS_ISDISCONNECTING|SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sowait(so, true, so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
 drop:
	if (so->so_pcb) {
		KASSERT(solocked(so));
		(*so->so_proto->pr_usrreqs->pr_detach)(so);
	}
 discard:
	KASSERT((so->so_state & SS_NOFDREF) == 0);
	kauth_cred_free(so->so_cred);
	so->so_state |= SS_NOFDREF;
	sofree(so);
	return error;
}

/*
 * Must be called with the socket locked.  Will return with it unlocked.
 */
int
soabort(struct socket *so)
{
	u_int refs;
	int error;

	KASSERT(solocked(so));
	KASSERT(so->so_head == NULL);

	so->so_aborting++;		/* XXX */
	error = (*so->so_proto->pr_usrreqs->pr_abort)(so);
	refs = --so->so_aborting;	/* XXX */
	if (error || (refs == 0)) {
		sofree(so);
	} else {
		sounlock(so);
	}
	return error;
}

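/*
 * soaccept: accept a connection on a socket taken off the listen
 * queue.  Called with the socket locked.
 */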
int
soaccept(struct socket *so, struct sockaddr *nam)
{
	int error;

	KASSERT(solocked(so));
	KASSERT((so->so_state & SS_NOFDREF) != 0);

	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreqs->pr_accept)(so, nam);
	else
		error = ECONNABORTED;

	return error;
}

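/*
 * soconnect: initiate a connection to the given address.  Called with
 * the socket locked.
 */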
int
soconnect(struct socket *so, struct sockaddr *nam, struct lwp *l)
{
	int error;

	KASSERT(solocked(so));

	if (so->so_options & SO_ACCEPTCONN)
		return EOPNOTSUPP;
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		if (nam->sa_family != so->so_proto->pr_domain->dom_family) {
			return EAFNOSUPPORT;
		}
		error = (*so->so_proto->pr_usrreqs->pr_connect)(so, nam, l);
	}

	return error;
}

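/*
 * soconnect2: connect two sockets to each other, as for socketpair(2).
 */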
int
soconnect2(struct socket *so1, struct socket *so2)
{
	KASSERT(solocked2(so1, so2));

	return (*so1->so_proto->pr_usrreqs->pr_connect2)(so1, so2);
}

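/*
 * sodisconnect: initiate a disconnect on a connected socket.
 */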
int
sodisconnect(struct socket *so)
{
	int error;

	KASSERT(solocked(so));

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
	} else if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
	} else {
		error = (*so->so_proto->pr_usrreqs->pr_disconnect)(so);
	}
	return error;
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct lwp *l)
{
	struct mbuf **mp, *m;
	long space, len, resid, clen, mlen;
	int error, s, dontroute, atomic;
	short wakeup_state = 0;

	clen = 0;

	/*
	 * solock() provides atomicity of access.  splsoftnet() prevents
	 * protocol processing soft interrupts from interrupting us and
	 * blocking (expensive).
	 */
	s = splsoftnet();
	solock(so);
	atomic = sosendallatonce(so) || top;
	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	l->l_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (resid || clen == 0) {
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				error = EWOULDBLOCK;
				goto release;
			}
			sbunlock(&so->so_snd);
			if (wakeup_state & SS_RESTARTSYS) {
				error = ERESTART;
				goto out;
			}
			error = sbwait(&so->so_snd);
			if (error)
				goto out;
			wakeup_state = so->so_state;
			goto restart;
		}
		wakeup_state = 0;
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				sounlock(so);
				splx(s);
				if (top == NULL) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m_reset_rcvif(m);
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				MCLAIM(m, so->so_snd.sb_mowner);
				if (sock_loan_thresh >= 0 &&
				    uio->uio_iov->iov_len >= sock_loan_thresh &&
				    space >= sock_loan_thresh &&
				    (len = sosend_loan(so, uio, m,
				    space)) != 0) {
					SOSEND_COUNTER_INCR(&sosend_loan_big);
					space -= len;
					goto have_data;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					SOSEND_COUNTER_INCR(&sosend_copy_big);
					m_clget(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
					space -= len;
				} else {
 nopages:
					SOSEND_COUNTER_INCR(&sosend_copy_small);
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						m_align(m, len);
				}
				error = uiomove(mtod(m, void *), (int)len, uio);
 have_data:
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				s = splsoftnet();
				solock(so);
				if (error != 0)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				goto release;
			}
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			if (flags & MSG_OOB) {
				error = (*so->so_proto->pr_usrreqs->pr_sendoob)(
				    so, top, control);
			} else {
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    top, addr, control, l);
			}
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error != 0)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	sounlock(so);
	splx(s);
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return error;
}


/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
static void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	KASSERT(solocked(sb->sb_so));

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the sockbuf by sbappend*.  In
 * particular, each record (mbufs linked through m_next) must begin with an
 * address if the protocol so specifies, followed by an optional mbuf or mbufs
 * containing ancillary data, and then zero or more mbufs of data.
 *
 * In order to avoid blocking network interrupts for the entire time here, we
 * splx() while doing the actual copy to user space.  Although the sockbuf is
 * locked, new data may still be appended, and thus we must maintain
 * consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an mbuf
 * **mp0 for use in returning the chain.  The uio is then used only for the
 * count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct lwp *l = curlwp;
	struct mbuf *m, **mp, *mt;
	size_t len, offset, moff, orig_resid;
	int atomic, flags, error, s, type;
	const struct protosw *pr;
	struct mbuf *nextrecord;
	int mbuf_removed = 0;
	const struct domain *dom;
	short wakeup_state = 0;

	pr = so->so_proto;
	atomic = pr->pr_flags & PR_ATOMIC;
	dom = pr->pr_domain;
	mp = mp0;
	type = 0;
	orig_resid = uio->uio_resid;

	if (paddr != NULL)
		*paddr = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;

	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = (*pr->pr_usrreqs->pr_recvoob)(so, m, flags & MSG_PEEK);
		sounlock(so);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, void *),
			    MIN(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid > 0 && error == 0 && m);
 bad:
		if (m != NULL)
			m_freem(m);
		return error;
	}
	if (mp != NULL)
		*mp = NULL;

	/*
	 * solock() provides atomicity of access.  splsoftnet() prevents
	 * protocol processing soft interrupts from interrupting us and
	 * blocking (expensive).
	 */
	s = splsoftnet();
	solock(so);
 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so);
		splx(s);
		return error;
	}
	m = so->so_rcv.sb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL ||
	    ((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) &&
	    uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && !atomic)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error || so->so_rerror) {
			if (m != NULL)
				goto dontblock;
			if (so->so_error) {
				error = so->so_error;
				so->so_error = 0;
			} else {
				error = so->so_rerror;
				so->so_rerror = 0;
			}
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m != NULL)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(&so->so_rcv);
		if (wakeup_state & SS_RESTARTSYS)
			error = ERESTART;
		else
			error = sbwait(&so->so_rcv);
		if (error != 0) {
			sounlock(so);
			splx(s);
			return error;
		}
		wakeup_state = so->so_state;
		goto restart;
	}

 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket lock, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	if (l != NULL)
		l->l_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;

	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME);
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copym(m, 0, m->m_len, M_DONTWAIT);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			mbuf_removed = 1;
			if (paddr != NULL) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				m = so->so_rcv.sb_mb = m_free(m);
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}

	if (pr->pr_flags & PR_ADDR_OPT) {
		/*
		 * For SCTP we may be getting a whole message OR a partial
		 * delivery.
		 */
		if (m->m_type == MT_SONAME) {
			orig_resid = 0;
			if (flags & MSG_PEEK) {
				if (paddr)
					*paddr = m_copym(m, 0, m->m_len, M_DONTWAIT);
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				if (paddr) {
					*paddr = m;
					so->so_rcv.sb_mb = m->m_next;
					m->m_next = 0;
					m = so->so_rcv.sb_mb;
				} else {
					m = so->so_rcv.sb_mb = m_free(m);
				}
			}
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (__predict_false(m != NULL && m->m_type == MT_CONTROL)) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copym(m, 0, m->m_len, M_DONTWAIT);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sbsync(&so->so_rcv, nextrecord);

		for (; cm != NULL; cm = cmn) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			type = mtod(cm, struct cmsghdr *)->cmsg_type;
			if (controlp != NULL) {
				if (dom->dom_externalize != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					splx(s);
					error = (*dom->dom_externalize)(cm, l,
					    (flags & MSG_CMSG_CLOEXEC) ?
					    O_CLOEXEC : 0);
					s = splsoftnet();
					solock(so);
				}
				*controlp = cm;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (dom->dom_dispose != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					(*dom->dom_dispose)(cm);
					solock(so);
				}
				m_freem(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}

	/* If m is non-NULL, we have some data to read. */
	if (__predict_true(m != NULL)) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed, end the receive
		 * operation and do a short read.
		 */
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else if (m->m_type == MT_CONTROL) {
			break;
		}
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) {
			panic("%s: m_type=%d", __func__, m->m_type);
		}
#endif

		so->so_state &= ~SS_RCVATMARK;
		wakeup_state = 0;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == NULL) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			sounlock(so);
			splx(s);
			error = uiomove(mtod(m, char *) + moff, len, uio);
			s = splsoftnet();
			solock(so);
			if (error != 0) {
				/*
				 * If any part of the record has been removed
				 * (such as the MT_SONAME mbuf, which will
				 * happen when PR_ADDR, and thus also
				 * PR_ATOMIC, is set), then drop the entire
				 * record to maintain the atomicity of the
				 * receive operation.
				 *
				 * This avoids a later panic("receive 1a")
				 * when compiled with DIAGNOSTIC.
				 */
				if (m && mbuf_removed && atomic)
					(void) sbdroprecord(&so->so_rcv);

				goto release;
			}
		} else {
			uio->uio_resid -= len;
		}

		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
#ifdef SCTP
			if (m->m_flags & M_NOTIFICATION)
				flags |= MSG_NOTIFICATION;
#endif
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					m = so->so_rcv.sb_mb = m_free(m);
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else if (flags & MSG_PEEK) {
			moff += len;
		} else {
			if (mp != NULL) {
				mt = m_copym(m, 0, len, M_NOWAIT);
				if (__predict_false(mt == NULL)) {
					sounlock(so);
					mt = m_copym(m, 0, len, M_WAIT);
					solock(so);
				}
				*mp = mt;
			}
			m->m_data += len;
			m->m_len -= len;
			so->so_rcv.sb_cc -= len;
		}

		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;

		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_rerror ||
			    so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			if (wakeup_state & SS_RESTARTSYS)
				error = ERESTART;
			else
				error = sbwait(&so->so_rcv);
			if (error != 0) {
				sbunlock(&so->so_rcv);
				sounlock(so);
				splx(s);
				return 0;
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
			wakeup_state = so->so_state;
		}
	}

	if (m && atomic) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	sounlock(so);
	splx(s);
	return error;
}

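/*
 * soshutdown: shut down the read side, write side, or both sides of
 * a connection.
 */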
int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr;
	int error;

	KASSERT(solocked(so));

	pr = so->so_proto;
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return EINVAL;

	if (how == SHUT_RD || how == SHUT_RDWR) {
		sorflush(so);
		error = 0;
	}
	if (how == SHUT_WR || how == SHUT_RDWR)
		error = (*pr->pr_usrreqs->pr_shutdown)(so);

	return error;
}

void
sorestart(struct socket *so)
{
	/*
	 * An application has called close() on an fd on which another
	 * of its threads has called a socket system call.
	 * Mark this and wake everyone up, and code that would block again
	 * instead returns ERESTART.
	 * On system call re-entry the fd is validated and EBADF returned.
	 * Any other fd will block again on the 2nd syscall.
	 */
	solock(so);
	so->so_state |= SS_RESTARTSYS;
	cv_broadcast(&so->so_cv);
	cv_broadcast(&so->so_snd.sb_cv);
	cv_broadcast(&so->so_rcv.sb_cv);
	sounlock(so);
}

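/*
 * sorflush: discard everything in a socket's receive buffer, disposing
 * of any rights (e.g. passed file descriptors) it may hold.
 */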
void
sorflush(struct socket *so)
{
	struct sockbuf *sb, asb;
	const struct protosw *pr;

	KASSERT(solocked(so));

	sb = &so->so_rcv;
	pr = so->so_proto;
	socantrcvmore(so);
	sb->sb_flags |= SB_NOINTR;
	(void)sblock(sb, M_WAITOK);
	sbunlock(sb);
	asb = *sb;
	/*
	 * Clear most of the sockbuf structure, but leave some of the
	 * fields valid.
	 */
	memset(&sb->sb_startzero, 0,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) {
		sounlock(so);
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
		solock(so);
	}
	sbrelease(&asb, so);
}

/*
 * internal set SOL_SOCKET options
 */
static int
sosetopt1(struct socket *so, const struct sockopt *sopt)
{
	int error = EINVAL, opt;
	int optval = 0; /* XXX: gcc */
	struct linger l;
	struct timeval tv;

	switch ((opt = sopt->sopt_name)) {

	case SO_ACCEPTFILTER:
		error = accept_filt_setopt(so, sopt);
		KASSERT(solocked(so));
		break;

	case SO_LINGER:
		error = sockopt_get(sopt, &l, sizeof(l));
		solock(so);
		if (error)
			break;
		if (l.l_linger < 0 || l.l_linger > USHRT_MAX ||
		    l.l_linger > (INT_MAX / hz)) {
			error = EDOM;
			break;
		}
		so->so_linger = l.l_linger;
		if (l.l_onoff)
			so->so_options |= SO_LINGER;
		else
			so->so_options &= ~SO_LINGER;
		break;

	case SO_DEBUG:
	case SO_KEEPALIVE:
	case SO_DONTROUTE:
	case SO_USELOOPBACK:
	case SO_BROADCAST:
	case SO_REUSEADDR:
	case SO_REUSEPORT:
	case SO_OOBINLINE:
	case SO_TIMESTAMP:
	case SO_NOSIGPIPE:
	case SO_RERROR:
#ifdef SO_OTIMESTAMP
	case SO_OTIMESTAMP:
#endif
		error = sockopt_getint(sopt, &optval);
		solock(so);
		if (error)
			break;
		if (optval)
			so->so_options |= opt;
		else
			so->so_options &= ~opt;
		break;

	case SO_SNDBUF:
	case SO_RCVBUF:
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		error = sockopt_getint(sopt, &optval);
		solock(so);
		if (error)
			break;

		/*
		 * Values < 1 make no sense for any of these
		 * options, so disallow them.
		 */
		if (optval < 1) {
			error = EINVAL;
			break;
		}

		switch (opt) {
		case SO_SNDBUF:
			if (sbreserve(&so->so_snd, (u_long)optval, so) == 0) {
				error = ENOBUFS;
				break;
			}
			so->so_snd.sb_flags &= ~SB_AUTOSIZE;
			break;

		case SO_RCVBUF:
			if (sbreserve(&so->so_rcv, (u_long)optval, so) == 0) {
				error = ENOBUFS;
				break;
			}
			so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
			break;

		/*
		 * Make sure the low-water is never greater than
		 * the high-water.
		 */
		case SO_SNDLOWAT:
			if (optval > so->so_snd.sb_hiwat)
				optval = so->so_snd.sb_hiwat;

			so->so_snd.sb_lowat = optval;
			break;

		case SO_RCVLOWAT:
			if (optval > so->so_rcv.sb_hiwat)
				optval = so->so_rcv.sb_hiwat;

			so->so_rcv.sb_lowat = optval;
			break;
		}
		break;

#ifdef COMPAT_50
	case SO_OSNDTIMEO:
	case SO_ORCVTIMEO: {
		struct timeval50 otv;
		error = sockopt_get(sopt, &otv, sizeof(otv));
		if (error) {
			solock(so);
			break;
		}
		timeval50_to_timeval(&otv, &tv);
		opt = opt == SO_OSNDTIMEO ? SO_SNDTIMEO : SO_RCVTIMEO;
		error = 0;
		/*FALLTHROUGH*/
	}
#endif /* COMPAT_50 */

	/*FALLTHROUGH*/
	case SO_SNDTIMEO:
	/*FALLTHROUGH*/
	case SO_RCVTIMEO:
		if (error)
			error = sockopt_get(sopt, &tv, sizeof(tv));
		solock(so);
		if (error)
			break;

		if (tv.tv_sec > (INT_MAX - tv.tv_usec / tick) / hz) {
			error = EDOM;
			break;
		}

		optval = tv.tv_sec * hz + tv.tv_usec / tick;
		if (optval == 0 && tv.tv_usec != 0)
			optval = 1;

		switch (opt) {
		case SO_SNDTIMEO:
			so->so_snd.sb_timeo = optval;
			break;
		case SO_RCVTIMEO:
			so->so_rcv.sb_timeo = optval;
			break;
		}
		break;

	default:
		solock(so);
		error = ENOPROTOOPT;
		break;
	}
	KASSERT(solocked(so));
	return error;
}

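/*
 * sosetopt: set a socket option; SOL_SOCKET options are handled here,
 * anything else (and any SOL_SOCKET option the protocol also wants to
 * see) is passed to the protocol's pr_ctloutput.
 */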
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, prerr;

	if (sopt->sopt_level == SOL_SOCKET) {
		error = sosetopt1(so, sopt);
		KASSERT(solocked(so));
	} else {
		error = ENOPROTOOPT;
		solock(so);
	}

	if ((error == 0 || error == ENOPROTOOPT) &&
	    so->so_proto != NULL && so->so_proto->pr_ctloutput != NULL) {
		/* give the protocol stack a shot */
		prerr = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, sopt);
		if (prerr == 0)
			error = 0;
		else if (prerr != ENOPROTOOPT)
			error = prerr;
	}
	sounlock(so);
	return error;
}

/*
 * so_setsockopt() is a wrapper providing a sockopt structure for sosetopt()
 */
int
so_setsockopt(struct lwp *l, struct socket *so, int level, int name,
    const void *val, size_t valsize)
{
	struct sockopt sopt;
	int error;

	KASSERT(valsize == 0 || val != NULL);

	sockopt_init(&sopt, level, name, valsize);
	sockopt_set(&sopt, val, valsize);

	error = sosetopt(so, &sopt);

	sockopt_destroy(&sopt);

	return error;
}

/*
 * internal get SOL_SOCKET options
 */
static int
sogetopt1(struct socket *so, struct sockopt *sopt)
{
	int error, optval, opt;
	struct linger l;
	struct timeval tv;

	switch ((opt = sopt->sopt_name)) {

	case SO_ACCEPTFILTER:
		error = accept_filt_getopt(so, sopt);
		break;

	case SO_LINGER:
		l.l_onoff = (so->so_options & SO_LINGER) ? 1 : 0;
		l.l_linger = so->so_linger;

		error = sockopt_set(sopt, &l, sizeof(l));
		break;

	case SO_USELOOPBACK:
	case SO_DONTROUTE:
	case SO_DEBUG:
	case SO_KEEPALIVE:
	case SO_REUSEADDR:
	case SO_REUSEPORT:
	case SO_BROADCAST:
	case SO_OOBINLINE:
	case SO_TIMESTAMP:
	case SO_NOSIGPIPE:
	case SO_RERROR:
#ifdef SO_OTIMESTAMP
	case SO_OTIMESTAMP:
#endif
	case SO_ACCEPTCONN:
		error = sockopt_setint(sopt, (so->so_options & opt) ? 1 : 0);
		break;

	case SO_TYPE:
		error = sockopt_setint(sopt, so->so_type);
		break;

	case SO_ERROR:
		if (so->so_error == 0) {
			so->so_error = so->so_rerror;
			so->so_rerror = 0;
		}
		error = sockopt_setint(sopt, so->so_error);
		so->so_error = 0;
		break;

	case SO_SNDBUF:
		error = sockopt_setint(sopt, so->so_snd.sb_hiwat);
		break;

	case SO_RCVBUF:
		error = sockopt_setint(sopt, so->so_rcv.sb_hiwat);
		break;

	case SO_SNDLOWAT:
		error = sockopt_setint(sopt, so->so_snd.sb_lowat);
		break;

	case SO_RCVLOWAT:
		error = sockopt_setint(sopt, so->so_rcv.sb_lowat);
		break;

#ifdef COMPAT_50
	case SO_OSNDTIMEO:
	case SO_ORCVTIMEO: {
		struct timeval50 otv;

		optval = (opt == SO_OSNDTIMEO ?
		    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

		otv.tv_sec = optval / hz;
		otv.tv_usec = (optval % hz) * tick;

		error = sockopt_set(sopt, &otv, sizeof(otv));
		break;
	}
#endif /* COMPAT_50 */

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		optval = (opt == SO_SNDTIMEO ?
		    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

		tv.tv_sec = optval / hz;
		tv.tv_usec = (optval % hz) * tick;

		error = sockopt_set(sopt, &tv, sizeof(tv));
		break;

	case SO_OVERFLOWED:
		error = sockopt_setint(sopt, so->so_rcv.sb_overflowed);
		break;

	default:
		error = ENOPROTOOPT;
		break;
	}

	return error;
}

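/*
 * sogetopt: get a socket option; SOL_SOCKET options are handled here,
 * anything else is passed to the protocol's pr_ctloutput.
 */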
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error;

	solock(so);
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			error = ((*so->so_proto->pr_ctloutput)
			    (PRCO_GETOPT, so, sopt));
		} else
			error = ENOPROTOOPT;
	} else {
		error = sogetopt1(so, sopt);
	}
	sounlock(so);
	return error;
}

/*
 * alloc sockopt data buffer
 * - will be released at destroy
 */
static int
sockopt_alloc(struct sockopt *sopt, size_t len, km_flag_t kmflag)
{

	KASSERT(sopt->sopt_size == 0);

	if (len > sizeof(sopt->sopt_buf)) {
		sopt->sopt_data = kmem_zalloc(len, kmflag);
		if (sopt->sopt_data == NULL)
			return ENOMEM;
	} else
		sopt->sopt_data = sopt->sopt_buf;

	sopt->sopt_size = len;
	return 0;
}

/*
 * initialise sockopt storage
 * - MAY sleep during allocation
 */
void
sockopt_init(struct sockopt *sopt, int level, int name, size_t size)
{

	memset(sopt, 0, sizeof(*sopt));

	sopt->sopt_level = level;
	sopt->sopt_name = name;
	(void)sockopt_alloc(sopt, size, KM_SLEEP);
}

/*
 * destroy sockopt storage
 * - will release any held memory references
 */
void
sockopt_destroy(struct sockopt *sopt)
{

	if (sopt->sopt_data != sopt->sopt_buf)
		kmem_free(sopt->sopt_data, sopt->sopt_size);

	memset(sopt, 0, sizeof(*sopt));
}

/*
 * set sockopt value
 * - value is copied into sockopt
 * - memory is allocated when necessary, will not sleep
 */
int
sockopt_set(struct sockopt *sopt, const void *buf, size_t len)
{
	int error;

	if (sopt->sopt_size == 0) {
		error = sockopt_alloc(sopt, len, KM_NOSLEEP);
		if (error)
			return error;
	}

	if (sopt->sopt_size < len)
		return EINVAL;

	memcpy(sopt->sopt_data, buf, len);
	sopt->sopt_retsize = len;

	return 0;
}

/*
 * common case of set sockopt integer value
 */
int
sockopt_setint(struct sockopt *sopt, int val)
{

	return sockopt_set(sopt, &val, sizeof(int));
}

/*
 * get sockopt value
 * - correct size must be given
 */
int
sockopt_get(const struct sockopt *sopt, void *buf, size_t len)
{

	if (sopt->sopt_size != len)
		return EINVAL;

	memcpy(buf, sopt->sopt_data, len);
	return 0;
}

/*
 * common case of get sockopt integer value
 */
int
sockopt_getint(const struct sockopt *sopt, int *valp)
{

	return sockopt_get(sopt, valp, sizeof(int));
}
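
/*
 * The helpers above pair naturally; a minimal sketch of a set/get
 * round trip through a struct sockopt (values are illustrative):
 */
#if 0
	struct sockopt sopt;
	int val;

	sockopt_init(&sopt, SOL_SOCKET, SO_KEEPALIVE, sizeof(int));
	(void)sockopt_setint(&sopt, 1);		/* store the value ... */
	(void)sockopt_getint(&sopt, &val);	/* ... and read it back */
	KASSERT(val == 1);
	sockopt_destroy(&sopt);
#endif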

/*
 * set sockopt value from mbuf
 * - ONLY for legacy code
 * - mbuf is released by sockopt
 * - will not sleep
 */
int
sockopt_setmbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t len;
	int error;

	len = m_length(m);

	if (sopt->sopt_size == 0) {
		error = sockopt_alloc(sopt, len, KM_NOSLEEP);
		if (error)
			return error;
	}

	if (sopt->sopt_size < len)
		return EINVAL;

	m_copydata(m, 0, len, sopt->sopt_data);
	m_freem(m);
	sopt->sopt_retsize = len;

	return 0;
}

/*
 * get sockopt value into mbuf
 * - ONLY for legacy code
 * - mbuf to be released by the caller
 * - will not sleep
 */
struct mbuf *
sockopt_getmbuf(const struct sockopt *sopt)
{
	struct mbuf *m;

	if (sopt->sopt_size > MCLBYTES)
		return NULL;

	m = m_get(M_DONTWAIT, MT_SOOPTS);
	if (m == NULL)
		return NULL;

	if (sopt->sopt_size > MLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return NULL;
		}
	}

	memcpy(mtod(m, void *), sopt->sopt_data, sopt->sopt_size);
	m->m_len = sopt->sopt_size;

	return m;
}
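
/*
 * A minimal sketch of the legacy mbuf bridge above, e.g. for handing
 * an already-filled sockopt (sopt) to old mbuf-based code; note the
 * caller owns, and must free, the returned mbuf:
 */
#if 0
	struct mbuf *m;

	m = sockopt_getmbuf(&sopt);	/* copy of the option, or NULL */
	if (m != NULL) {
		/* ... hand m to the legacy consumer ... */
		m_freem(m);
	}
#endif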

void
sohasoutofband(struct socket *so)
{

	fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so);
	selnotify(&so->so_rcv.sb_sel, POLLPRI | POLLRDBAND, NOTE_SUBMIT);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_socket;
	solock(so);
	SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	sounlock(so);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;
	int rv;

	so = ((file_t *)kn->kn_obj)->f_socket;
	if (hint != NOTE_SUBMIT)
		solock(so);
	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error || so->so_rerror)
		rv = 1;
	else if (kn->kn_sfflags & NOTE_LOWAT)
		rv = (kn->kn_data >= kn->kn_sdata);
	else
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	if (hint != NOTE_SUBMIT)
		sounlock(so);
	return rv;
}
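
/*
 * filt_soread() honours NOTE_LOWAT, so a kevent(2) consumer can defer
 * wakeups until enough data is buffered.  A minimal userland sketch
 * (kq and fd are assumed descriptors):
 */
#if 0
	struct kevent ev;

	/* fire only once at least 4096 bytes are ready to read */
	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 4096, 0);
	(void)kevent(kq, &ev, 1, NULL, 0, NULL);
#endif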

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_socket;
	solock(so);
	SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	sounlock(so);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;
	int rv;

	so = ((file_t *)kn->kn_obj)->f_socket;
	if (hint != NOTE_SUBMIT)
		solock(so);
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error)
		rv = 1;
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		rv = 0;
	else if (kn->kn_sfflags & NOTE_LOWAT)
		rv = (kn->kn_data >= kn->kn_sdata);
	else
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	if (hint != NOTE_SUBMIT)
		sounlock(so);
	return rv;
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so;
	int rv;

	so = ((file_t *)kn->kn_obj)->f_socket;

	/*
	 * Set kn_data to number of incoming connections, not
	 * counting partial (incomplete) connections.
	 */
	if (hint != NOTE_SUBMIT)
		solock(so);
	kn->kn_data = so->so_qlen;
	rv = (kn->kn_data > 0);
	if (hint != NOTE_SUBMIT)
		sounlock(so);
	return rv;
}

static const struct filterops solisten_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_sordetach,
	.f_event = filt_solisten,
};

static const struct filterops soread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};

static const struct filterops sowrite_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so;
	struct sockbuf *sb;

	so = ((file_t *)kn->kn_obj)->f_socket;
	solock(so);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		sounlock(so);
		return EINVAL;
	}
	SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	sounlock(so);
	return 0;
}
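
/*
 * soo_kqfilter() is reached via kevent(2) on a socket descriptor; for
 * a listening socket the EVFILT_READ case selects solisten_filtops, so
 * the event reports completed connections.  A minimal userland sketch
 * (sock is an assumed listening socket):
 */
#if 0
	struct kevent ev;
	int kq = kqueue();

	EV_SET(&ev, sock, EVFILT_READ, EV_ADD, 0, 0, 0);
	(void)kevent(kq, &ev, 1, NULL, 0, NULL);	/* attach */
	/*
	 * A later kevent(kq, NULL, 0, &ev, 1, NULL) returns with
	 * ev.data holding the completed-connection count (so_qlen).
	 */
#endif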

static int
sodopoll(struct socket *so, int events)
{
	int revents;

	revents = 0;

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowritable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	return revents;
}

int
sopoll(struct socket *so, int events)
{
	int revents = 0;

#ifndef DIAGNOSTIC
	/*
	 * Do a quick, unlocked check in expectation that the socket
	 * will be ready for I/O.  Don't do this check if DIAGNOSTIC,
	 * as the solocked() assertions will fail.
	 */
	if ((revents = sodopoll(so, events)) != 0)
		return revents;
#endif

	solock(so);
	if ((revents = sodopoll(so, events)) == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(curlwp, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_NOTIFY;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curlwp, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_NOTIFY;
		}
	}
	sounlock(so);

	return revents;
}
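
/*
 * sopoll() backs poll(2)/select(2) on sockets; POLLPRI/POLLRDBAND
 * correspond to the out-of-band mark test in sodopoll().  A minimal
 * userland sketch (sock is an assumed connected socket):
 */
#if 0
	struct pollfd pfd;

	pfd.fd = sock;
	pfd.events = POLLIN | POLLPRI;
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLPRI) != 0) {
		/* out-of-band data is pending */
	}
#endif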

struct mbuf **
sbsavetimestamp(int opt, struct mbuf **mp)
{
	struct timeval tv;

	microtime(&tv);

#ifdef SO_OTIMESTAMP
	if (opt & SO_OTIMESTAMP) {
		struct timeval50 tv50;

		timeval_to_timeval50(&tv, &tv50);
		*mp = sbcreatecontrol(&tv50, sizeof(tv50),
		    SCM_OTIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	} else
#endif
	if (opt & SO_TIMESTAMP) {
		*mp = sbcreatecontrol(&tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	return mp;
}
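
/*
 * The control mbufs built above reach userland as SCM_TIMESTAMP
 * messages once SO_TIMESTAMP is enabled.  A minimal userland sketch of
 * recovering the timestamp (sock and a recvmsg(2)-filled msg are
 * assumed):
 */
#if 0
	struct cmsghdr *cm;
	struct timeval tv;
	int on = 1;

	(void)setsockopt(sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	/* ... recvmsg(sock, &msg, 0) ... */
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
	    cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMP) {
			memcpy(&tv, CMSG_DATA(cm), sizeof(tv));
			/* tv now holds the datagram's receive time */
		}
	}
#endif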

#include <sys/sysctl.h>

static int sysctl_kern_somaxkva(SYSCTLFN_PROTO);
static int sysctl_kern_sbmax(SYSCTLFN_PROTO);

/*
 * sysctl helper routine for kern.somaxkva.  Ensures that the given
 * value is not too small.
 * (XXX should we maybe make sure it's not too large as well?)
 */
static int
sysctl_kern_somaxkva(SYSCTLFN_ARGS)
{
	int error, new_somaxkva;
	struct sysctlnode node;

	new_somaxkva = somaxkva;
	node = *rnode;
	node.sysctl_data = &new_somaxkva;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (new_somaxkva < (16 * 1024 * 1024)) /* sanity */
		return EINVAL;

	mutex_enter(&so_pendfree_lock);
	somaxkva = new_somaxkva;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);

	return error;
}

/*
 * sysctl helper routine for kern.sbmax.  Basically just ensures that
 * any new value is not too small.
 */
static int
sysctl_kern_sbmax(SYSCTLFN_ARGS)
{
	int error, new_sbmax;
	struct sysctlnode node;

	new_sbmax = sb_max;
	node = *rnode;
	node.sysctl_data = &new_sbmax;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	KERNEL_LOCK(1, NULL);
	error = sb_max_set(new_sbmax);
	KERNEL_UNLOCK_ONE(NULL);

	return error;
}

/*
 * sysctl helper routine for kern.sooptions.  Ensures that only allowed
 * options can be set.
 */
static int
sysctl_kern_sooptions(SYSCTLFN_ARGS)
{
	int error, new_options;
	struct sysctlnode node;

	new_options = sooptions;
	node = *rnode;
	node.sysctl_data = &new_options;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (new_options & ~SO_DEFOPTS)
		return EINVAL;

	sooptions = new_options;

	return 0;
}
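
/*
 * The helpers above back the kern.somaxkva, kern.sbmax and
 * kern.sooptions nodes created below.  A minimal userland sketch of
 * tuning one of them (error handling omitted):
 */
#if 0
	int newmax = 2 * 1024 * 1024;	/* 2MB socket buffer ceiling */

	(void)sysctlbyname("kern.sbmax", NULL, NULL,
	    &newmax, sizeof(newmax));
#endif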

static void
sysctl_kern_socket_setup(void)
{

	KASSERT(socket_sysctllog == NULL);

	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "somaxkva",
	    SYSCTL_DESCR("Maximum amount of kernel memory to be "
	    "used for socket buffers"),
	    sysctl_kern_somaxkva, 0, NULL, 0,
	    CTL_KERN, KERN_SOMAXKVA, CTL_EOL);

	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "sbmax",
	    SYSCTL_DESCR("Maximum socket buffer size"),
	    sysctl_kern_sbmax, 0, NULL, 0,
	    CTL_KERN, KERN_SBMAX, CTL_EOL);

	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "sooptions",
	    SYSCTL_DESCR("Default socket options"),
	    sysctl_kern_sooptions, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
}