/*	$NetBSD: uipc_socket.c,v 1.155 2008/03/21 21:55:00 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.6 (Berkeley) 5/2/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.155 2008/03/21 21:55:00 ad Exp $");

#include "opt_sock_counters.h"
#include "opt_sosend_loan.h"
#include "opt_mbuftrace.h"
#include "opt_somaxkva.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/pool.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm.h>

POOL_INIT(socket_pool, sizeof(struct socket), 0, 0, 0, "sockpl", NULL,
    IPL_SOFTNET);

MALLOC_DEFINE(M_SOOPTS, "soopts", "socket options");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");

extern const struct fileops socketops;

extern int	somaxconn;	/* patchable (XXX sysctl) */
int		somaxconn = SOMAXCONN;

#ifdef SOSEND_COUNTERS
#include <sys/device.h>

static struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "loan big");
static struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy big");
static struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy small");
static struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "kva limit");

#define	SOSEND_COUNTER_INCR(ev)	(ev)->ev_count++

EVCNT_ATTACH_STATIC(sosend_loan_big);
EVCNT_ATTACH_STATIC(sosend_copy_big);
EVCNT_ATTACH_STATIC(sosend_copy_small);
EVCNT_ATTACH_STATIC(sosend_kvalimit);
#else

#define	SOSEND_COUNTER_INCR(ev)	/* nothing */

#endif /* SOSEND_COUNTERS */

static struct callback_entry sokva_reclaimerentry;

#ifdef SOSEND_NO_LOAN
int sock_loan_thresh = -1;
#else
int sock_loan_thresh = 4096;
#endif

static kmutex_t so_pendfree_lock;
static struct mbuf *so_pendfree;

#ifndef SOMAXKVA
#define	SOMAXKVA (16 * 1024 * 1024)
#endif
int somaxkva = SOMAXKVA;
static int socurkva;
static kcondvar_t socurkva_cv;

#define	SOCK_LOAN_CHUNK		65536

static size_t sodopendfree(void);
static size_t sodopendfreel(void);
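
/*
 * sokvareserve: reserve "len" bytes of kva for a loan.
 *
 * Sleeps (interruptibly) while the reservation would push socurkva past
 * somaxkva, draining the pendfree list first.  Returns "len" on success,
 * or 0 if the wait was interrupted by a signal.
 */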
static vsize_t
sokvareserve(struct socket *so, vsize_t len)
{
	int error;

	mutex_enter(&so_pendfree_lock);
	while (socurkva + len > somaxkva) {
		size_t freed;

		/*
		 * try to do pendfree.
		 */

		freed = sodopendfreel();

		/*
		 * if some kva was freed, try again.
		 */

		if (freed)
			continue;

		SOSEND_COUNTER_INCR(&sosend_kvalimit);
		error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock);
		if (error) {
			len = 0;
			break;
		}
	}
	socurkva += len;
	mutex_exit(&so_pendfree_lock);
	return len;
}

static void
sokvaunreserve(vsize_t len)
{

	mutex_enter(&so_pendfree_lock);
	socurkva -= len;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);
}

/*
 * sokvaalloc: allocate kva for loan.
 */

vaddr_t
sokvaalloc(vsize_t len, struct socket *so)
{
	vaddr_t lva;

	/*
	 * reserve kva.
	 */

	if (sokvareserve(so, len) == 0)
		return 0;

	/*
	 * allocate kva.
	 */

	lva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (lva == 0) {
		sokvaunreserve(len);
		return (0);
	}

	return lva;
}

/*
 * sokvafree: free kva for loan.
 */

void
sokvafree(vaddr_t sva, vsize_t len)
{

	/*
	 * free kva.
	 */

	uvm_km_free(kernel_map, sva, len, UVM_KMF_VAONLY);

	/*
	 * unreserve kva.
	 */

	sokvaunreserve(len);
}
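
/*
 * sodoloanfree: unloan the pages backing a loaned buffer and release
 * the kva they were mapped at.  If "pgs" is NULL, the page array is
 * reconstructed by looking the addresses up in the kernel pmap.
 */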
static void
sodoloanfree(struct vm_page **pgs, void *buf, size_t size)
{
	vaddr_t va, sva, eva;
	vsize_t len;
	paddr_t pa;
	int i, npgs;

	eva = round_page((vaddr_t) buf + size);
	sva = trunc_page((vaddr_t) buf);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	if (__predict_false(pgs == NULL)) {
		pgs = alloca(npgs * sizeof(*pgs));

		for (i = 0, va = sva; va < eva; i++, va += PAGE_SIZE) {
			if (pmap_extract(pmap_kernel(), va, &pa) == false)
				panic("sodoloanfree: va 0x%lx not mapped", va);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
		}
	}

	pmap_kremove(sva, len);
	pmap_update(pmap_kernel());
	uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE);
	sokvafree(sva, len);
}
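
/*
 * sodopendfree: free mbufs on the "pendfree" list.
 * Takes so_pendfree_lock and defers to sodopendfreel().
 * Returns the number of bytes of kva that were freed.
 */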
static size_t
sodopendfree(void)
{
	size_t rv;

	mutex_enter(&so_pendfree_lock);
	rv = sodopendfreel();
	mutex_exit(&so_pendfree_lock);

	return rv;
}

/*
 * sodopendfreel: free mbufs on "pendfree" list.
 * unlock and relock so_pendfree_lock when freeing mbufs.
 *
 * => called with so_pendfree_lock held.
 */

static size_t
sodopendfreel(void)
{
	struct mbuf *m, *next;
	size_t rv = 0;

	KASSERT(mutex_owned(&so_pendfree_lock));

	while (so_pendfree != NULL) {
		m = so_pendfree;
		so_pendfree = NULL;
		mutex_exit(&so_pendfree_lock);

		for (; m != NULL; m = next) {
			next = m->m_next;

			rv += m->m_ext.ext_size;
			sodoloanfree((m->m_flags & M_EXT_PAGES) ?
			    m->m_ext.ext_pgs : NULL, m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			pool_cache_put(mb_cache, m);
		}

		mutex_enter(&so_pendfree_lock);
	}

	return (rv);
}
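
/*
 * soloanfree: external-storage free callback for mbufs built by
 * sosend_loan().  The MEXTREMOVE case (m == NULL) frees the loan
 * synchronously; otherwise, since returning kva to kernel_map may
 * sleep, the mbuf is queued on the pendfree list for later processing.
 */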
void
soloanfree(struct mbuf *m, void *buf, size_t size, void *arg)
{

	if (m == NULL) {

		/*
		 * called from MEXTREMOVE.
		 */

		sodoloanfree(NULL, buf, size);
		return;
	}

	/*
	 * postpone freeing mbuf.
	 *
	 * we can't do it in interrupt context
	 * because we need to put kva back to kernel_map.
	 */

	mutex_enter(&so_pendfree_lock);
	m->m_next = so_pendfree;
	so_pendfree = m;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);
}
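
/*
 * sosend_loan: zero-copy send setup.  Loan the user pages backing the
 * current iovec into the kernel, map them read-only at newly allocated
 * kva, and attach them to "m" as external storage (freed later through
 * soloanfree).  Returns the number of bytes lent, or 0 if the data
 * should be copied the conventional way instead.
 */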
static long
sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space)
{
	struct iovec *iov = uio->uio_iov;
	vaddr_t sva, eva;
	vsize_t len;
	vaddr_t lva, va;
	int npgs, i, error;

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace))
		return (0);

	if (iov->iov_len < (size_t) space)
		space = iov->iov_len;
	if (space > SOCK_LOAN_CHUNK)
		space = SOCK_LOAN_CHUNK;

	eva = round_page((vaddr_t) iov->iov_base + space);
	sva = trunc_page((vaddr_t) iov->iov_base);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	/* XXX KDASSERT */
	KASSERT(npgs <= M_EXT_MAXPAGES);

	lva = sokvaalloc(len, so);
	if (lva == 0)
		return 0;

	error = uvm_loan(&uio->uio_vmspace->vm_map, sva, len,
	    m->m_ext.ext_pgs, UVM_LOAN_TOPAGE);
	if (error) {
		sokvafree(lva, len);
		return (0);
	}

	for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE)
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]),
		    VM_PROT_READ);
	pmap_update(pmap_kernel());

	lva += (vaddr_t) iov->iov_base & PAGE_MASK;

	MEXTADD(m, (void *) lva, space, M_MBUF, soloanfree, so);
	m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP;

	uio->uio_resid -= space;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + space;
	uio->uio_iov->iov_len -= space;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (space);
}
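
/*
 * sokva_reclaim_callback: kernel_map reclaim hook.  Drain the pendfree
 * list to give loaned kva back to the map; continue the callback chain
 * only if the map is still starved afterwards.
 */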
static int
sokva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{

	KASSERT(ce == &sokva_reclaimerentry);
	KASSERT(obj == NULL);

	sodopendfree();
	if (!vm_map_starved_p(kernel_map)) {
		return CALLBACK_CHAIN_ABORT;
	}
	return CALLBACK_CHAIN_CONTINUE;
}
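
/*
 * getsombuf: allocate an mbuf of the given type, charged to the
 * socket's mbuf owner for MBUFTRACE accounting.
 */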
struct mbuf *
getsombuf(struct socket *so, int type)
{
	struct mbuf *m;

	m = m_get(M_WAIT, type);
	MCLAIM(m, so->so_mowner);
	return m;
}
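
/*
 * m_intopt: build an MT_SOOPTS mbuf carrying a single int option
 * value.  A sketch of the intended use, e.g. when handing an option
 * down to a protocol's pr_ctloutput (illustrative only, not a caller
 * in this file):
 *
 *	m = m_intopt(so, 1);
 *	error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
 *	    SOL_SOCKET, SO_KEEPALIVE, &m);
 */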
struct mbuf *
m_intopt(struct socket *so, int val)
{
	struct mbuf *m;

	m = getsombuf(so, MT_SOOPTS);
	m->m_len = sizeof(int);
	*mtod(m, int *) = val;
	return m;
}
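
/*
 * soinit: initialize the socket subsystem: the pendfree lock and kva
 * condvar, the adjusted socket buffer size limit, and the kernel_map
 * reclaim callback used to recover loaned kva.
 */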
void
soinit(void)
{

	mutex_init(&so_pendfree_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&socurkva_cv, "sokva");

	/* Set the initial adjusted socket buffer size. */
	if (sb_max_set(sb_max))
		panic("bad initial sb_max value: %lu", sb_max);

	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &sokva_reclaimerentry, NULL, sokva_reclaim_callback);
}

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
/*ARGSUSED*/
int
socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l)
{
	const struct protosw *prp;
	struct socket *so;
	uid_t uid;
	int error, s;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET,
	    KAUTH_REQ_NETWORK_SOCKET_OPEN, KAUTH_ARG(dom), KAUTH_ARG(type),
	    KAUTH_ARG(proto));
	if (error != 0)
		return error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL) {
		/* no support for domain */
		if (pffinddomain(dom) == 0)
			return EAFNOSUPPORT;
		/* no support for socket type */
		if (proto == 0 && type != 0)
			return EPROTOTYPE;
		return EPROTONOSUPPORT;
	}
	if (prp->pr_usrreq == NULL)
		return EPROTONOSUPPORT;
	if (prp->pr_type != type)
		return EPROTOTYPE;
	s = splsoftnet();
	so = pool_get(&socket_pool, PR_WAITOK);
	memset(so, 0, sizeof(*so));
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
	so->so_type = type;
	so->so_proto = prp;
	so->so_send = sosend;
	so->so_receive = soreceive;
#ifdef MBUFTRACE
	so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_mowner = &prp->pr_domain->dom_mowner;
#endif
	selinit(&so->so_rcv.sb_sel);
	selinit(&so->so_snd.sb_sel);
	uid = kauth_cred_geteuid(l->l_cred);
	so->so_uidinfo = uid_find(uid);
	error = (*prp->pr_usrreq)(so, PRU_ATTACH, NULL,
	    (struct mbuf *)(long)proto, NULL, l);
	if (error != 0) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		splx(s);
		return error;
	}
	splx(s);
	*aso = so;
	return 0;
}

/*
 * On success, write file descriptor to fdout and return zero.
 * On failure, return non-zero; *fdout will be undefined.
 */
int
fsocreate(int domain, struct socket **sop, int type, int protocol,
    struct lwp *l, int *fdout)
{
	struct socket *so;
	struct file *fp;
	int fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);
	fp->f_flag = FREAD|FWRITE;
	fp->f_type = DTYPE_SOCKET;
	fp->f_ops = &socketops;
	error = socreate(domain, &so, type, protocol, l);
	if (error != 0) {
		fd_abort(curproc, fp, fd);
	} else {
		if (sop != NULL)
			*sop = so;
		fp->f_data = so;
		fd_affix(curproc, fp, fd);
		*fdout = fd;
	}
	return error;
}

int
sobind(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	int s, error;

	s = splsoftnet();
	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, l);
	splx(s);
	return error;
}

int
solisten(struct socket *so, int backlog, struct lwp *l)
{
	int s, error;

	s = splsoftnet();
	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL,
	    NULL, NULL, l);
	if (error != 0) {
		splx(s);
		return error;
	}
	if (TAILQ_EMPTY(&so->so_q))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, somaxconn);
	splx(s);
	return 0;
}
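
/*
 * sofree: release a socket's buffers and return it to the pool.  Does
 * nothing unless the socket has lost both its pcb and its file
 * reference, and can be safely removed from any accept(2) queue it is
 * still on.
 */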
void
sofree(struct socket *so)
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0))
			return;
	}
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_rcv.sb_hiwat, 0,
		    RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_snd.sb_hiwat, 0,
		    RLIM_INFINITY);
	sbrelease(&so->so_snd, so);
	sorflush(so);
	seldestroy(&so->so_rcv.sb_sel);
	seldestroy(&so->so_snd.sb_sel);
	pool_put(&socket_pool, so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	struct socket *so2;
	int s, error;

	error = 0;
	s = splsoftnet();		/* conservative */
	if (so->so_options & SO_ACCEPTCONN) {
		while ((so2 = TAILQ_FIRST(&so->so_q0)) != 0) {
			(void) soqremque(so2, 0);
			(void) soabort(so2);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != 0) {
			(void) soqremque(so2, 1);
			(void) soabort(so2);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) && so->so_nbio)
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((void *)&so->so_timeo,
				    PSOCK | PCATCH, netcls,
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
 drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
		    NULL, NULL, NULL, NULL);
		if (error == 0)
			error = error2;
	}
 discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splsoftnet...
 */
int
soabort(struct socket *so)
{
	int error;

	KASSERT(so->so_head == NULL);
	error = (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL,
	    NULL, NULL, NULL);
	if (error) {
		sofree(so);
	}
	return error;
}

int
soaccept(struct socket *so, struct mbuf *nam)
{
	int s, error;

	error = 0;
	s = splsoftnet();
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
		    NULL, nam, NULL, NULL);
	else
		error = ECONNABORTED;

	splx(s);
	return (error);
}

int
soconnect(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	int s, error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splsoftnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, l);
	splx(s);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int s, error;

	s = splsoftnet();
	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    NULL, (struct mbuf *)so2, NULL, NULL);
	splx(s);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int s, error;

	s = splsoftnet();
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    NULL, NULL, NULL, NULL);
 bad:
	splx(s);
	sodopendfree();
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
	struct mbuf *control, int flags, struct lwp *l)
{
	struct mbuf **mp, *m;
	struct proc *p;
	long space, len, resid, clen, mlen;
	int error, s, dontroute, atomic;

	p = l->l_proc;
	sodopendfree();

	clen = 0;
	atomic = sosendallatonce(so) || top;
	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (p)
		p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		s = splsoftnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_nbio)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (top == NULL) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				MCLAIM(m, so->so_snd.sb_mowner);
				if (sock_loan_thresh >= 0 &&
				    uio->uio_iov->iov_len >= sock_loan_thresh &&
				    space >= sock_loan_thresh &&
				    (len = sosend_loan(so, uio, m,
				    space)) != 0) {
					SOSEND_COUNTER_INCR(&sosend_loan_big);
					space -= len;
					goto have_data;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					SOSEND_COUNTER_INCR(&sosend_copy_big);
					m_clget(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
					space -= len;
				} else {
 nopages:
					SOSEND_COUNTER_INCR(&sosend_copy_small);
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, void *), (int)len, uio);
 have_data:
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error != 0)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			s = splsoftnet();

			if (so->so_state & SS_CANTSENDMORE)
				snderr(EPIPE);

			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curlwp);	/* XXX */
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			splx(s);

			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error != 0)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
	struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct lwp *l = curlwp;
	struct mbuf *m, **mp;
	int atomic, flags, len, error, s, offset, moff, type, orig_resid;
	const struct protosw *pr;
	struct mbuf *nextrecord;
	int mbuf_removed = 0;
	const struct domain *dom;

	pr = so->so_proto;
	atomic = pr->pr_flags & PR_ATOMIC;
	dom = pr->pr_domain;
	mp = mp0;
	type = 0;
	orig_resid = uio->uio_resid;

	if (paddr != NULL)
		*paddr = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;

	if ((flags & MSG_DONTWAIT) == 0)
		sodopendfree();

	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, l);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, void *),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid > 0 && error == 0 && m);
 bad:
		if (m != NULL)
			m_freem(m);
		return error;
	}
	if (mp != NULL)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, l);

 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return error;
	s = splsoftnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL ||
	    ((flags & MSG_DONTWAIT) == 0 &&
	     so->so_rcv.sb_cc < uio->uio_resid &&
	     (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	      ((flags & MSG_WAITALL) &&
	       uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	     m->m_nextpkt == NULL && !atomic)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m != NULL)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_nbio || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error != 0)
			return error;
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
	if (l != NULL)
		l->l_proc->p_stats->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			mbuf_removed = 1;
			if (paddr != NULL) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	while (m != NULL && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp != NULL)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			mbuf_removed = 1;
			if (controlp != NULL) {
				if (dom->dom_externalize && l &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*dom->dom_externalize)(m, l);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (dom->dom_dispose &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					(*dom->dom_dispose)(m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp != NULL) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == NULL) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			splx(s);
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			s = splsoftnet();
			if (error != 0) {
				/*
				 * If any part of the record has been removed
				 * (such as the MT_SONAME mbuf, which will
				 * happen when PR_ADDR, and thus also
				 * PR_ATOMIC, is set), then drop the entire
				 * record to maintain the atomicity of the
				 * receive operation.
				 *
				 * This avoids a later panic("receive 1a")
				 * when compiled with DIAGNOSTIC.
				 */
				if (m && mbuf_removed && atomic)
					(void) sbdroprecord(&so->so_rcv);

				goto release;
			}
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else if (flags & MSG_PEEK)
			moff += len;
		else {
			if (mp != NULL)
				*mp = m_copym(m, 0, len, M_WAIT);
			m->m_data += len;
			m->m_len -= len;
			so->so_rcv.sb_cc -= len;
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreq)(so, PRU_RCVD,
				    NULL, (struct mbuf *)(long)flags, NULL, l);
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error != 0) {
				sbunlock(&so->so_rcv);
				splx(s);
				return 0;
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && atomic) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, l);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	splx(s);
	return error;
}
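
/*
 * soshutdown: disable receives and/or sends on a socket, as selected
 * by "how" (SHUT_RD, SHUT_WR or SHUT_RDWR).
 */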
int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr;

	pr = so->so_proto;
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how == SHUT_RD || how == SHUT_RDWR)
		sorflush(so);
	if (how == SHUT_WR || how == SHUT_RDWR)
		return (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL,
		    NULL, NULL, NULL);
	return 0;
}
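
/*
 * sorflush: discard everything queued on a socket's receive buffer,
 * giving the protocol a chance to dispose of any rights (SCM_RIGHTS)
 * still in flight.
 */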
void
sorflush(struct socket *so)
{
	struct sockbuf *sb, asb;
	const struct protosw *pr;
	int s;

	sb = &so->so_rcv;
	pr = so->so_proto;
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splnet();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	/*
	 * Clear most of the sockbuf structure, but leave some of the
	 * fields valid.
	 */
	memset(&sb->sb_startzero, 0,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}
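
/*
 * sosetopt1: handle the SOL_SOCKET level options for sosetopt().
 * Returns 0 on success or an errno; ENOPROTOOPT indicates an option
 * that is not handled at this level.
 */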
static int
sosetopt1(struct socket *so, int level, int optname, struct mbuf *m)
{
	int optval, val;
	struct linger *l;
	struct sockbuf *sb;
	struct timeval *tv;

	switch (optname) {

	case SO_LINGER:
		if (m == NULL || m->m_len != sizeof(struct linger))
			return EINVAL;
		l = mtod(m, struct linger *);
		if (l->l_linger < 0 || l->l_linger > USHRT_MAX ||
		    l->l_linger > (INT_MAX / hz))
			return EDOM;
		so->so_linger = l->l_linger;
		if (l->l_onoff)
			so->so_options |= SO_LINGER;
		else
			so->so_options &= ~SO_LINGER;
		break;

	case SO_DEBUG:
	case SO_KEEPALIVE:
	case SO_DONTROUTE:
	case SO_USELOOPBACK:
	case SO_BROADCAST:
	case SO_REUSEADDR:
	case SO_REUSEPORT:
	case SO_OOBINLINE:
	case SO_TIMESTAMP:
		if (m == NULL || m->m_len < sizeof(int))
			return EINVAL;
		if (*mtod(m, int *))
			so->so_options |= optname;
		else
			so->so_options &= ~optname;
		break;

	case SO_SNDBUF:
	case SO_RCVBUF:
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		if (m == NULL || m->m_len < sizeof(int))
			return EINVAL;

		/*
		 * Values < 1 make no sense for any of these
		 * options, so disallow them.
		 */
		optval = *mtod(m, int *);
		if (optval < 1)
			return EINVAL;

		switch (optname) {

		case SO_SNDBUF:
		case SO_RCVBUF:
			sb = (optname == SO_SNDBUF) ?
			    &so->so_snd : &so->so_rcv;
			if (sbreserve(sb, (u_long)optval, so) == 0)
				return ENOBUFS;
			sb->sb_flags &= ~SB_AUTOSIZE;
			break;

		/*
		 * Make sure the low-water is never greater than
		 * the high-water.
		 */
		case SO_SNDLOWAT:
			so->so_snd.sb_lowat =
			    (optval > so->so_snd.sb_hiwat) ?
			    so->so_snd.sb_hiwat : optval;
			break;
		case SO_RCVLOWAT:
			so->so_rcv.sb_lowat =
			    (optval > so->so_rcv.sb_hiwat) ?
			    so->so_rcv.sb_hiwat : optval;
			break;
		}
		break;

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		if (m == NULL || m->m_len < sizeof(*tv))
			return EINVAL;
		tv = mtod(m, struct timeval *);
		if (tv->tv_sec > (INT_MAX - tv->tv_usec / tick) / hz)
			return EDOM;
		val = tv->tv_sec * hz + tv->tv_usec / tick;
		if (val == 0 && tv->tv_usec != 0)
			val = 1;

		switch (optname) {

		case SO_SNDTIMEO:
			so->so_snd.sb_timeo = val;
			break;
		case SO_RCVTIMEO:
			so->so_rcv.sb_timeo = val;
			break;
		}
		break;

	default:
		return ENOPROTOOPT;
	}
	return 0;
}

int
sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error, prerr;

	if (level == SOL_SOCKET)
		error = sosetopt1(so, level, optname, m);
	else
		error = ENOPROTOOPT;

	if ((error == 0 || error == ENOPROTOOPT) &&
	    so->so_proto != NULL && so->so_proto->pr_ctloutput != NULL) {
		/* give the protocol stack a shot */
		prerr = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, level,
		    optname, &m);
		if (prerr == 0)
			error = 0;
		else if (prerr != ENOPROTOOPT)
			error = prerr;
	} else if (m != NULL)
		(void)m_free(m);
	return error;
}

int
sogetopt(struct socket *so, int level, int optname, struct mbuf **mp)
{
	struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
			    (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof(int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof(struct linger);
			mtod(m, struct linger *)->l_onoff =
			    (so->so_options & SO_LINGER) ? 1 : 0;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			*mtod(m, int *) = (so->so_options & optname) ? 1 : 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			int val = (optname == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) * tick;
			break;
		    }

		case SO_OVERFLOWED:
			*mtod(m, int *) = so->so_rcv.sb_overflowed;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

void
sohasoutofband(struct socket *so)
{

	fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so);
	selnotify(&so->so_rcv.sb_sel, POLLPRI | POLLRDBAND, 0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_data;
	SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_data;
	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_data;
	SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist))
		so->so_snd.sb_flags &= ~SB_KNOTE;
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_data;
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so;

	so = ((file_t *)kn->kn_obj)->f_data;

	/*
	 * Set kn_data to number of incoming connections, not
	 * counting partial (incomplete) connections.
	 */
	kn->kn_data = so->so_qlen;
	return (kn->kn_data > 0);
}

static const struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static const struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static const struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so;
	struct sockbuf *sb;

	so = ((file_t *)kn->kn_obj)->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}
	SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	return (0);
}
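
/*
 * sodopoll: report which of the requested poll(2) events are pending
 * on the socket.  sopoll() first tries this without the kernel lock;
 * only if nothing is pending does it take the lock, re-check, and
 * record the caller as a selector.
 */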
static int
sodopoll(struct socket *so, int events)
{
	int revents;

	revents = 0;

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowritable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	return revents;
}

int
sopoll(struct socket *so, int events)
{
	int revents = 0;
	int s;

	if ((revents = sodopoll(so, events)) != 0)
		return revents;

	KERNEL_LOCK(1, curlwp);
	s = splsoftnet();

	if ((revents = sodopoll(so, events)) == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(curlwp, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curlwp, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);

	return revents;
}


#include <sys/sysctl.h>

static int sysctl_kern_somaxkva(SYSCTLFN_PROTO);

/*
 * sysctl helper routine for kern.somaxkva.  ensures that the given
 * value is not too small.
 * (XXX should we maybe make sure it's not too large as well?)
 */
static int
sysctl_kern_somaxkva(SYSCTLFN_ARGS)
{
	int error, new_somaxkva;
	struct sysctlnode node;

	new_somaxkva = somaxkva;
	node = *rnode;
	node.sysctl_data = &new_somaxkva;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (new_somaxkva < (16 * 1024 * 1024))	/* sanity */
		return (EINVAL);

	mutex_enter(&so_pendfree_lock);
	somaxkva = new_somaxkva;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);

	return (error);
}

SYSCTL_SETUP(sysctl_kern_somaxkva_setup, "sysctl kern.somaxkva setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "somaxkva",
	    SYSCTL_DESCR("Maximum amount of kernel memory to be "
	    "used for socket buffers"),
	    sysctl_kern_somaxkva, 0, NULL, 0,
	    CTL_KERN, KERN_SOMAXKVA, CTL_EOL);
}