/*	$NetBSD: kttcp.c,v 1.13 2003/06/29 22:29:59 fvdl Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden and Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * kttcp.c --
 *
 *	This module provides kernel support for testing network
 *	throughput from the perspective of the kernel.  It is
 *	similar in spirit to the classic ttcp network benchmark
 *	program, the main difference being that with kttcp, the
 *	kernel is the source and sink of the data.
 *
 *	Testing like this is useful for a few reasons:
 *
 *	1. This allows us to know what kind of performance we can
 *	   expect from network applications that run in the kernel
 *	   space, such as the NFS server or the NFS client.  These
 *	   applications don't have to move the data to/from userspace,
 *	   and so benchmark programs which run in userspace don't
 *	   give us an accurate model.
 *
 *	2. Since data received is just thrown away, the receiver
 *	   is very fast.  This can provide better exercise for the
 *	   sender at the other end.
 *
 *	3. Since the NetBSD kernel currently uses a run-to-completion
 *	   scheduling model, kttcp provides a benchmark model where
 *	   preemption of the benchmark program is not an issue.
 */
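
/*
 * Illustrative usage sketch (not compiled into the kernel): a userland
 * caller drives this module by opening the kttcp device read/write and
 * issuing KTTCP_IO_SEND or KTTCP_IO_RECV with a struct kttcp_io_args.
 * The "/dev/kttcp" path and the socket setup below are assumptions made
 * for the example; the ioctl names and kio_* fields are the ones handled
 * in this file.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/kttcpio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	struct kttcp_io_args kio;
 *	int kfd, sock;
 *
 *	kfd = open("/dev/kttcp", O_RDWR);	// assumed device node
 *	sock = socket(AF_INET, SOCK_STREAM, 0);
 *	// ... connect(sock, ...) to the peer acting as the data sink ...
 *	memset(&kio, 0, sizeof(kio));
 *	kio.kio_socket = sock;			// descriptor looked up via fd_getfile()
 *	kio.kio_totalsize = 16ULL << 20;	// bytes to source (must be < KTTCP_MAX_XMIT)
 *	if (ioctl(kfd, KTTCP_IO_SEND, &kio) == 0) {
 *		// kio.kio_bytesdone and kio.kio_elapsed now hold the results
 *	}
 */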

#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signal.h>
#include <sys/socketvar.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/sa.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/kttcpio.h>

static int kttcp_send(struct proc *p, struct kttcp_io_args *);
static int kttcp_recv(struct proc *p, struct kttcp_io_args *);
static int kttcp_sosend(struct socket *, unsigned long long,
	    unsigned long long *, struct proc *, int);
static int kttcp_soreceive(struct socket *, unsigned long long,
	    unsigned long long *, struct proc *, int *);

void	kttcpattach(int);

dev_type_ioctl(kttcpioctl);

const struct cdevsw kttcp_cdevsw = {
	nullopen, nullclose, noread, nowrite, kttcpioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

void
kttcpattach(int count)
{
	/* Do nothing. */
}

int
kttcpioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	int error;

	if ((flag & FWRITE) == 0)
		return EPERM;

	switch (cmd) {
	case KTTCP_IO_SEND:
		error = kttcp_send(p, (struct kttcp_io_args *) data);
		break;

	case KTTCP_IO_RECV:
		error = kttcp_recv(p, (struct kttcp_io_args *) data);
		break;

	default:
		return EINVAL;
	}

	return error;
}

static int
kttcp_send(struct proc *p, struct kttcp_io_args *kio)
{
	struct file *fp;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
		return EINVAL;

	fp = fd_getfile(p->p_fd, kio->kio_socket);
	if (fp == NULL)
		return EBADF;
	FILE_USE(fp);
	if (fp->f_type != DTYPE_SOCKET) {
		FILE_UNUSE(fp, p);
		return EFTYPE;
	}

	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_sosend((struct socket *)fp->f_data, len,
		    &done, p, 0);
		len -= done;
	} while (error == 0 && len > 0);

	FILE_UNUSE(fp, p);

	microtime(&t1);
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}

static int
kttcp_recv(struct proc *p, struct kttcp_io_args *kio)
{
	struct file *fp;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	if (kio->kio_totalsize > KTTCP_MAX_XMIT)
		return EINVAL;

	fp = fd_getfile(p->p_fd, kio->kio_socket);
	if (fp == NULL)
		return EBADF;
	FILE_USE(fp);
	if (fp->f_type != DTYPE_SOCKET) {
		FILE_UNUSE(fp, p);
		return EBADF;
	}
	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_soreceive((struct socket *)fp->f_data,
		    len, &done, p, NULL);
		len -= done;
	} while (error == 0 && len > 0 && done > 0);

	FILE_UNUSE(fp, p);

	microtime(&t1);
	if (error == EPIPE)
		error = 0;
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}
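
/*
 * Interpreting the results (illustrative note, not used by the kernel):
 * both ioctls report the transferred byte count in kio_bytesdone and the
 * wall-clock duration in kio_elapsed, so a caller computes throughput as
 *
 *	bytes_per_sec = kio_bytesdone /
 *	    (kio_elapsed.tv_sec + kio_elapsed.tv_usec / 1e6);
 *
 * e.g. 1 GB moved in 8.0 seconds is 125 MB/s, i.e. 1000 Mbit/s.
 */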

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Slightly changed version of sosend()
 */
static int
kttcp_sosend(struct socket *so, unsigned long long slen,
	unsigned long long *done, struct proc *p, int flags)
{
	struct mbuf **mp, *m, *top;
	long space, len, mlen;
	int error, s, dontroute, atomic;
	long long resid;

	atomic = sosendallatonce(so);
	resid = slen;
	top = NULL;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	p->p_stats->p_ru.ru_msgsnd++;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		s = splsoftnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0)
					snderr(ENOTCONN);
			} else
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat))
			snderr(EMSGSIZE);
		if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			SBLASTRECORDCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			SBLASTMBUFCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		do {
			do {
				if (top == 0) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					m_clget(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = lmin(MCLBYTES, resid);
#else
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
#endif
					space -= len;
				} else {
 nopages:
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				resid -= len;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			s = splsoftnet();

			if (so->so_state & SS_CANTSENDMORE)
				snderr(EPIPE);

			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, NULL, NULL, p);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			splx(s);

			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	if (top)
		m_freem(top);
	*done = slen - resid;
#if 0
	printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}

static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
	unsigned long long *done, struct proc *p, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset, moff, type;
	long long orig_resid, resid;
	struct protosw *pr;
	struct mbuf *nextrecord;

	pr = so->so_proto;
	mp = NULL;
	type = 0;
	resid = orig_resid = slen;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, NULL);
		if (error)
			goto bad;
		do {
			resid -= min(resid, m->m_len);
			m = m_free(m);
		} while (resid && error == 0 && m);
 bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, NULL);

 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	s = splsoftnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
#ifdef notyet /* XXXX */
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
#endif
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
				    (struct mbuf *)(long)flags, NULL, NULL);
			SBLASTRECORDCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an SB_EMPTY_FIXUP().  Second part
			 * makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, NULL);
	}
	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	splx(s);
	*done = slen - resid;
#if 0
	printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}