/*      $NetBSD: kttcp.c,v 1.3 2002/07/03 19:36:52 thorpej Exp $       */

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden and Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * kttcp.c --
 *
 *      This module provides kernel support for testing network
 *      throughput from the perspective of the kernel.  It is
 *      similar in spirit to the classic ttcp network benchmark
 *      program, the main difference being that with kttcp, the
 *      kernel is the source and sink of the data.
 *
 *      Testing like this is useful for a few reasons:
 *
 *      1. This allows us to know what kind of performance we can
 *         expect from network applications that run in the kernel
 *         space, such as the NFS server or the NFS client.  These
 *         applications don't have to move the data to/from userspace,
 *         and so benchmark programs which run in userspace don't
 *         give us an accurate model.
 *
 *      2. Since data received is just thrown away, the receiver
 *         is very fast.  This can provide better exercise for the
 *         sender at the other end.
 *
 *      3. Since the NetBSD kernel currently uses a run-to-completion
 *         scheduling model, kttcp provides a benchmark model where
 *         preemption of the benchmark program is not an issue.
 */
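
/*
 * Userland drives this module through ioctl(2) on the kttcp device.
 * A rough sketch of a sender follows (error handling and headers
 * omitted; the device node name and transfer size are illustrative
 * assumptions, not something this file defines):
 *
 *      struct kttcp_io_args kio;
 *      int kfd, sock;
 *
 *      kfd = open("/dev/kttcp", O_RDWR);           assumed node name
 *      sock = ...;                                 a connected socket
 *      memset(&kio, 0, sizeof(kio));
 *      kio.kio_socket = sock;
 *      kio.kio_totalsize = 16ULL * 1024 * 1024;    bytes to transfer
 *      if (ioctl(kfd, KTTCP_IO_SEND, &kio) == 0)
 *              printf("%llu bytes in %ld.%06ld s\n",
 *                  kio.kio_bytesdone,
 *                  (long)kio.kio_elapsed.tv_sec,
 *                  (long)kio.kio_elapsed.tv_usec);
 *
 * KTTCP_IO_RECV is used the same way on the receiving side, with the
 * peer supplying the data.
 */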

#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signal.h>
#include <sys/socketvar.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/kttcpio.h>

static int kttcp_send(struct proc *p, struct kttcp_io_args *);
static int kttcp_recv(struct proc *p, struct kttcp_io_args *);
static int kttcp_sosend(struct socket *, unsigned long long,
            unsigned long long *, struct proc *, int);
static int kttcp_soreceive(struct socket *, unsigned long long,
            unsigned long long *, struct proc *, int *);

void    kttcpattach(int);

cdev_decl(kttcp);

void
kttcpattach(int count)
{
        /* Do nothing. */
}

int
kttcpopen(dev_t dev, int flags, int fmt, struct proc *p)
{

        /* Always succeeds. */
        return (0);
}

int
kttcpclose(dev_t dev, int flags, int fmt, struct proc *p)
{

        /* Always succeeds. */
        return (0);
}

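/*
 * ioctl entry point: dispatch KTTCP_IO_SEND and KTTCP_IO_RECV requests.
 * The device must be open for writing.
 */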
int
kttcpioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
        int error;

        if ((flag & FWRITE) == 0)
                return EPERM;

        switch (cmd) {
        case KTTCP_IO_SEND:
                error = kttcp_send(p, (struct kttcp_io_args *) data);
                break;

        case KTTCP_IO_RECV:
                error = kttcp_recv(p, (struct kttcp_io_args *) data);
                break;

        default:
                return EINVAL;
        }

        return error;
}

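/*
 * KTTCP_IO_SEND: send kio_totalsize bytes out the supplied socket,
 * timing the transfer with microtime().  The data is sourced entirely
 * in the kernel; nothing is copied in from userland.
 */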
static int
kttcp_send(struct proc *p, struct kttcp_io_args *kio)
{
        struct file *fp;
        int error;
        struct timeval t0, t1;
        unsigned long long len, done;

        if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
                return EINVAL;

        fp = fd_getfile(p->p_fd, kio->kio_socket);
        if (fp == NULL)
                return EBADF;
        if (fp->f_type != DTYPE_SOCKET)
                return EFTYPE;

        len = kio->kio_totalsize;
        microtime(&t0);
        do {
                error = kttcp_sosend((struct socket *)fp->f_data, len,
                    &done, p, 0);
                len -= done;
        } while (error == 0 && len > 0);
        microtime(&t1);
        if (error != 0)
                return error;
        timersub(&t1, &t0, &kio->kio_elapsed);

        kio->kio_bytesdone = kio->kio_totalsize - len;

        return 0;
}

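/*
 * KTTCP_IO_RECV: receive up to kio_totalsize bytes from the supplied
 * socket and discard them, timing the transfer with microtime().
 */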
175
176 static int
177 kttcp_recv(struct proc *p, struct kttcp_io_args *kio)
178 {
179 struct file *fp;
180 int error;
181 struct timeval t0, t1;
182 unsigned long long len, done;
183
184 if (kio->kio_totalsize > KTTCP_MAX_XMIT)
185 return EINVAL;
186
187 fp = fd_getfile(p->p_fd, kio->kio_socket);
188 if (fp == NULL || fp->f_type != DTYPE_SOCKET)
189 return EBADF;
190 len = kio->kio_totalsize;
191 microtime(&t0);
192 do {
193 error = kttcp_soreceive((struct socket *)fp->f_data,
194 len, &done, p, NULL);
195 len -= done;
196 } while (error == 0 && len > 0 && done > 0);
197 microtime(&t1);
198 if (error == EPIPE)
199 error = 0;
200 if (error != 0)
201 return error;
202 timersub(&t1, &t0, &kio->kio_elapsed);
203
204 kio->kio_bytesdone = kio->kio_totalsize - len;
205
206 return 0;
207 }
208
209 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
210
/*
 * Slightly changed version of sosend(): no data is copied in from
 * userland, so the mbufs go out with whatever contents they happen
 * to have; only the amount of data moved matters here.
 */
static int
kttcp_sosend(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct proc *p, int flags)
{
        struct mbuf **mp, *m, *top;
        long space, len, mlen;
        int error, s, dontroute, atomic;
        long long resid;

        atomic = sosendallatonce(so);
        resid = slen;
        top = NULL;
        /*
         * In theory resid should be unsigned.
         * However, space must be signed, as it might be less than 0
         * if we over-committed, and we must use a signed comparison
         * of space and resid.  On the other hand, a negative resid
         * causes us to loop sending 0-length segments to the protocol.
         */
        if (resid < 0) {
                error = EINVAL;
                goto out;
        }
        dontroute =
            (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
            (so->so_proto->pr_flags & PR_ATOMIC);
        p->p_stats->p_ru.ru_msgsnd++;
#define snderr(errno)   { error = errno; splx(s); goto release; }

 restart:
        if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
                goto out;
        do {
                s = splsoftnet();
                if (so->so_state & SS_CANTSENDMORE)
                        snderr(EPIPE);
                if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        splx(s);
                        goto release;
                }
                if ((so->so_state & SS_ISCONNECTED) == 0) {
                        if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
                                if ((so->so_state & SS_ISCONFIRMING) == 0)
                                        snderr(ENOTCONN);
                        } else
                                snderr(EDESTADDRREQ);
                }
                space = sbspace(&so->so_snd);
                if (flags & MSG_OOB)
                        space += 1024;
                if ((atomic && resid > so->so_snd.sb_hiwat))
                        snderr(EMSGSIZE);
                if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
                        if (so->so_state & SS_NBIO)
                                snderr(EWOULDBLOCK);
                        SBLASTRECORDCHK(&so->so_rcv,
                            "kttcp_soreceive sbwait 1");
                        SBLASTMBUFCHK(&so->so_rcv,
                            "kttcp_soreceive sbwait 1");
                        sbunlock(&so->so_snd);
                        error = sbwait(&so->so_snd);
                        splx(s);
                        if (error)
                                goto out;
                        goto restart;
                }
                splx(s);
                mp = &top;
                do {
                        do {
                                if (top == 0) {
                                        MGETHDR(m, M_WAIT, MT_DATA);
                                        mlen = MHLEN;
                                        m->m_pkthdr.len = 0;
                                        m->m_pkthdr.rcvif = (struct ifnet *)0;
                                } else {
                                        MGET(m, M_WAIT, MT_DATA);
                                        mlen = MLEN;
                                }
                                if (resid >= MINCLSIZE && space >= MCLBYTES) {
                                        MCLGET(m, M_WAIT);
                                        if ((m->m_flags & M_EXT) == 0)
                                                goto nopages;
                                        mlen = MCLBYTES;
#ifdef MAPPED_MBUFS
                                        len = lmin(MCLBYTES, resid);
#else
                                        if (atomic && top == 0) {
                                                len = lmin(MCLBYTES - max_hdr,
                                                    resid);
                                                m->m_data += max_hdr;
                                        } else
                                                len = lmin(MCLBYTES, resid);
#endif
                                        space -= len;
                                } else {
 nopages:
                                        len = lmin(lmin(mlen, resid), space);
                                        space -= len;
                                        /*
                                         * For datagram protocols, leave room
                                         * for protocol headers in first mbuf.
                                         */
                                        if (atomic && top == 0 && len < mlen)
                                                MH_ALIGN(m, len);
                                }
                                resid -= len;
                                m->m_len = len;
                                *mp = m;
                                top->m_pkthdr.len += len;
                                if (error)
                                        goto release;
                                mp = &m->m_next;
                                if (resid <= 0) {
                                        if (flags & MSG_EOR)
                                                top->m_flags |= M_EOR;
                                        break;
                                }
                        } while (space > 0 && atomic);

                        s = splsoftnet();

                        if (so->so_state & SS_CANTSENDMORE)
                                snderr(EPIPE);

                        if (dontroute)
                                so->so_options |= SO_DONTROUTE;
                        if (resid > 0)
                                so->so_state |= SS_MORETOCOME;
                        error = (*so->so_proto->pr_usrreq)(so,
                            (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
                            top, NULL, NULL, p);
                        if (dontroute)
                                so->so_options &= ~SO_DONTROUTE;
                        if (resid > 0)
                                so->so_state &= ~SS_MORETOCOME;
                        splx(s);

                        top = 0;
                        mp = &top;
                        if (error)
                                goto release;
                } while (resid && space > 0);
        } while (resid);

 release:
        sbunlock(&so->so_snd);
 out:
        if (top)
                m_freem(top);
        *done = slen - resid;
#if 0
        printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
        return (error);
}

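/*
 * Slightly changed version of soreceive(): the received data is
 * discarded in the kernel instead of being copied out to userland.
 */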
static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct proc *p, int *flagsp)
{
        struct mbuf *m, **mp;
        int flags, len, error, s, offset, moff, type;
        long long orig_resid, resid;
        struct protosw *pr;
        struct mbuf *nextrecord;

        pr = so->so_proto;
        mp = NULL;
        type = 0;
        resid = orig_resid = slen;
        if (flagsp)
                flags = *flagsp &~ MSG_EOR;
        else
                flags = 0;
        if (flags & MSG_OOB) {
                m = m_get(M_WAIT, MT_DATA);
                error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
                    (struct mbuf *)(long)(flags & MSG_PEEK), (struct mbuf *)0,
                    (struct proc *)0);
                if (error)
                        goto bad;
                do {
                        resid -= min(resid, m->m_len);
                        m = m_free(m);
                } while (resid && error == 0 && m);
 bad:
                if (m)
                        m_freem(m);
                return (error);
        }
        if (mp)
                *mp = (struct mbuf *)0;
        if (so->so_state & SS_ISCONFIRMING && resid)
                (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
                    (struct mbuf *)0, (struct mbuf *)0, (struct proc *)0);

 restart:
        if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
                return (error);
        s = splsoftnet();

        m = so->so_rcv.sb_mb;
        /*
         * If we have less data than requested, block awaiting more
         * (subject to any timeout) if:
         *   1. the current count is less than the low water mark,
         *   2. MSG_WAITALL is set, and it is possible to do the entire
         *      receive operation at once if we block (resid <= hiwat), or
         *   3. MSG_DONTWAIT is not set.
         * If MSG_WAITALL is set but resid is larger than the receive buffer,
         * we have to do the receive in sections, and thus risk returning
         * a short count if a timeout or signal occurs after we start.
         */
        if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
            so->so_rcv.sb_cc < resid) &&
            (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
            ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
            m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
                if (m == 0 && so->so_rcv.sb_cc)
                        panic("receive 1");
#endif
                if (so->so_error) {
                        if (m)
                                goto dontblock;
                        error = so->so_error;
                        if ((flags & MSG_PEEK) == 0)
                                so->so_error = 0;
                        goto release;
                }
                if (so->so_state & SS_CANTRCVMORE) {
                        if (m)
                                goto dontblock;
                        else
                                goto release;
                }
                for (; m; m = m->m_next)
                        if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                                m = so->so_rcv.sb_mb;
                                goto dontblock;
                        }
                if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
                    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
                        error = ENOTCONN;
                        goto release;
                }
                if (resid == 0)
                        goto release;
                if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
                        error = EWOULDBLOCK;
                        goto release;
                }
                sbunlock(&so->so_rcv);
                error = sbwait(&so->so_rcv);
                splx(s);
                if (error)
                        return (error);
                goto restart;
        }
 dontblock:
        /*
         * On entry here, m points to the first record of the socket buffer.
         * While we process the initial mbufs containing address and control
         * info, we save a copy of m->m_nextpkt into nextrecord.
         */
#ifdef notyet /* XXXX */
        if (uio->uio_procp)
                uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
#endif
        KASSERT(m == so->so_rcv.sb_mb);
        SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
        SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
        nextrecord = m->m_nextpkt;
        if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
                if (m->m_type != MT_SONAME)
                        panic("receive 1a");
#endif
                orig_resid = 0;
                if (flags & MSG_PEEK) {
                        m = m->m_next;
                } else {
                        sbfree(&so->so_rcv, m);
                        MFREE(m, so->so_rcv.sb_mb);
                        m = so->so_rcv.sb_mb;
                }
        }
        while (m && m->m_type == MT_CONTROL && error == 0) {
                if (flags & MSG_PEEK) {
                        m = m->m_next;
                } else {
                        sbfree(&so->so_rcv, m);
                        MFREE(m, so->so_rcv.sb_mb);
                        m = so->so_rcv.sb_mb;
                }
        }

        /*
         * If m is non-NULL, we have some data to read.  From now on,
         * make sure to keep sb_lastrecord consistent when working on
         * the last packet on the chain (nextrecord == NULL) and we
         * change m->m_nextpkt.
         */
        if (m) {
                if ((flags & MSG_PEEK) == 0) {
                        m->m_nextpkt = nextrecord;
                        /*
                         * If nextrecord == NULL (this is a single chain),
                         * then sb_lastrecord may not be valid here if m
                         * was changed earlier.
                         */
                        if (nextrecord == NULL) {
                                KASSERT(so->so_rcv.sb_mb == m);
                                so->so_rcv.sb_lastrecord = m;
                        }
                }
                type = m->m_type;
                if (type == MT_OOBDATA)
                        flags |= MSG_OOB;
        } else {
                if ((flags & MSG_PEEK) == 0) {
                        KASSERT(so->so_rcv.sb_mb == m);
                        so->so_rcv.sb_mb = nextrecord;
                        SB_UPDATE_TAIL(&so->so_rcv);
                }
        }
        SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
        SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

        moff = 0;
        offset = 0;
        while (m && resid > 0 && error == 0) {
                if (m->m_type == MT_OOBDATA) {
                        if (type != MT_OOBDATA)
                                break;
                } else if (type == MT_OOBDATA)
                        break;
#ifdef DIAGNOSTIC
                else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
                        panic("receive 3");
#endif
                so->so_state &= ~SS_RCVATMARK;
                len = resid;
                if (so->so_oobmark && len > so->so_oobmark - offset)
                        len = so->so_oobmark - offset;
                if (len > m->m_len - moff)
                        len = m->m_len - moff;
                /*
                 * If mp is set, just pass back the mbufs.
                 * Otherwise copy them out via the uio, then free.
                 * Sockbuf must be consistent here (points to current mbuf,
                 * it points to next record) when we drop priority;
                 * we must note any additions to the sockbuf when we
                 * block interrupts again.
                 */
                resid -= len;
                if (len == m->m_len - moff) {
                        if (m->m_flags & M_EOR)
                                flags |= MSG_EOR;
                        if (flags & MSG_PEEK) {
                                m = m->m_next;
                                moff = 0;
                        } else {
                                nextrecord = m->m_nextpkt;
                                sbfree(&so->so_rcv, m);
                                if (mp) {
                                        *mp = m;
                                        mp = &m->m_next;
                                        so->so_rcv.sb_mb = m = m->m_next;
                                        *mp = (struct mbuf *)0;
                                } else {
                                        MFREE(m, so->so_rcv.sb_mb);
                                        m = so->so_rcv.sb_mb;
                                }
                                /*
                                 * If m != NULL, we also know that
                                 * so->so_rcv.sb_mb != NULL.
                                 */
                                KASSERT(so->so_rcv.sb_mb == m);
                                if (m) {
                                        m->m_nextpkt = nextrecord;
                                        if (nextrecord == NULL)
                                                so->so_rcv.sb_lastrecord = m;
                                } else {
                                        so->so_rcv.sb_mb = nextrecord;
                                        SB_UPDATE_TAIL(&so->so_rcv);
                                }
                                SBLASTRECORDCHK(&so->so_rcv,
                                    "kttcp_soreceive 3");
                                SBLASTMBUFCHK(&so->so_rcv,
                                    "kttcp_soreceive 3");
                        }
                } else {
                        if (flags & MSG_PEEK)
                                moff += len;
                        else {
                                if (mp)
                                        *mp = m_copym(m, 0, len, M_WAIT);
                                m->m_data += len;
                                m->m_len -= len;
                                so->so_rcv.sb_cc -= len;
                        }
                }
                if (so->so_oobmark) {
                        if ((flags & MSG_PEEK) == 0) {
                                so->so_oobmark -= len;
                                if (so->so_oobmark == 0) {
                                        so->so_state |= SS_RCVATMARK;
                                        break;
                                }
                        } else {
                                offset += len;
                                if (offset == so->so_oobmark)
                                        break;
                        }
                }
                if (flags & MSG_EOR)
                        break;
                /*
                 * If the MSG_WAITALL flag is set (for non-atomic socket),
                 * we must not quit until "uio->uio_resid == 0" or an error
                 * termination.  If a signal/timeout occurs, return
                 * with a short count but without error.
                 * Keep sockbuf locked against other readers.
                 */
                while (flags & MSG_WAITALL && m == 0 && resid > 0 &&
                    !sosendallatonce(so) && !nextrecord) {
                        if (so->so_error || so->so_state & SS_CANTRCVMORE)
                                break;
                        /*
                         * If we are peeking and the socket receive buffer is
                         * full, stop since we can't get more data to peek at.
                         */
                        if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
                                break;
                        /*
                         * If we've drained the socket buffer, tell the
                         * protocol in case it needs to do something to
                         * get it filled again.
                         */
                        if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
                                (*pr->pr_usrreq)(so, PRU_RCVD,
                                    (struct mbuf *)0,
                                    (struct mbuf *)(long)flags,
                                    (struct mbuf *)0,
                                    (struct proc *)0);
                        SBLASTRECORDCHK(&so->so_rcv,
                            "kttcp_soreceive sbwait 2");
                        SBLASTMBUFCHK(&so->so_rcv,
                            "kttcp_soreceive sbwait 2");
                        error = sbwait(&so->so_rcv);
                        if (error) {
                                sbunlock(&so->so_rcv);
                                splx(s);
                                return (0);
                        }
                        if ((m = so->so_rcv.sb_mb) != NULL)
                                nextrecord = m->m_nextpkt;
                }
        }

        if (m && pr->pr_flags & PR_ATOMIC) {
                flags |= MSG_TRUNC;
                if ((flags & MSG_PEEK) == 0)
                        (void) sbdroprecord(&so->so_rcv);
        }
        if ((flags & MSG_PEEK) == 0) {
                if (m == 0) {
                        /*
                         * First part is an SB_UPDATE_TAIL().  Second part
                         * makes sure sb_lastrecord is up-to-date if
                         * there is still data in the socket buffer.
                         */
                        so->so_rcv.sb_mb = nextrecord;
                        if (so->so_rcv.sb_mb == NULL) {
                                so->so_rcv.sb_mbtail = NULL;
                                so->so_rcv.sb_lastrecord = NULL;
                        } else if (nextrecord->m_nextpkt == NULL)
                                so->so_rcv.sb_lastrecord = nextrecord;
                }
                SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
                SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
                if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
                        (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
                            (struct mbuf *)(long)flags, (struct mbuf *)0,
                            (struct proc *)0);
        }
        if (orig_resid == resid && orig_resid &&
            (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
                sbunlock(&so->so_rcv);
                splx(s);
                goto restart;
        }

        if (flagsp)
                *flagsp |= flags;
 release:
        sbunlock(&so->so_rcv);
        splx(s);
        *done = slen - resid;
#if 0
        printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
        return (error);
}