/*	$NetBSD: kttcp.c,v 1.10 2003/06/28 14:21:31 darrenr Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Frank van der Linden and Jason R. Thorpe for
8 * Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
24 * written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * kttcp.c --
41 *
42 * This module provides kernel support for testing network
43 * throughput from the perspective of the kernel. It is
44 * similar in spirit to the classic ttcp network benchmark
45 * program, the main difference being that with kttcp, the
46 * kernel is the source and sink of the data.
47 *
48 * Testing like this is useful for a few reasons:
49 *
50 * 1. This allows us to know what kind of performance we can
51 * expect from network applications that run in the kernel
52 * space, such as the NFS server or the NFS client. These
53 * applications don't have to move the data to/from userspace,
54 * and so benchmark programs which run in userspace don't
55 * give us an accurate model.
56 *
57 * 2. Since data received is just thrown away, the receiver
58 * is very fast. This can provide better exercise for the
59 * sender at the other end.
60 *
61 * 3. Since the NetBSD kernel currently uses a run-to-completion
62 * scheduling model, kttcp provides a benchmark model where
63 * preemption of the benchmark program is not an issue.
64 */
65
66 #include <sys/param.h>
67 #include <sys/types.h>
68 #include <sys/ioctl.h>
69 #include <sys/file.h>
70 #include <sys/filedesc.h>
71 #include <sys/conf.h>
72 #include <sys/systm.h>
73 #include <sys/protosw.h>
74 #include <sys/proc.h>
75 #include <sys/resourcevar.h>
76 #include <sys/signal.h>
77 #include <sys/socketvar.h>
78 #include <sys/socket.h>
79 #include <sys/mbuf.h>
80 #include <sys/sa.h>
81 #include <sys/mount.h>
82 #include <sys/syscallargs.h>
83
84 #include <dev/kttcpio.h>
85
86 static int kttcp_send(struct proc *p, struct kttcp_io_args *);
87 static int kttcp_recv(struct proc *p, struct kttcp_io_args *);
88 static int kttcp_sosend(struct socket *, unsigned long long,
89 unsigned long long *, struct proc *, int);
90 static int kttcp_soreceive(struct socket *, unsigned long long,
91 unsigned long long *, struct proc *, int *);
92
93 void kttcpattach(int);
94
95 dev_type_ioctl(kttcpioctl);
96
/*
 * Character device switch for /dev/kttcp: only ioctl does real work;
 * open/close are no-ops and read/write/poll/mmap/kqfilter are rejected.
 */
const struct cdevsw kttcp_cdevsw = {
	nullopen, nullclose, noread, nowrite, kttcpioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};
101
/*
 * kttcpattach: pseudo-device attach routine.  There is no per-unit
 * state to initialize, so this is intentionally a no-op.
 */
void
kttcpattach(int count)
{
	/* Do nothing. */
}
107
108 int
109 kttcpioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
110 {
111 int error;
112
113 if ((flag & FWRITE) == 0)
114 return EPERM;
115
116 switch (cmd) {
117 case KTTCP_IO_SEND:
118 error = kttcp_send(p, (struct kttcp_io_args *) data);
119 break;
120
121 case KTTCP_IO_RECV:
122 error = kttcp_recv(p, (struct kttcp_io_args *) data);
123 break;
124
125 default:
126 return EINVAL;
127 }
128
129 return error;
130 }
131
132 static int
133 kttcp_send(struct proc *p, struct kttcp_io_args *kio)
134 {
135 struct file *fp;
136 int error;
137 struct timeval t0, t1;
138 unsigned long long len, done;
139
140 if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
141 return EINVAL;
142
143 fp = fd_getfile(p->p_fd, kio->kio_socket);
144 if (fp == NULL)
145 return EBADF;
146 FILE_USE(fp);
147 if (fp->f_type != DTYPE_SOCKET) {
148 FILE_UNUSE(fp, p);
149 return EFTYPE;
150 }
151
152 len = kio->kio_totalsize;
153 microtime(&t0);
154 do {
155 error = kttcp_sosend((struct socket *)fp->f_data, len,
156 &done, p, 0);
157 len -= done;
158 } while (error == 0 && len > 0);
159
160 FILE_UNUSE(fp, p);
161
162 microtime(&t1);
163 if (error != 0)
164 return error;
165 timersub(&t1, &t0, &kio->kio_elapsed);
166
167 kio->kio_bytesdone = kio->kio_totalsize - len;
168
169 return 0;
170 }
171
172 static int
173 kttcp_recv(struct proc *p, struct kttcp_io_args *kio)
174 {
175 struct file *fp;
176 int error;
177 struct timeval t0, t1;
178 unsigned long long len, done;
179
180 if (kio->kio_totalsize > KTTCP_MAX_XMIT)
181 return EINVAL;
182
183 fp = fd_getfile(p->p_fd, kio->kio_socket);
184 if (fp == NULL)
185 return EBADF;
186 FILE_USE(fp);
187 if (fp->f_type != DTYPE_SOCKET) {
188 FILE_UNUSE(fp, p);
189 return EBADF;
190 }
191 len = kio->kio_totalsize;
192 microtime(&t0);
193 do {
194 error = kttcp_soreceive((struct socket *)fp->f_data,
195 len, &done, p, NULL);
196 len -= done;
197 } while (error == 0 && len > 0 && done > 0);
198
199 FILE_UNUSE(fp, p);
200
201 microtime(&t1);
202 if (error == EPIPE)
203 error = 0;
204 if (error != 0)
205 return error;
206 timersub(&t1, &t0, &kio->kio_elapsed);
207
208 kio->kio_bytesdone = kio->kio_totalsize - len;
209
210 return 0;
211 }
212
213 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
214
215 /*
216 * Slightly changed version of sosend()
217 */
218 static int
219 kttcp_sosend(struct socket *so, unsigned long long slen,
220 unsigned long long *done, struct proc *p, int flags)
221 {
222 struct mbuf **mp, *m, *top;
223 long space, len, mlen;
224 int error, s, dontroute, atomic;
225 long long resid;
226
227 atomic = sosendallatonce(so);
228 resid = slen;
229 top = NULL;
230 /*
231 * In theory resid should be unsigned.
232 * However, space must be signed, as it might be less than 0
233 * if we over-committed, and we must use a signed comparison
234 * of space and resid. On the other hand, a negative resid
235 * causes us to loop sending 0-length segments to the protocol.
236 */
237 if (resid < 0) {
238 error = EINVAL;
239 goto out;
240 }
241 dontroute =
242 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
243 (so->so_proto->pr_flags & PR_ATOMIC);
244 p->p_stats->p_ru.ru_msgsnd++;
245 #define snderr(errno) { error = errno; splx(s); goto release; }
246
247 restart:
248 if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
249 goto out;
250 do {
251 s = splsoftnet();
252 if (so->so_state & SS_CANTSENDMORE)
253 snderr(EPIPE);
254 if (so->so_error) {
255 error = so->so_error;
256 so->so_error = 0;
257 splx(s);
258 goto release;
259 }
260 if ((so->so_state & SS_ISCONNECTED) == 0) {
261 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
262 if ((so->so_state & SS_ISCONFIRMING) == 0)
263 snderr(ENOTCONN);
264 } else
265 snderr(EDESTADDRREQ);
266 }
267 space = sbspace(&so->so_snd);
268 if (flags & MSG_OOB)
269 space += 1024;
270 if ((atomic && resid > so->so_snd.sb_hiwat))
271 snderr(EMSGSIZE);
272 if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
273 if (so->so_state & SS_NBIO)
274 snderr(EWOULDBLOCK);
275 SBLASTRECORDCHK(&so->so_rcv,
276 "kttcp_soreceive sbwait 1");
277 SBLASTMBUFCHK(&so->so_rcv,
278 "kttcp_soreceive sbwait 1");
279 sbunlock(&so->so_snd);
280 error = sbwait(&so->so_snd);
281 splx(s);
282 if (error)
283 goto out;
284 goto restart;
285 }
286 splx(s);
287 mp = ⊤
288 do {
289 do {
290 if (top == 0) {
291 m = m_gethdr(M_WAIT, MT_DATA);
292 mlen = MHLEN;
293 m->m_pkthdr.len = 0;
294 m->m_pkthdr.rcvif = (struct ifnet *)0;
295 } else {
296 m = m_get(M_WAIT, MT_DATA);
297 mlen = MLEN;
298 }
299 if (resid >= MINCLSIZE && space >= MCLBYTES) {
300 m_clget(m, M_WAIT);
301 if ((m->m_flags & M_EXT) == 0)
302 goto nopages;
303 mlen = MCLBYTES;
304 #ifdef MAPPED_MBUFS
305 len = lmin(MCLBYTES, resid);
306 #else
307 if (atomic && top == 0) {
308 len = lmin(MCLBYTES - max_hdr,
309 resid);
310 m->m_data += max_hdr;
311 } else
312 len = lmin(MCLBYTES, resid);
313 #endif
314 space -= len;
315 } else {
316 nopages:
317 len = lmin(lmin(mlen, resid), space);
318 space -= len;
319 /*
320 * For datagram protocols, leave room
321 * for protocol headers in first mbuf.
322 */
323 if (atomic && top == 0 && len < mlen)
324 MH_ALIGN(m, len);
325 }
326 resid -= len;
327 m->m_len = len;
328 *mp = m;
329 top->m_pkthdr.len += len;
330 if (error)
331 goto release;
332 mp = &m->m_next;
333 if (resid <= 0) {
334 if (flags & MSG_EOR)
335 top->m_flags |= M_EOR;
336 break;
337 }
338 } while (space > 0 && atomic);
339
340 s = splsoftnet();
341
342 if (so->so_state & SS_CANTSENDMORE)
343 snderr(EPIPE);
344
345 if (dontroute)
346 so->so_options |= SO_DONTROUTE;
347 if (resid > 0)
348 so->so_state |= SS_MORETOCOME;
349 error = (*so->so_proto->pr_usrreq)(so,
350 (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
351 top, NULL, NULL, p);
352 if (dontroute)
353 so->so_options &= ~SO_DONTROUTE;
354 if (resid > 0)
355 so->so_state &= ~SS_MORETOCOME;
356 splx(s);
357
358 top = 0;
359 mp = ⊤
360 if (error)
361 goto release;
362 } while (resid && space > 0);
363 } while (resid);
364
365 release:
366 sbunlock(&so->so_snd);
367 out:
368 if (top)
369 m_freem(top);
370 *done = slen - resid;
371 #if 0
372 printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
373 #endif
374 return (error);
375 }
376
/*
 * Slightly changed version of soreceive().
 *
 * Receives up to 'slen' bytes from 'so' and throws the data away
 * (no uiomove to userspace).  On the normal exit paths *done is set
 * to the number of bytes consumed (slen - resid).  The only caller,
 * kttcp_recv(), passes flagsp == NULL, so flags start at 0 and the
 * MSG_OOB / MSG_PEEK branches below are retained from soreceive()
 * but never taken in practice.
 */
static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct proc *p, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset, moff, type;
	long long orig_resid, resid;
	struct protosw *pr;
	struct mbuf *nextrecord;

	pr = so->so_proto;
	/* mp is never reassigned to non-NULL: the "pass mbufs back to the
	 * caller" mode of soreceive() is dead code in this copy. */
	mp = NULL;
	type = 0;
	resid = orig_resid = slen;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		/* Out-of-band data: fetch via PRU_RCVOOB and discard. */
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), (struct mbuf *)0,
		    (struct proc *)0);
		if (error)
			goto bad;
		do {
			resid -= min(resid, m->m_len);
			m = m_free(m);
		} while (resid && error == 0 && m);
 bad:
		if (m)
			m_freem(m);
		/* NOTE(review): *done is left unset on this return path. */
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0, (struct proc *)0);

 restart:
	/* NOTE(review): *done is left unset if sblock() fails here. */
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	s = splsoftnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == 0 && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			/* Consume pending data (if any) before reporting
			 * the stored socket error. */
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		/* An OOB or record-boundary mbuf means we can proceed
		 * without blocking. */
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		/* Wait for data to arrive, then re-evaluate from scratch. */
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			/* NOTE(review): *done left unset on this path. */
			return (error);
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
#ifdef notyet /* XXXX */
	if (uio->uio_lwp)
		uio->uio_lwp->l_proc->p_stats->p_ru.ru_msgrcv++;
#endif
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		/* Address-bearing protocols prefix each record with an
		 * MT_SONAME mbuf; strip (or skip, when peeking) it. */
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	/* Likewise strip/skip any leading control mbufs. */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

	moff = 0;
	offset = 0;
	/* Main consume loop: walk the record, discarding data. */
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 *
		 * (In this copy no uiomove is done -- the data is simply
		 * counted and dropped.)
		 */
		resid -= len;
		if (len == m->m_len - moff) {
			/* Consumed the whole mbuf: unlink and free it. */
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
			}
		} else {
			/* Partial consume: advance within the mbuf. */
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			/* Stop at the out-of-band mark. */
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreq)(so, PRU_RCVD,
				    (struct mbuf *)0,
				    (struct mbuf *)(long)flags,
				    (struct mbuf *)0,
				    (struct proc *)0);
			SBLASTRECORDCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error) {
				/*
				 * Per the comment above: a signal/timeout
				 * yields a short count, not an error, hence
				 * return (0) and not return (error).
				 * NOTE(review): *done is left unset here.
				 */
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		/* Datagram protocols: drop the unread tail of the record. */
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0) {
			/*
			 * First part is an SB_EMPTY_FIXUP().  Second part
			 * makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)(long)flags, (struct mbuf *)0,
			    (struct proc *)0);
	}
	/* Nothing consumed and no EOF/EOR: go back and wait for data. */
	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	splx(s);
	*done = slen - resid;
#if 0
	printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}
726