/*	$NetBSD: uipc_mbuf.c,v 1.42 1999/04/26 22:04:28 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <net/if.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

struct	pool mbpool;		/* mbuf pool */
struct	pool mclpool;		/* mbuf cluster pool */

struct mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

void	*mclpool_alloc __P((unsigned long, int, int));
void	mclpool_release __P((void *, unsigned long, int));

const char *mclpool_warnmsg =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

/*
 * Initialize the mbuf allocator.
 */
void
mbinit()
{

	pool_init(&mbpool, msize, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
	pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", 0, mclpool_alloc,
	    mclpool_release, 0);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the
	 * limit-reached message at most once a minute.
	 */
	pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}

int
sysctl_dombuf(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	int error, newval;

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case MBUF_MSIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, msize));
	case MBUF_MCLBYTES:
		return (sysctl_rdint(oldp, oldlenp, newp, mclbytes));
	case MBUF_NMBCLUSTERS:
		/*
		 * If we have direct-mapped pool pages, we can adjust this
		 * number on the fly.  If not, we're limited by the size
		 * of mb_map, and cannot change this value.
		 *
		 * Note: we only allow the value to be increased, never
		 * decreased.
		 */
		if (mb_map == NULL) {
			newval = nmbclusters;
			error = sysctl_int(oldp, oldlenp, newp, newlen,
			    &newval);
			if (error != 0)
				return (error);
			if (newp != NULL) {
				if (newval >= nmbclusters) {
					nmbclusters = newval;
					pool_sethardlimit(&mclpool,
					    nmbclusters, mclpool_warnmsg, 60);
				} else
					error = EINVAL;
			}
			return (error);
		} else
			return (sysctl_rdint(oldp, oldlenp, newp, nmbclusters));
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		/* New value must be >= 0. */
		newval = (name[0] == MBUF_MBLOWAT) ? mblowat : mcllowat;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error != 0)
			return (error);
		if (newp != NULL) {
			if (newval >= 0) {
				if (name[0] == MBUF_MBLOWAT) {
					mblowat = newval;
					pool_setlowat(&mbpool, newval);
				} else {
					mcllowat = newval;
					pool_setlowat(&mclpool, newval);
				}
			} else
				error = EINVAL;
		}
		return (error);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
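
/*
 * Usage sketch, not part of the original file: a userland program
 * could read these values through sysctl(3).  This assumes the
 * mbuf node hangs off CTL_KERN as KERN_MBUF, per <sys/sysctl.h>;
 * treat the exact MIB path as an assumption.
 */
#ifdef notdef
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <stdio.h>

int
print_nmbclusters(void)
{
	int mib[3], val;
	size_t len = sizeof(val);

	mib[0] = CTL_KERN;
	mib[1] = KERN_MBUF;		/* assumed parent node */
	mib[2] = MBUF_NMBCLUSTERS;	/* handled by sysctl_dombuf() above */
	if (sysctl(mib, 3, &val, &len, NULL, 0) == -1)
		return (-1);
	printf("nmbclusters: %d\n", val);
	return (0);
}
#endif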

void *
mclpool_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(mb_map, uvmexp.mb_object,
	    waitok));
}

void
mclpool_release(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

	uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

void
m_reclaim(how)
	int how;
{
	struct domain *dp;
	struct protosw *pr;
	struct ifnet *ifp;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list))
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	splx(s);
	mbstat.m_drain++;
}
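
/*
 * Sketch (an assumption, not from this file): a protocol exposes
 * reclaimable memory by setting pr_drain in its protosw entry;
 * m_reclaim() above then calls the hook when allocation fails.
 * The xx_ names are hypothetical.
 */
#ifdef notdef
static struct mbuf *xx_cache;		/* hypothetical private cache */

void
xx_drain(void)
{

	/* Called at splimp() from m_reclaim(); release cached mbufs. */
	m_freem(xx_cache);
	xx_cache = NULL;
}
#endif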

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	memset(mtod(m, caddr_t), 0, MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	struct mbuf *m;
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}
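
/*
 * Usage sketch (hypothetical caller, not part of this file): the
 * typical allocate/extend/free lifecycle built from the routines
 * above.  A single cluster holds at most MCLBYTES bytes, so larger
 * payloads would need a chain.
 */
#ifdef notdef
static struct mbuf *
xx_alloc_pkt(int len)
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if (len > MHLEN) {
		/* Header mbuf too small; attach a cluster. */
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (NULL);
		}
	}
	m->m_len = m->m_pkthdr.len = len;
	return (m);
}
#endif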

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
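
/*
 * Sketch (assumption): callers normally use the M_PREPEND() macro,
 * which takes the fast path when leading space already exists and
 * falls back to m_prepend() above otherwise.  The 14-byte length
 * stands in for an Ethernet header.
 */
#ifdef notdef
static struct mbuf *
xx_add_linkhdr(struct mbuf *m)
{

	M_PREPEND(m, 14, M_DONTWAIT);
	if (m == NULL)
		return (NULL);		/* chain already freed */
	return (m);
}
#endif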

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
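
/*
 * Usage sketch (hypothetical): retransmission-style copying, as a
 * transport protocol might do from a stored send chain.  Note that
 * cluster-backed data is shared by reference (MCLADDREFERENCE), not
 * duplicated.
 */
#ifdef notdef
static struct mbuf *
xx_copy_window(struct mbuf *stored, int off, int count)
{

	/* Copy "count" bytes at "off"; M_COPYALL would copy to the end. */
	return (m_copym(stored, off, count, M_DONTWAIT));
}
#endif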

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	struct mbuf *m;
	int off;
	int len;
	caddr_t cp;
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
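
/*
 * Sketch (assumption): gathering a header that may span mbufs into a
 * flat buffer, the usual alternative to m_pullup() when no pointer
 * into the chain is needed.  The 20-byte size stands in for an IP
 * header; the chain must hold at least off + len bytes or
 * m_copydata() panics.
 */
#ifdef notdef
static void
xx_peek_header(struct mbuf *m, char *buf)
{

	m_copydata(m, 0, 20, buf);
}
#endif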

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Neither chain's m_pkthdr is updated.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
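
/*
 * Usage sketch (hypothetical): m_adj() trims from the head for
 * positive lengths and from the tail for negative ones, e.g.
 * stripping a link header and trailing CRC from a received frame.
 */
#ifdef notdef
static void
xx_strip(struct mbuf *m)
{

	m_adj(m, 14);		/* drop 14 bytes from the head */
	m_adj(m, -4);		/* drop 4 bytes from the tail */
}
#endif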

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
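
/*
 * Sketch (assumption): the classic input-path idiom, making the
 * first hdrlen bytes contiguous before reading them through mtod().
 * On failure the chain has already been freed.
 */
#ifdef notdef
static caddr_t
xx_get_header(struct mbuf **mp, int hdrlen)
{
	struct mbuf *m = *mp;

	if (m->m_len < hdrlen) {
		if ((m = m_pullup(m, hdrlen)) == NULL) {
			*mp = NULL;
			return (NULL);
		}
		*mp = m;
	}
	return (mtod(m, caddr_t));
}
#endif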

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
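
/*
 * Usage sketch (hypothetical): detach everything after the first
 * reclen bytes, e.g. to peel one record off a stream; m_split()
 * keeps both packet headers' length fields consistent and leaves
 * m0 intact on failure.
 */
#ifdef notdef
static struct mbuf *
xx_split_record(struct mbuf *m0, int reclen)
{

	return (m_split(m0, reclen, M_DONTWAIT));
}
#endif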

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((const void *from, void *to, size_t len));
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (0);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			memcpy(mtod(m, caddr_t), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
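
/*
 * Sketch (assumption): a driver receive path mapping a packet out of
 * board memory.  A NULL copy callback means plain memcpy; a driver
 * would pass its own routine for devices that need special access.
 */
#ifdef notdef
static struct mbuf *
xx_rxeof(char *buf, int totlen, struct ifnet *ifp)
{

	/* off0 == 0: no trailer encapsulation. */
	return (m_devget(buf, totlen, 0, ifp, NULL));
}
#endif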

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
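
/*
 * Usage sketch (hypothetical): scatter a computed value back into a
 * chain, e.g. storing a 16-bit checksum at a known offset; the chain
 * is extended with zero-filled mbufs if it is shorter than off + len.
 */
#ifdef notdef
static void
xx_store_cksum(struct mbuf *m0, int off, u_int16_t sum)
{

	m_copyback(m0, off, sizeof(sum), (caddr_t)&sum);
}
#endif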