/*	$NetBSD: uipc_mbuf.c,v 1.54 2001/09/15 20:36:37 chs Exp $	*/

/*-
 * Copyright (c) 1999, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <net/if.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

struct	pool mbpool;		/* mbuf pool */
struct	pool mclpool;		/* mbuf cluster pool */

struct pool_cache mbpool_cache;
struct pool_cache mclpool_cache;

struct mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

void	*mclpool_alloc __P((unsigned long, int, int));
void	mclpool_release __P((void *, unsigned long, int));
static struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));

const char *mclpool_warnmsg =
    "WARNING: mclpool limit reached; increase NMBCLUSTERS";

/*
 * Initialize the mbuf allocator.
 */
void
mbinit()
{

	pool_init(&mbpool, msize, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
	pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", 0, mclpool_alloc,
	    mclpool_release, 0);

	pool_cache_init(&mbpool_cache, &mbpool, NULL, NULL, NULL);
	pool_cache_init(&mclpool_cache, &mclpool, NULL, NULL, NULL);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the
	 * limit-reached message at most once a minute.
	 */
	pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}
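
/*
 * Note: the cluster limit is normally raised in the kernel
 * configuration file, e.g.:
 *
 *	options NMBCLUSTERS=2048
 *
 * On ports with direct-mapped pool pages it can also be raised at
 * run time through the kern.mbuf sysctl tree; see sysctl_dombuf()
 * below.
 */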

int
sysctl_dombuf(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	int error, newval;

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case MBUF_MSIZE:
		return (sysctl_rdint(oldp, oldlenp, newp, msize));
	case MBUF_MCLBYTES:
		return (sysctl_rdint(oldp, oldlenp, newp, mclbytes));
	case MBUF_NMBCLUSTERS:
		/*
		 * If we have direct-mapped pool pages, we can adjust this
		 * number on the fly.  If not, we're limited by the size
		 * of mb_map, and cannot change this value.
		 *
		 * Note: we only allow the value to be increased, never
		 * decreased.
		 */
		if (mb_map == NULL) {
			newval = nmbclusters;
			error = sysctl_int(oldp, oldlenp, newp, newlen,
			    &newval);
			if (error != 0)
				return (error);
			if (newp != NULL) {
				if (newval >= nmbclusters) {
					nmbclusters = newval;
					pool_sethardlimit(&mclpool,
					    nmbclusters, mclpool_warnmsg, 60);
				} else
					error = EINVAL;
			}
			return (error);
		} else
			return (sysctl_rdint(oldp, oldlenp, newp,
			    nmbclusters));
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		/* New value must be >= 0. */
		newval = (name[0] == MBUF_MBLOWAT) ? mblowat : mcllowat;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error != 0)
			return (error);
		if (newp != NULL) {
			if (newval >= 0) {
				if (name[0] == MBUF_MBLOWAT) {
					mblowat = newval;
					pool_setlowat(&mbpool, newval);
				} else {
					mcllowat = newval;
					pool_setlowat(&mclpool, newval);
				}
			} else
				error = EINVAL;
		}
		return (error);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
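
/*
 * Example (illustrative, not part of this file): a userland program
 * would read these values through sysctl(3).  The top-level MIB names
 * below are assumed from <sys/sysctl.h>:
 *
 *	int mib[3] = { CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS };
 *	int nclusters;
 *	size_t len = sizeof(nclusters);
 *
 *	if (sysctl(mib, 3, &nclusters, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */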

void *
mclpool_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(mb_map, NULL, waitok));
}

void
mclpool_release(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

	uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
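	/*
	 * The temporary #define below makes the m_retry() reference
	 * inside MGET() expand to a constant NULL, so a second
	 * allocation failure cannot recurse back into this function.
	 * m_retryhdr() below uses the same trick.
	 */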
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

void
m_reclaim(how)
	int how;
{
	struct domain *dp;
	struct protosw *pr;
	struct ifnet *ifp;
	int s = splvm();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list))
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	memset(mtod(m, caddr_t), 0, MLEN);
	return (m);
}
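
/*
 * Example (hypothetical caller): the usual driver pattern is to grab
 * a packet header mbuf and attach a cluster, checking M_EXT to see
 * whether the cluster was actually obtained (compare m_devget()
 * below):
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	MCLGET(m, M_DONTWAIT);
 *	if ((m->m_flags & M_EXT) == 0) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */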

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	struct mbuf *m;
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
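
/*
 * Example (illustrative): callers normally use the M_PREPEND() macro,
 * which takes the fast path when leading space is available and falls
 * back to m_prepend() otherwise.  The chain is freed on failure:
 *
 *	M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */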

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}

struct mbuf *
m_dup(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}

static struct mbuf *
m_copym0(m, off0, len, wait, deep)
	struct mbuf *m;
	int off0, wait;
	int len;
	int deep;	/* deep copy */
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym: off %d, len %d", off, len);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym: m == 0");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym: m == 0 and not COPYALL");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			if (!deep) {
				n->m_data = m->m_data + off;
				n->m_ext = m->m_ext;
				MCLADDREFERENCE(m, n);
			} else {
				/*
				 * we are unsure about the way m was allocated.
				 * copy into multiple MCLBYTES cluster mbufs.
				 */
				MCLGET(n, wait);
				n->m_len = 0;
				n->m_len = M_TRAILINGSPACE(n);
				n->m_len = min(n->m_len, len);
				n->m_len = min(n->m_len, m->m_len - off);
				memcpy(mtod(n, caddr_t),
				    mtod(m, caddr_t) + off,
				    (unsigned)n->m_len);
			}
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off += n->m_len;
#ifdef DIAGNOSTIC
		if (off > m->m_len)
			panic("m_copym0 overrun");
#endif
		if (off == m->m_len) {
			m = m->m_next;
			off = 0;
		}
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
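
/*
 * Example: m_copym() shares cluster storage with the original via
 * MCLADDREFERENCE(), so the copy must be treated as read-only; use
 * m_dup() when the copied data will be modified:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);	(shares clusters)
 *	n = m_dup(m, 0, M_COPYALL, M_DONTWAIT);		(private storage)
 */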

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	struct mbuf *m;
	int off;
	int len;
	caddr_t cp;
{
	unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
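
/*
 * Example (a common caller pattern, shown for illustration): copying
 * a header that may be split across mbufs into aligned local storage
 * before examining it:
 *
 *	struct ip ip;
 *
 *	m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
 */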

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
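
/*
 * Example (names hypothetical): since m_cat() consumes n and does not
 * touch m_pkthdr, a caller that tracks packet length must note n's
 * length beforehand:
 *
 *	nlen = n->m_pkthdr.len;
 *	m_cat(m, n);
 *	m->m_pkthdr.len += nlen;
 */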

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
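
/*
 * Example (illustrative; constants assumed from <net/if_ether.h>):
 * a positive length trims from the head of the chain, a negative
 * length from the tail:
 *
 *	m_adj(m, sizeof(struct ether_header));	(drop link-level header)
 *	m_adj(m, -ETHER_CRC_LEN);		(drop trailing CRC)
 */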

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
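
/*
 * Example: the classic use is to guarantee a contiguous header before
 * dereferencing it with mtod(); on failure the chain has already been
 * freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */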

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
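
/*
 * Example (hypothetical caller): splitting a record boundary out of a
 * stream.  On success m holds the first reclen bytes and tail the
 * rest; on failure the chain is restored as far as possible:
 *
 *	tail = m_split(m, reclen, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */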
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((const void *from, void *to, size_t len));
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (0);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			memcpy(mtod(m, caddr_t), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
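
/*
 * Example (hypothetical driver; names assumed): copying a received
 * frame out of device memory with no trailer offset and the default
 * memcpy copy routine, then handing it up via the usual if_input
 * path:
 *
 *	m = m_devget(sc->sc_rxbuf, framelen, 0, ifp, NULL);
 *	if (m != NULL)
 *		(*ifp->if_input)(ifp, m);
 */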

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
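
/*
 * Example (names hypothetical): writing a freshly computed checksum
 * back into a packet at a known offset:
 *
 *	m_copyback(m, ckoff, sizeof(cksum), (caddr_t)&cksum);
 */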