/*
2 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * from: @(#)uipc_mbuf.c 7.19 (Berkeley) 4/20/91
34 * $Id: uipc_mbuf.c,v 1.4 1993/09/04 00:01:43 jtc Exp $
35 */
36
37 #include "param.h"
38 #include "systm.h"
39 #include "proc.h"
40 #include "malloc.h"
41 #define MBTYPES
42 #include "mbuf.h"
43 #include "kernel.h"
44 #include "syslog.h"
45 #include "domain.h"
46 #include "protosw.h"
47 #include "vm/vm.h"
48
49 extern vm_map_t mb_map;
50 struct mbuf *mbutl;
51 char *mclrefcnt;
52
53 void
54 mbinit()
55 {
56 int s;
57
58 #if CLBYTES < 4096
59 #define NCL_INIT (4096/CLBYTES)
60 #else
61 #define NCL_INIT 1
62 #endif
63 s = splimp();
64 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
65 goto bad;
66 splx(s);
67 return;
68 bad:
69 panic("mbinit");
70 }
71
72 /*
73 * Allocate some number of mbuf clusters
74 * and place on cluster free list.
75 * Must be called at splimp.
76 */
77 /* ARGSUSED */
78 m_clalloc(ncl, how) /* 31 Aug 92*/
79 register int ncl;
80 {
81 int npg, mbx;
82 register caddr_t p;
83 register int i;
84 static int logged;
85
86 npg = ncl * CLSIZE;
87 /* 31 Aug 92*/
88 p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !(how&M_DONTWAIT));
89 if (p == NULL) {
90 if (logged == 0) {
91 logged++;
92 log(LOG_ERR, "mb_map full\n");
93 }
94 return (0);
95 }
96 ncl = ncl * CLBYTES / MCLBYTES;
97 for (i = 0; i < ncl; i++) {
98 ((union mcluster *)p)->mcl_next = mclfree;
99 mclfree = (union mcluster *)p;
100 p += MCLBYTES;
101 mbstat.m_clfree++;
102 }
103 mbstat.m_clusters += ncl;
104 return (1);
105 }
106
107 /*
 * When MGET fails, ask protocols to free space when short of memory,
109 * then re-attempt to allocate an mbuf.
110 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * MGET's own failure path expands to a call to m_retry();
	 * temporarily redefine it to a nil mbuf pointer so that a
	 * second allocation failure cannot recurse back in here.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}
123
124 /*
125 * As above; retry an MGETHDR.
126 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * Same recursion-breaking trick as m_retry(): MGETHDR's failure
	 * path calls m_retryhdr(), so make that expand to nil while the
	 * macro is used here.
	 */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}
139
/*
 * Ask every protocol in every domain to drain whatever mbufs it can
 * spare.  Runs at splimp so the free lists stay consistent.
 */
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	/* dom_protoswNPROTOSW points just past the end of the protosw table. */
	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}
153
154 /*
155 * Space allocation routines.
156 * These are also available as macros
157 * for critical paths.
158 */
/*
 * Function form of the MGET macro: allocate one mbuf of the given
 * type.  Returns nil if allocation failed.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	struct mbuf *mp;

	MGET(mp, how, type);
	return (mp);
}
168
/*
 * Function form of the MGETHDR macro: allocate one packet-header
 * mbuf of the given type.  Returns nil if allocation failed.
 */
struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	struct mbuf *mp;

	MGETHDR(mp, how, type);
	return (mp);
}
178
179 struct mbuf *
180 m_getclr(how, type) /* 31 Aug 92*/
181 int how, type;
182 {
183 register struct mbuf *m;
184
185 MGET(m, how, type);
186 if (m == 0)
187 return (0);
188 bzero(mtod(m, caddr_t), MLEN);
189 return (m);
190 }
191
/*
 * Function form of the MFREE macro: free one mbuf and return its
 * successor in the chain (nil if it was the last).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *next;

	MFREE(m, next);
	return (next);
}
201
202 m_freem(m)
203 register struct mbuf *m;
204 {
205 register struct mbuf *n;
206
207 if (m == NULL)
208 return;
209 do {
210 MFREE(m, n);
211 } while (m = n);
212 }
213
214 /*
215 * Mbuffer utility routines.
216 */
217
218 /*
219 * Lesser-used path for M_PREPEND:
220 * allocate new mbuf to prepend to chain,
221 * copy junk along.
222 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		/* Allocation failed: the caller's whole chain is freed. */
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* Move the packet header into the new first mbuf. */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		/* Place data at the tail so later prepends can reuse this mbuf. */
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
246
247 /*
248 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
249 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
250 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
251 */
252 int MCFail;
253
struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	/* Copying from the front of a header mbuf copies the header too. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip forward to the mbuf containing byte off0 of the chain. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Running off the end is only legal for M_COPYALL. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/*
			 * Cluster data is shared by reference rather than
			 * copied; bump the cluster's reference count.
			 */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;	/* only the first mbuf uses a nonzero offset */
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
320
321 /*
322 * Copy data from an mbuf chain starting "off" bytes from the beginning,
323 * continuing for "len" bytes, into the indicated buffer.
324 */
325 m_copydata(m, off, len, cp)
326 register struct mbuf *m;
327 register int off;
328 register int len;
329 caddr_t cp;
330 {
331 register unsigned count;
332
333 if (off < 0 || len < 0)
334 panic("m_copydata");
335 while (off > 0) {
336 if (m == 0)
337 panic("m_copydata");
338 if (off < m->m_len)
339 break;
340 off -= m->m_len;
341 m = m->m_next;
342 }
343 while (len > 0) {
344 if (m == 0)
345 panic("m_copydata");
346 count = MIN(m->m_len - off, len);
347 bcopy(mtod(m, caddr_t) + off, cp, count);
348 len -= count;
349 cp += count;
350 off = 0;
351 m = m->m_next;
352 }
353 }
354
355 /*
356 * Concatenate mbuf chain n to m.
357 * Both chains must be of the same type (e.g. MT_DATA).
358 * Any m_pkthdr is not updated.
359 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	/* Find the last mbuf of the first chain. */
	while (m->m_next)
		m = m->m_next;
	while (n) {
		/*
		 * If m is a cluster mbuf, or n's data would overflow m's
		 * internal buffer, stop compacting and just link the rest
		 * of n after m.
		 */
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
379
380 m_adj(mp, req_len)
381 struct mbuf *mp;
382 {
383 register int len = req_len;
384 register struct mbuf *m;
385 register count;
386
387 if ((m = mp) == NULL)
388 return;
389 if (len >= 0) {
390 /*
391 * Trim from head.
392 */
393 while (m != NULL && len > 0) {
394 if (m->m_len <= len) {
395 len -= m->m_len;
396 m->m_len = 0;
397 m = m->m_next;
398 } else {
399 m->m_len -= len;
400 m->m_data += len;
401 len = 0;
402 }
403 }
404 m = mp;
405 if (mp->m_flags & M_PKTHDR)
406 m->m_pkthdr.len -= (req_len - len);
407 } else {
408 /*
409 * Trim from tail. Scan the mbuf chain,
410 * calculating its length and finding the last mbuf.
411 * If the adjustment only affects this mbuf, then just
412 * adjust and return. Otherwise, rescan and truncate
413 * after the remaining size.
414 */
415 len = -len;
416 count = 0;
417 for (;;) {
418 count += m->m_len;
419 if (m->m_next == (struct mbuf *)0)
420 break;
421 m = m->m_next;
422 }
423 if (m->m_len >= len) {
424 m->m_len -= len;
425 if ((mp = m)->m_flags & M_PKTHDR)
426 m->m_pkthdr.len -= len;
427 return;
428 }
429 count -= len;
430 if (count < 0)
431 count = 0;
432 /*
433 * Correct length for chain is "count".
434 * Find the mbuf with last data, adjust its length,
435 * and toss data from remaining mbufs on chain.
436 */
437 m = mp;
438 if (m->m_flags & M_PKTHDR)
439 m->m_pkthdr.len = count;
440 for (; m; m = m->m_next) {
441 if (m->m_len >= count) {
442 m->m_len = count;
443 break;
444 }
445 count -= m->m_len;
446 }
447 while (m = m->m_next)
448 m->m_len = 0;
449 }
450 }
451
452 /*
 * Rearrange an mbuf chain so that len bytes are contiguous
454 * and in the data area of an mbuf (so that mtod and dtom
455 * will work for a structure of size len). Returns the resulting
456 * mbuf chain on success, frees it and returns null on failure.
457 * If there is room, it will add up to max_protohdr-len extra bytes to the
458 * contiguous region in an attempt to avoid being called next time.
459 */
460 int MPFail;
461
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous */
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;	/* can never fit in a single mbuf */
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* Move the packet header onto the new head mbuf. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	/* Bytes of internal buffer still free at the end of m's data. */
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy at least len bytes, and up to max_protohdr if
		 * there is room, to try to avoid another pullup soon.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* source mbuf drained: free it */
	} while (len > 0 && n);
	if (len > 0) {
		/* Ran out of chain before gathering len bytes. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
520