/*
 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)uipc_mbuf.c 7.19 (Berkeley) 4/20/91
 * $Id: uipc_mbuf.c,v 1.5 1993/10/22 02:48:35 cgd Exp $
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#define MBTYPES
#include "mbuf.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"
#include "vm/vm.h"

extern vm_map_t mb_map;
struct mbuf *mbutl;
char *mclrefcnt;

void
mbinit()
{
        int s;

#if CLBYTES < 4096
#define NCL_INIT (4096/CLBYTES)
#else
#define NCL_INIT 1
#endif
        s = splimp();
        if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
                goto bad;
        splx(s);
        return;
bad:
        panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
        register int ncl;
{
        int npg, mbx;
        register caddr_t p;
        register int i;
        static int logged;

        npg = ncl * CLSIZE;
        p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
        if (p == NULL) {
                if (logged == 0) {
                        logged++;
                        log(LOG_ERR, "mb_map full\n");
                }
                return (0);
        }
        ncl = ncl * CLBYTES / MCLBYTES;
        for (i = 0; i < ncl; i++) {
                ((union mcluster *)p)->mcl_next = mclfree;
                mclfree = (union mcluster *)p;
                p += MCLBYTES;
                mbstat.m_clfree++;
        }
        mbstat.m_clusters += ncl;
        return (1);
}
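/*
 * Illustrative sketch (not part of the original source): the usual consumer
 * of the cluster pool stocked above is the MCLGET() macro from mbuf.h, which
 * attaches a free cluster to an mbuf, e.g. in a driver receive path:
 *
 *        MGETHDR(m, M_DONTWAIT, MT_DATA);
 *        if (m == 0)
 *                return;
 *        MCLGET(m, M_DONTWAIT);
 *        if ((m->m_flags & M_EXT) == 0) {
 *                m_freem(m);
 *                return;
 *        }
 */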

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
        int i, t;
{
        register struct mbuf *m;

        m_reclaim();
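        /*
         * The temporary redefinition below makes the m_retry() that MGET()
         * expands to on failure evaluate to a null pointer, so a second
         * allocation failure returns 0 instead of recursing into this
         * routine.
         */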
#define m_retry(i, t) (struct mbuf *)0
        MGET(m, i, t);
#undef m_retry
        return (m);
}
122
123 /*
124 * As above; retry an MGETHDR.
125 */
126 struct mbuf *
127 m_retryhdr(i, t)
128 int i, t;
129 {
130 register struct mbuf *m;
131
132 m_reclaim();
133 #define m_retryhdr(i, t) (struct mbuf *)0
134 MGETHDR(m, i, t);
135 #undef m_retryhdr
136 return (m);
137 }
138
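/*
 * Ask each protocol that provides a pr_drain routine to release whatever
 * mbuf space it can spare.
 */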
m_reclaim()
{
        register struct domain *dp;
        register struct protosw *pr;
        int s = splimp();

        for (dp = domains; dp; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain)
                                (*pr->pr_drain)();
        splx(s);
        mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
        int nowait, type;
{
        register struct mbuf *m;

        MGET(m, nowait, type);
        return (m);
}

struct mbuf *
m_gethdr(nowait, type)
        int nowait, type;
{
        register struct mbuf *m;

        MGETHDR(m, nowait, type);
        return (m);
}

struct mbuf *
m_getclr(nowait, type)
        int nowait, type;
{
        register struct mbuf *m;

        MGET(m, nowait, type);
        if (m == 0)
                return (0);
        bzero(mtod(m, caddr_t), MLEN);
        return (m);
}

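/*
 * Illustrative sketch (not part of the original source): allocating a small
 * mbuf with m_get(), filling it through mtod(), and releasing it again with
 * m_freem().  The macro forms MGET()/MFREE() expand to the same operations
 * inline; "sa" below is a hypothetical struct sockaddr pointer.
 *
 *        struct mbuf *m;
 *
 *        m = m_get(M_DONTWAIT, MT_DATA);
 *        if (m == 0)
 *                return (ENOBUFS);
 *        m->m_len = sizeof(struct sockaddr);
 *        bcopy((caddr_t)sa, mtod(m, caddr_t), (unsigned)m->m_len);
 *        ...
 *        m_freem(m);
 */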
struct mbuf *
m_free(m)
        struct mbuf *m;
{
        register struct mbuf *n;

        MFREE(m, n);
        return (n);
}

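/*
 * Free an entire mbuf chain.
 */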
m_freem(m)
        register struct mbuf *m;
{
        register struct mbuf *n;

        if (m == NULL)
                return;
        do {
                MFREE(m, n);
        } while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, nowait)
        register struct mbuf *m;
        int len, nowait;
{
        struct mbuf *mn;

        MGET(mn, nowait, m->m_type);
        if (mn == (struct mbuf *)NULL) {
                m_freem(m);
                return ((struct mbuf *)NULL);
        }
        if (m->m_flags & M_PKTHDR) {
                M_COPY_PKTHDR(mn, m);
                m->m_flags &= ~M_PKTHDR;
        }
        mn->m_next = m;
        m = mn;
        if (len < MHLEN)
                MH_ALIGN(m, len);
        m->m_len = len;
        return (m);
}
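/*
 * Illustrative sketch (not part of the original source): protocols normally
 * reach this routine through the M_PREPEND() macro when making room for a
 * header in front of existing data ("struct proto_hdr" and "ph" are
 * hypothetical names used only for the example):
 *
 *        M_PREPEND(m, sizeof(struct proto_hdr), M_DONTWAIT);
 *        if (m == 0)
 *                return (ENOBUFS);
 *        ph = mtod(m, struct proto_hdr *);
 */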

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf chain.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
        register struct mbuf *m;
        int off0, wait;
        register int len;
{
        register struct mbuf *n, **np;
        register int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        if (off < 0 || len < 0)
                panic("m_copym");
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                if (m == 0)
                        panic("m_copym");
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = 0;
        while (len > 0) {
                if (m == 0) {
                        if (len != M_COPYALL)
                                panic("m_copym");
                        break;
                }
                MGET(n, wait, m->m_type);
                *np = n;
                if (n == 0)
                        goto nospace;
                if (copyhdr) {
                        M_COPY_PKTHDR(n, m);
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = MIN(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                } else
                        bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        if (top == 0)
                MCFail++;
        return (top);
nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
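/*
 * Illustrative sketch (not part of the original source): duplicating an
 * entire packet, e.g. before handing one copy to a second consumer.  Cluster
 * (M_EXT) data is shared by reference rather than copied.
 *
 *        struct mbuf *copy;
 *
 *        copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *        if (copy == 0)
 *                return;          (allocation failed; MCFail was bumped)
 */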

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
        register struct mbuf *m;
        register int off;
        register int len;
        caddr_t cp;
{
        register unsigned count;

        if (off < 0 || len < 0)
                panic("m_copydata");
        while (off > 0) {
                if (m == 0)
                        panic("m_copydata");
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                if (m == 0)
                        panic("m_copydata");
                count = MIN(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}
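/*
 * Illustrative sketch (not part of the original source): extracting a
 * fixed-size header from a chain into a local structure without modifying
 * the chain ("struct proto_hdr" is again a hypothetical name):
 *
 *        struct proto_hdr hdr;
 *
 *        m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
 */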

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
        register struct mbuf *m, *n;
{
        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}

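/*
 * Illustrative sketch (not part of the original source): reassembly-style
 * use of m_cat(), gluing fragment chain "n" onto the data gathered in "m".
 * Because m_cat() leaves any packet header alone, the caller accounts for
 * the added bytes itself ("fraglen" is a hypothetical byte count for n):
 *
 *        m_cat(m, n);
 *        m->m_pkthdr.len += fraglen;
 */

/*
 * Trim req_len bytes of data from the chain: from the head of the chain
 * if req_len is positive, from the tail if it is negative.
 */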
m_adj(mp, req_len)
        struct mbuf *mp;
{
        register int len = req_len;
        register struct mbuf *m;
        register count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                m = mp;
                if (mp->m_flags & M_PKTHDR)
                        m->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if ((mp = m)->m_flags & M_PKTHDR)
                                m->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                break;
                        }
                        count -= m->m_len;
                }
                while (m = m->m_next)
                        m->m_len = 0;
        }
}
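/*
 * Illustrative sketch (not part of the original source): stripping a
 * hypothetical 14-byte link-level header from the front of a packet and a
 * 4-byte trailer from its tail:
 *
 *        m_adj(m, 14);          (positive length: trim from head)
 *        m_adj(m, -4);          (negative length: trim from tail)
 */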

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
        register struct mbuf *n;
        int len;
{
        register struct mbuf *m;
        register int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                MGET(m, M_DONTWAIT, n->m_type);
                if (m == 0)
                        goto bad;
                m->m_len = 0;
                if (n->m_flags & M_PKTHDR) {
                        M_COPY_PKTHDR(m, n);
                        n->m_flags &= ~M_PKTHDR;
                }
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        MPFail++;
        return (0);
}
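/*
 * Illustrative sketch (not part of the original source): the classic caller
 * is protocol input code that needs a header contiguous before casting the
 * data pointer with mtod() ("struct proto_hdr" and "ph" are hypothetical):
 *
 *        if (m->m_len < sizeof(struct proto_hdr) &&
 *            (m = m_pullup(m, sizeof(struct proto_hdr))) == 0) {
 *                return;          (chain was freed; MPFail was bumped)
 *        }
 *        ph = mtod(m, struct proto_hdr *);
 */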