uipc_mbuf.c revision 1.3 1 /*
2 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * from: @(#)uipc_mbuf.c 7.19 (Berkeley) 4/20/91
34 * $Id: uipc_mbuf.c,v 1.3 1993/05/20 02:55:24 cgd Exp $
35 */
36
37 #include "param.h"
38 #include "proc.h"
39 #include "malloc.h"
40 #define MBTYPES
41 #include "mbuf.h"
42 #include "kernel.h"
43 #include "syslog.h"
44 #include "domain.h"
45 #include "protosw.h"
46 #include "vm/vm.h"
47
extern vm_map_t mb_map;		/* kernel submap from which mbuf clusters are allocated */
struct mbuf *mbutl;		/* presumably base address of the mbuf/cluster pool (see mtocl) -- TODO confirm */
char *mclrefcnt;		/* per-cluster reference counts; indexed via mtocl() in m_copym */
51
52 mbinit()
53 {
54 int s;
55
56 #if CLBYTES < 4096
57 #define NCL_INIT (4096/CLBYTES)
58 #else
59 #define NCL_INIT 1
60 #endif
61 s = splimp();
62 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
63 goto bad;
64 splx(s);
65 return;
66 bad:
67 panic("mbinit");
68 }
69
/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 *
 * ncl: number of CLBYTES-sized units requested.
 * how: M_WAIT/M_DONTWAIT -- whether kmem_malloc may sleep.
 * Returns 1 on success, 0 if mb_map is exhausted.
 */
/* ARGSUSED */
m_clalloc(ncl, how)
	register int ncl;
{
	int npg, mbx;
	register caddr_t p;
	register int i;
	static int logged;		/* set once so "mb_map full" is logged only one time */

	npg = ncl * CLSIZE;
	/* Grab wired kernel pages; sleeps only if the caller allows M_WAIT. */
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !(how&M_DONTWAIT));
	if (p == NULL) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	/* Carve the new pages into MCLBYTES chunks and push each on mclfree. */
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
104
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 *
 * i: M_WAIT/M_DONTWAIT; t: mbuf type.  Returns the mbuf, or null if
 * the retry also fails.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * MGET's failure path expands to a call to m_retry; temporarily
	 * redefine it so the expansion below yields a null pointer
	 * instead of recursing back into this function.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}
121
/*
 * As above; retry an MGETHDR.
 *
 * Same recursion-breaking trick as m_retry: MGETHDR's failure path
 * expands to m_retryhdr, so it is redefined to a null pointer for
 * the duration of the retry.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}
137
/*
 * Walk every domain's protocol switch table and invoke each
 * protocol's drain routine (if any) to release mbuf space.
 * Called from the m_retry paths when allocation has failed.
 */
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();	/* block network interrupts while draining */

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}
151
152 /*
153 * Space allocation routines.
154 * These are also available as macros
155 * for critical paths.
156 */
/*
 * Function form of the MGET macro: allocate a single mbuf of the
 * given type.  how is M_WAIT or M_DONTWAIT; returns null on failure.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *mb;

	MGET(mb, how, type);
	return (mb);
}
166
/*
 * Function form of the MGETHDR macro: allocate a single mbuf with
 * packet-header storage.  how is M_WAIT or M_DONTWAIT; returns null
 * on failure.
 */
struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *mb;

	MGETHDR(mb, how, type);
	return (mb);
}
176
177 struct mbuf *
178 m_getclr(how, type) /* 31 Aug 92*/
179 int how, type;
180 {
181 register struct mbuf *m;
182
183 MGET(m, how, type);
184 if (m == 0)
185 return (0);
186 bzero(mtod(m, caddr_t), MLEN);
187 return (m);
188 }
189
/*
 * Function form of the MFREE macro: release one mbuf and return
 * its successor in the chain (null if it was the last).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *nxt;

	MFREE(m, nxt);
	return (nxt);
}
199
200 m_freem(m)
201 register struct mbuf *m;
202 {
203 register struct mbuf *n;
204
205 if (m == NULL)
206 return;
207 do {
208 MFREE(m, n);
209 } while (m = n);
210 }
211
212 /*
213 * Mbuffer utility routines.
214 */
215
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 *
 * len: bytes of space wanted in front of the chain.
 * how: M_WAIT/M_DONTWAIT.
 * On allocation failure the ORIGINAL chain is freed and null is
 * returned, so the caller must not touch m afterwards.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	/* Migrate the packet header (if any) into the new first mbuf. */
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	/* When the request fits in a header mbuf, align it to the end
	 * of the data area so later prepends have room in front. */
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
244
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Cluster (M_EXT) data is shared with the original by bumping the
 * cluster reference count rather than copied, so the copy is cheap
 * but not independent of the original.
 */
int MCFail;		/* statistic: number of m_copym failures */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	/* Only a copy that starts at offset 0 can carry the packet header. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip ahead to the mbuf containing the starting offset. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Running off the end is only legal for M_COPYALL. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* Share the cluster: point at it and bump its refcount. */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	/* Allocation failed mid-copy: free what was built and give up. */
	m_freem(top);
	MCFail++;
	return (0);
}
318
319 /*
320 * Copy data from an mbuf chain starting "off" bytes from the beginning,
321 * continuing for "len" bytes, into the indicated buffer.
322 */
323 m_copydata(m, off, len, cp)
324 register struct mbuf *m;
325 register int off;
326 register int len;
327 caddr_t cp;
328 {
329 register unsigned count;
330
331 if (off < 0 || len < 0)
332 panic("m_copydata");
333 while (off > 0) {
334 if (m == 0)
335 panic("m_copydata");
336 if (off < m->m_len)
337 break;
338 off -= m->m_len;
339 m = m->m_next;
340 }
341 while (len > 0) {
342 if (m == 0)
343 panic("m_copydata");
344 count = MIN(m->m_len - off, len);
345 bcopy(mtod(m, caddr_t) + off, cp, count);
346 len -= count;
347 cp += count;
348 off = 0;
349 m = m->m_next;
350 }
351 }
352
353 /*
354 * Concatenate mbuf chain n to m.
355 * Both chains must be of the same type (e.g. MT_DATA).
356 * Any m_pkthdr is not updated.
357 */
358 m_cat(m, n)
359 register struct mbuf *m, *n;
360 {
361 while (m->m_next)
362 m = m->m_next;
363 while (n) {
364 if (m->m_flags & M_EXT ||
365 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
366 /* just join the two chains */
367 m->m_next = n;
368 return;
369 }
370 /* splat the data from one into the other */
371 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
372 (u_int)n->m_len);
373 m->m_len += n->m_len;
374 n = m_free(n);
375 }
376 }
377
/*
 * Trim req_len bytes from the mbuf chain: from the head when
 * req_len is positive, from the tail when it is negative.
 * Adjusts m_pkthdr.len when the first mbuf carries a packet header.
 */
m_adj(mp, req_len)
	struct mbuf *mp;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* Entire mbuf consumed; empty it and move on. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				/* Partial trim: advance the data pointer. */
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* Whole trim fits inside the last mbuf. */
			m->m_len -= len;
			if ((mp = m)->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero the lengths of any mbufs past the new end. */
		while (m = m->m_next)
			m->m_len = 0;
	}
}
449
/*
 * Rearange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;		/* statistic: number of m_pullup failures */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous */
		m = n;
		n = n->m_next;
		len -= m->m_len;	/* only this many bytes still needed */
	} else {
		if (len > MHLEN)
			goto bad;	/* cannot fit in one ordinary mbuf */
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		/* Move the packet header into the new first mbuf. */
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	/*
	 * Pull bytes forward from n into m.  Each pass copies up to
	 * max(len, max_protohdr) bytes (bounded by the space left in m
	 * and the bytes available in n), freeing source mbufs as they
	 * are drained.
	 */
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain was shorter than len: free everything and fail. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
518