uipc_mbuf.c revision 1.8
/*
 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)uipc_mbuf.c	7.19 (Berkeley) 4/20/91
 *	$Id: uipc_mbuf.c,v 1.8 1994/04/14 21:34:17 deraadt Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

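/*
 * mb_map is the kernel submap from which mbuf clusters are allocated;
 * mbutl is the base of that area, and mclrefcnt holds one reference
 * count per cluster.
 */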
extern	vm_map_t mb_map;
struct	mbuf *mbutl;
char	*mclrefcnt;

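/*
 * Bootstrap the mbuf cluster pool: preallocate an initial batch of
 * clusters onto the cluster free list, and panic if even that fails.
 */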
void
mbinit()
{
	int s;

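/* Allocate enough clusters at boot time to cover at least 4096 bytes. */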
#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	int npg, mbx;
	register caddr_t p;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
	if (p == NULL) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
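	/*
	 * Redefine m_retry to a null pointer for the MGET expansion below,
	 * so a second allocation failure cannot recurse back into this
	 * function; the #undef restores the real routine afterwards.
	 */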
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

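/*
 * Ask every protocol with a pr_drain routine to release whatever
 * mbufs it can spare.  Called when mbuf allocation fails.
 */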
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
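/*
 * Illustrative caller sketch (not compiled here); "len" and "src" are
 * hypothetical caller state, everything else is the mbuf interface:
 *
 *	struct mbuf *m;
 *
 *	MGET(m, M_DONTWAIT, MT_DATA);
 *	if (m == 0)
 *		return (ENOBUFS);
 *	m->m_len = min(len, MLEN);
 *	bcopy(src, mtod(m, caddr_t), (u_int)m->m_len);
 *	...
 *	m_freem(m);
 */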
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

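/*
 * Free a single mbuf and return its successor in the chain
 * (function form of the MFREE macro).
 */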
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

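/*
 * Free an entire mbuf chain, following m_next links until the chain
 * is exhausted.  A null pointer is accepted and ignored.
 */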
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, nowait)
	register struct mbuf *m;
	int len, nowait;
{
	struct mbuf *mn;

	MGET(mn, nowait, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of the
 * mbuf chain.  The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

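/*
 * Trim req_len bytes of data from the chain: from the head if req_len
 * is positive, from the tail if it is negative.  The packet header
 * length, if present, is adjusted to match.
 */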
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
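/*
 * Typical caller idiom (illustrative sketch only; "ip" and struct ip
 * belong to the caller, not this file):
 *
 *	if ((m = m_pullup(m, sizeof(struct ip))) == 0)
 *		return;			(chain already freed by m_pullup)
 *	ip = mtod(m, struct ip *);
 */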
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}