1 1.255 skrll /* $NetBSD: uipc_mbuf.c,v 1.255 2024/12/15 11:07:10 skrll Exp $ */ 2 1.42 thorpej 3 1.177 maxv /* 4 1.227 maxv * Copyright (c) 1999, 2001, 2018 The NetBSD Foundation, Inc. 5 1.42 thorpej * All rights reserved. 6 1.42 thorpej * 7 1.42 thorpej * This code is derived from software contributed to The NetBSD Foundation 8 1.42 thorpej * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 1.227 maxv * NASA Ames Research Center, and Maxime Villard. 10 1.42 thorpej * 11 1.42 thorpej * Redistribution and use in source and binary forms, with or without 12 1.42 thorpej * modification, are permitted provided that the following conditions 13 1.42 thorpej * are met: 14 1.42 thorpej * 1. Redistributions of source code must retain the above copyright 15 1.42 thorpej * notice, this list of conditions and the following disclaimer. 16 1.42 thorpej * 2. Redistributions in binary form must reproduce the above copyright 17 1.42 thorpej * notice, this list of conditions and the following disclaimer in the 18 1.42 thorpej * documentation and/or other materials provided with the distribution. 19 1.42 thorpej * 20 1.42 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 1.42 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 1.42 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 1.42 thorpej * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 1.42 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 1.42 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 1.42 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 1.42 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 1.42 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 1.42 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 1.42 thorpej * POSSIBILITY OF SUCH DAMAGE. 31 1.42 thorpej */ 32 1.10 cgd 33 1.1 cgd /* 34 1.9 mycroft * Copyright (c) 1982, 1986, 1988, 1991, 1993 35 1.9 mycroft * The Regents of the University of California. All rights reserved. 36 1.1 cgd * 37 1.1 cgd * Redistribution and use in source and binary forms, with or without 38 1.1 cgd * modification, are permitted provided that the following conditions 39 1.1 cgd * are met: 40 1.1 cgd * 1. Redistributions of source code must retain the above copyright 41 1.1 cgd * notice, this list of conditions and the following disclaimer. 42 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright 43 1.1 cgd * notice, this list of conditions and the following disclaimer in the 44 1.1 cgd * documentation and/or other materials provided with the distribution. 45 1.70 agc * 3. Neither the name of the University nor the names of its contributors 46 1.1 cgd * may be used to endorse or promote products derived from this software 47 1.1 cgd * without specific prior written permission. 48 1.1 cgd * 49 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 50 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 51 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 52 1.1 cgd * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 53 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 54 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 55 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 56 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 57 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 58 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 59 1.1 cgd * SUCH DAMAGE. 60 1.1 cgd * 61 1.26 fvdl * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95 62 1.1 cgd */ 63 1.56 lukem 64 1.56 lukem #include <sys/cdefs.h> 65 1.255 skrll __KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.255 2024/12/15 11:07:10 skrll Exp $"); 66 1.69 martin 67 1.163 pooka #ifdef _KERNEL_OPT 68 1.253 riastrad #include "ether.h" 69 1.253 riastrad #include "opt_ddb.h" 70 1.69 martin #include "opt_mbuftrace.h" 71 1.133 joerg #include "opt_nmbclusters.h" 72 1.163 pooka #endif 73 1.24 mrg 74 1.6 mycroft #include <sys/param.h> 75 1.253 riastrad #include <sys/types.h> 76 1.253 riastrad 77 1.125 yamt #include <sys/atomic.h> 78 1.124 yamt #include <sys/cpu.h> 79 1.253 riastrad #include <sys/domain.h> 80 1.253 riastrad #include <sys/kernel.h> 81 1.6 mycroft #include <sys/mbuf.h> 82 1.124 yamt #include <sys/percpu.h> 83 1.28 thorpej #include <sys/pool.h> 84 1.253 riastrad #include <sys/proc.h> 85 1.253 riastrad #include <sys/protosw.h> 86 1.254 riastrad #include <sys/sdt.h> 87 1.27 matt #include <sys/socket.h> 88 1.55 simonb #include <sys/sysctl.h> 89 1.253 riastrad #include <sys/syslog.h> 90 1.253 riastrad #include <sys/systm.h> 91 1.55 simonb 92 1.27 matt #include <net/if.h> 93 1.14 christos 94 1.122 ad pool_cache_t mb_cache; /* mbuf cache */ 95 1.218 maxv static pool_cache_t mcl_cache; /* mbuf cluster cache */ 96 1.53 thorpej 97 1.18 thorpej struct mbstat mbstat; 98 1.199 maxv int max_linkhdr; 99 
1.199 maxv int max_protohdr; 100 1.199 maxv int max_hdr; 101 1.199 maxv int max_datalen; 102 1.18 thorpej 103 1.203 maxv static void mb_drain(void *, int); 104 1.65 thorpej static int mb_ctor(void *, void *, int); 105 1.65 thorpej 106 1.199 maxv static void sysctl_kern_mbuf_setup(void); 107 1.129 pooka 108 1.129 pooka static struct sysctllog *mbuf_sysctllog; 109 1.129 pooka 110 1.195 maxv static struct mbuf *m_copy_internal(struct mbuf *, int, int, int, bool); 111 1.195 maxv static struct mbuf *m_split_internal(struct mbuf *, int, int, bool); 112 1.196 maxv static int m_copyback_internal(struct mbuf **, int, int, const void *, 113 1.196 maxv int, int); 114 1.85 yamt 115 1.196 maxv /* Flags for m_copyback_internal. */ 116 1.196 maxv #define CB_COPYBACK 0x0001 /* copyback from cp */ 117 1.196 maxv #define CB_PRESERVE 0x0002 /* preserve original data */ 118 1.196 maxv #define CB_COW 0x0004 /* do copy-on-write */ 119 1.196 maxv #define CB_EXTEND 0x0008 /* extend chain */ 120 1.28 thorpej 121 1.103 thorpej static const char mclpool_warnmsg[] = 122 1.133 joerg "WARNING: mclpool limit reached; increase kern.mbuf.nmbclusters"; 123 1.63 thorpej 124 1.63 thorpej MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf"); 125 1.42 thorpej 126 1.124 yamt static percpu_t *mbstat_percpu; 127 1.124 yamt 128 1.64 matt #ifdef MBUFTRACE 129 1.64 matt struct mownerhead mowners = LIST_HEAD_INITIALIZER(mowners); 130 1.64 matt struct mowner unknown_mowners[] = { 131 1.114 dogcow MOWNER_INIT("unknown", "free"), 132 1.114 dogcow MOWNER_INIT("unknown", "data"), 133 1.114 dogcow MOWNER_INIT("unknown", "header"), 134 1.114 dogcow MOWNER_INIT("unknown", "soname"), 135 1.114 dogcow MOWNER_INIT("unknown", "soopts"), 136 1.114 dogcow MOWNER_INIT("unknown", "ftable"), 137 1.114 dogcow MOWNER_INIT("unknown", "control"), 138 1.114 dogcow MOWNER_INIT("unknown", "oobdata"), 139 1.64 matt }; 140 1.114 dogcow struct mowner revoked_mowner = MOWNER_INIT("revoked", ""); 141 1.64 matt #endif 142 1.64 matt 143 1.125 yamt 
#define MEXT_ISEMBEDDED(m) ((m)->m_ext_ref == (m)) 144 1.125 yamt 145 1.125 yamt #define MCLADDREFERENCE(o, n) \ 146 1.125 yamt do { \ 147 1.125 yamt KASSERT(((o)->m_flags & M_EXT) != 0); \ 148 1.125 yamt KASSERT(((n)->m_flags & M_EXT) == 0); \ 149 1.125 yamt KASSERT((o)->m_ext.ext_refcnt >= 1); \ 150 1.125 yamt (n)->m_flags |= ((o)->m_flags & M_EXTCOPYFLAGS); \ 151 1.125 yamt atomic_inc_uint(&(o)->m_ext.ext_refcnt); \ 152 1.125 yamt (n)->m_ext_ref = (o)->m_ext_ref; \ 153 1.125 yamt mowner_ref((n), (n)->m_flags); \ 154 1.125 yamt } while (/* CONSTCOND */ 0) 155 1.125 yamt 156 1.133 joerg static int 157 1.133 joerg nmbclusters_limit(void) 158 1.133 joerg { 159 1.136 pooka #if defined(PMAP_MAP_POOLPAGE) 160 1.147 para /* direct mapping, doesn't use space in kmem_arena */ 161 1.133 joerg vsize_t max_size = physmem / 4; 162 1.133 joerg #else 163 1.145 para vsize_t max_size = MIN(physmem / 4, nkmempages / 4); 164 1.133 joerg #endif 165 1.133 joerg 166 1.133 joerg max_size = max_size * PAGE_SIZE / MCLBYTES; 167 1.133 joerg #ifdef NMBCLUSTERS_MAX 168 1.133 joerg max_size = MIN(max_size, NMBCLUSTERS_MAX); 169 1.133 joerg #endif 170 1.133 joerg 171 1.133 joerg return max_size; 172 1.133 joerg } 173 1.133 joerg 174 1.28 thorpej /* 175 1.68 simonb * Initialize the mbuf allocator. 
176 1.28 thorpej */ 177 1.4 jtc void 178 1.62 thorpej mbinit(void) 179 1.1 cgd { 180 1.65 thorpej 181 1.128 matt CTASSERT(sizeof(struct _m_ext) <= MHLEN); 182 1.128 matt CTASSERT(sizeof(struct mbuf) == MSIZE); 183 1.65 thorpej 184 1.129 pooka sysctl_kern_mbuf_setup(); 185 1.129 pooka 186 1.122 ad mb_cache = pool_cache_init(msize, 0, 0, 0, "mbpl", 187 1.122 ad NULL, IPL_VM, mb_ctor, NULL, NULL); 188 1.122 ad KASSERT(mb_cache != NULL); 189 1.122 ad 190 1.235 tnn mcl_cache = pool_cache_init(mclbytes, COHERENCY_UNIT, 0, 0, "mclpl", 191 1.235 tnn NULL, IPL_VM, NULL, NULL, NULL); 192 1.122 ad KASSERT(mcl_cache != NULL); 193 1.59 thorpej 194 1.203 maxv pool_cache_set_drain_hook(mb_cache, mb_drain, NULL); 195 1.203 maxv pool_cache_set_drain_hook(mcl_cache, mb_drain, NULL); 196 1.37 thorpej 197 1.37 thorpej /* 198 1.133 joerg * Set an arbitrary default limit on the number of mbuf clusters. 199 1.133 joerg */ 200 1.133 joerg #ifdef NMBCLUSTERS 201 1.244 msaitoh nmbclusters = MIN(NMBCLUSTERS, nmbclusters_limit()); 202 1.133 joerg #else 203 1.133 joerg nmbclusters = MAX(1024, 204 1.133 joerg (vsize_t)physmem * PAGE_SIZE / MCLBYTES / 16); 205 1.133 joerg nmbclusters = MIN(nmbclusters, nmbclusters_limit()); 206 1.133 joerg #endif 207 1.133 joerg 208 1.133 joerg /* 209 1.39 thorpej * Set the hard limit on the mclpool to the number of 210 1.39 thorpej * mbuf clusters the kernel is to support. Log the limit 211 1.39 thorpej * reached message max once a minute. 212 1.39 thorpej */ 213 1.122 ad pool_cache_sethardlimit(mcl_cache, nmbclusters, mclpool_warnmsg, 60); 214 1.42 thorpej 215 1.124 yamt mbstat_percpu = percpu_alloc(sizeof(struct mbstat_cpu)); 216 1.124 yamt 217 1.39 thorpej /* 218 1.42 thorpej * Set a low water mark for both mbufs and clusters. This should 219 1.42 thorpej * help ensure that they can be allocated in a memory starvation 220 1.42 thorpej * situation. This is important for e.g. 
diskless systems which 221 1.42 thorpej * must allocate mbufs in order for the pagedaemon to clean pages. 222 1.37 thorpej */ 223 1.122 ad pool_cache_setlowat(mb_cache, mblowat); 224 1.122 ad pool_cache_setlowat(mcl_cache, mcllowat); 225 1.64 matt 226 1.64 matt #ifdef MBUFTRACE 227 1.64 matt { 228 1.64 matt /* 229 1.64 matt * Attach the unknown mowners. 230 1.64 matt */ 231 1.64 matt int i; 232 1.64 matt MOWNER_ATTACH(&revoked_mowner); 233 1.64 matt for (i = sizeof(unknown_mowners)/sizeof(unknown_mowners[0]); 234 1.64 matt i-- > 0; ) 235 1.64 matt MOWNER_ATTACH(&unknown_mowners[i]); 236 1.64 matt } 237 1.64 matt #endif 238 1.42 thorpej } 239 1.42 thorpej 240 1.203 maxv static void 241 1.203 maxv mb_drain(void *arg, int flags) 242 1.203 maxv { 243 1.203 maxv struct domain *dp; 244 1.203 maxv const struct protosw *pr; 245 1.203 maxv struct ifnet *ifp; 246 1.203 maxv int s; 247 1.203 maxv 248 1.203 maxv KERNEL_LOCK(1, NULL); 249 1.203 maxv s = splvm(); 250 1.203 maxv DOMAIN_FOREACH(dp) { 251 1.203 maxv for (pr = dp->dom_protosw; 252 1.203 maxv pr < dp->dom_protoswNPROTOSW; pr++) 253 1.203 maxv if (pr->pr_drain) 254 1.203 maxv (*pr->pr_drain)(); 255 1.203 maxv } 256 1.203 maxv /* XXX we cannot use psref in H/W interrupt */ 257 1.203 maxv if (!cpu_intr_p()) { 258 1.203 maxv int bound = curlwp_bind(); 259 1.203 maxv IFNET_READER_FOREACH(ifp) { 260 1.203 maxv struct psref psref; 261 1.203 maxv 262 1.203 maxv if_acquire(ifp, &psref); 263 1.203 maxv 264 1.203 maxv if (ifp->if_drain) 265 1.203 maxv (*ifp->if_drain)(ifp); 266 1.203 maxv 267 1.203 maxv if_release(ifp, &psref); 268 1.203 maxv } 269 1.203 maxv curlwp_bindx(bound); 270 1.203 maxv } 271 1.203 maxv splx(s); 272 1.203 maxv mbstat.m_drain++; 273 1.203 maxv KERNEL_UNLOCK_ONE(NULL); 274 1.203 maxv } 275 1.203 maxv 276 1.75 atatat /* 277 1.133 joerg * sysctl helper routine for the kern.mbuf subtree. 
278 1.133 joerg * nmbclusters, mblowat and mcllowat need range 279 1.75 atatat * checking and pool tweaking after being reset. 280 1.75 atatat */ 281 1.75 atatat static int 282 1.75 atatat sysctl_kern_mbuf(SYSCTLFN_ARGS) 283 1.42 thorpej { 284 1.42 thorpej int error, newval; 285 1.75 atatat struct sysctlnode node; 286 1.42 thorpej 287 1.75 atatat node = *rnode; 288 1.75 atatat node.sysctl_data = &newval; 289 1.75 atatat switch (rnode->sysctl_num) { 290 1.42 thorpej case MBUF_NMBCLUSTERS: 291 1.42 thorpej case MBUF_MBLOWAT: 292 1.42 thorpej case MBUF_MCLLOWAT: 293 1.75 atatat newval = *(int*)rnode->sysctl_data; 294 1.75 atatat break; 295 1.247 msaitoh case MBUF_NMBCLUSTERS_LIMIT: 296 1.247 msaitoh newval = nmbclusters_limit(); 297 1.247 msaitoh break; 298 1.75 atatat default: 299 1.254 riastrad return SET_ERROR(EOPNOTSUPP); 300 1.75 atatat } 301 1.75 atatat 302 1.75 atatat error = sysctl_lookup(SYSCTLFN_CALL(&node)); 303 1.75 atatat if (error || newp == NULL) 304 1.199 maxv return error; 305 1.75 atatat if (newval < 0) 306 1.254 riastrad return SET_ERROR(EINVAL); 307 1.75 atatat 308 1.75 atatat switch (node.sysctl_num) { 309 1.75 atatat case MBUF_NMBCLUSTERS: 310 1.75 atatat if (newval < nmbclusters) 311 1.254 riastrad return SET_ERROR(EINVAL); 312 1.133 joerg if (newval > nmbclusters_limit()) 313 1.254 riastrad return SET_ERROR(EINVAL); 314 1.75 atatat nmbclusters = newval; 315 1.122 ad pool_cache_sethardlimit(mcl_cache, nmbclusters, 316 1.122 ad mclpool_warnmsg, 60); 317 1.75 atatat break; 318 1.75 atatat case MBUF_MBLOWAT: 319 1.75 atatat mblowat = newval; 320 1.122 ad pool_cache_setlowat(mb_cache, mblowat); 321 1.75 atatat break; 322 1.75 atatat case MBUF_MCLLOWAT: 323 1.76 atatat mcllowat = newval; 324 1.122 ad pool_cache_setlowat(mcl_cache, mcllowat); 325 1.75 atatat break; 326 1.75 atatat } 327 1.75 atatat 328 1.199 maxv return 0; 329 1.75 atatat } 330 1.75 atatat 331 1.64 matt #ifdef MBUFTRACE 332 1.124 yamt static void 333 1.220 msaitoh 
mowner_convert_to_user_cb(void *v1, void *v2, struct cpu_info *ci) 334 1.124 yamt { 335 1.124 yamt struct mowner_counter *mc = v1; 336 1.124 yamt struct mowner_user *mo_user = v2; 337 1.124 yamt int i; 338 1.124 yamt 339 1.124 yamt for (i = 0; i < MOWNER_COUNTER_NCOUNTERS; i++) { 340 1.124 yamt mo_user->mo_counter[i] += mc->mc_counter[i]; 341 1.124 yamt } 342 1.124 yamt } 343 1.124 yamt 344 1.124 yamt static void 345 1.124 yamt mowner_convert_to_user(struct mowner *mo, struct mowner_user *mo_user) 346 1.124 yamt { 347 1.124 yamt 348 1.124 yamt memset(mo_user, 0, sizeof(*mo_user)); 349 1.128 matt CTASSERT(sizeof(mo_user->mo_name) == sizeof(mo->mo_name)); 350 1.128 matt CTASSERT(sizeof(mo_user->mo_descr) == sizeof(mo->mo_descr)); 351 1.124 yamt memcpy(mo_user->mo_name, mo->mo_name, sizeof(mo->mo_name)); 352 1.124 yamt memcpy(mo_user->mo_descr, mo->mo_descr, sizeof(mo->mo_descr)); 353 1.220 msaitoh percpu_foreach(mo->mo_counters, mowner_convert_to_user_cb, mo_user); 354 1.124 yamt } 355 1.124 yamt 356 1.75 atatat static int 357 1.75 atatat sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS) 358 1.75 atatat { 359 1.75 atatat struct mowner *mo; 360 1.75 atatat size_t len = 0; 361 1.75 atatat int error = 0; 362 1.75 atatat 363 1.75 atatat if (namelen != 0) 364 1.254 riastrad return SET_ERROR(EINVAL); 365 1.75 atatat if (newp != NULL) 366 1.254 riastrad return SET_ERROR(EPERM); 367 1.75 atatat 368 1.75 atatat LIST_FOREACH(mo, &mowners, mo_link) { 369 1.124 yamt struct mowner_user mo_user; 370 1.124 yamt 371 1.124 yamt mowner_convert_to_user(mo, &mo_user); 372 1.124 yamt 373 1.75 atatat if (oldp != NULL) { 374 1.124 yamt if (*oldlenp - len < sizeof(mo_user)) { 375 1.254 riastrad error = SET_ERROR(ENOMEM); 376 1.75 atatat break; 377 1.75 atatat } 378 1.124 yamt error = copyout(&mo_user, (char *)oldp + len, 379 1.124 yamt sizeof(mo_user)); 380 1.75 atatat if (error) 381 1.75 atatat break; 382 1.64 matt } 383 1.124 yamt len += sizeof(mo_user); 384 1.75 atatat } 385 1.75 atatat 386 1.75 
atatat if (error == 0) 387 1.64 matt *oldlenp = len; 388 1.75 atatat 389 1.199 maxv return error; 390 1.75 atatat } 391 1.75 atatat #endif /* MBUFTRACE */ 392 1.75 atatat 393 1.205 maxv void 394 1.205 maxv mbstat_type_add(int type, int diff) 395 1.205 maxv { 396 1.205 maxv struct mbstat_cpu *mb; 397 1.205 maxv int s; 398 1.205 maxv 399 1.205 maxv s = splvm(); 400 1.205 maxv mb = percpu_getref(mbstat_percpu); 401 1.205 maxv mb->m_mtypes[type] += diff; 402 1.205 maxv percpu_putref(mbstat_percpu); 403 1.205 maxv splx(s); 404 1.205 maxv } 405 1.205 maxv 406 1.124 yamt static void 407 1.234 jmcneill mbstat_convert_to_user_cb(void *v1, void *v2, struct cpu_info *ci) 408 1.124 yamt { 409 1.124 yamt struct mbstat_cpu *mbsc = v1; 410 1.124 yamt struct mbstat *mbs = v2; 411 1.124 yamt int i; 412 1.124 yamt 413 1.124 yamt for (i = 0; i < __arraycount(mbs->m_mtypes); i++) { 414 1.124 yamt mbs->m_mtypes[i] += mbsc->m_mtypes[i]; 415 1.124 yamt } 416 1.124 yamt } 417 1.124 yamt 418 1.124 yamt static void 419 1.124 yamt mbstat_convert_to_user(struct mbstat *mbs) 420 1.124 yamt { 421 1.124 yamt 422 1.124 yamt memset(mbs, 0, sizeof(*mbs)); 423 1.124 yamt mbs->m_drain = mbstat.m_drain; 424 1.234 jmcneill percpu_foreach(mbstat_percpu, mbstat_convert_to_user_cb, mbs); 425 1.124 yamt } 426 1.124 yamt 427 1.124 yamt static int 428 1.124 yamt sysctl_kern_mbuf_stats(SYSCTLFN_ARGS) 429 1.124 yamt { 430 1.124 yamt struct sysctlnode node; 431 1.124 yamt struct mbstat mbs; 432 1.124 yamt 433 1.124 yamt mbstat_convert_to_user(&mbs); 434 1.124 yamt node = *rnode; 435 1.124 yamt node.sysctl_data = &mbs; 436 1.124 yamt node.sysctl_size = sizeof(mbs); 437 1.124 yamt return sysctl_lookup(SYSCTLFN_CALL(&node)); 438 1.124 yamt } 439 1.124 yamt 440 1.129 pooka static void 441 1.131 cegger sysctl_kern_mbuf_setup(void) 442 1.75 atatat { 443 1.75 atatat 444 1.129 pooka KASSERT(mbuf_sysctllog == NULL); 445 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 446 1.80 atatat CTLFLAG_PERMANENT, 447 
1.82 atatat CTLTYPE_NODE, "mbuf", 448 1.82 atatat SYSCTL_DESCR("mbuf control variables"), 449 1.75 atatat NULL, 0, NULL, 0, 450 1.75 atatat CTL_KERN, KERN_MBUF, CTL_EOL); 451 1.75 atatat 452 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 453 1.80 atatat CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, 454 1.82 atatat CTLTYPE_INT, "msize", 455 1.82 atatat SYSCTL_DESCR("mbuf base size"), 456 1.75 atatat NULL, msize, NULL, 0, 457 1.75 atatat CTL_KERN, KERN_MBUF, MBUF_MSIZE, CTL_EOL); 458 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 459 1.80 atatat CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, 460 1.82 atatat CTLTYPE_INT, "mclbytes", 461 1.82 atatat SYSCTL_DESCR("mbuf cluster size"), 462 1.75 atatat NULL, mclbytes, NULL, 0, 463 1.75 atatat CTL_KERN, KERN_MBUF, MBUF_MCLBYTES, CTL_EOL); 464 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 465 1.80 atatat CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 466 1.82 atatat CTLTYPE_INT, "nmbclusters", 467 1.82 atatat SYSCTL_DESCR("Limit on the number of mbuf clusters"), 468 1.75 atatat sysctl_kern_mbuf, 0, &nmbclusters, 0, 469 1.75 atatat CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS, CTL_EOL); 470 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 471 1.80 atatat CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 472 1.82 atatat CTLTYPE_INT, "mblowat", 473 1.82 atatat SYSCTL_DESCR("mbuf low water mark"), 474 1.75 atatat sysctl_kern_mbuf, 0, &mblowat, 0, 475 1.75 atatat CTL_KERN, KERN_MBUF, MBUF_MBLOWAT, CTL_EOL); 476 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 477 1.80 atatat CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 478 1.82 atatat CTLTYPE_INT, "mcllowat", 479 1.82 atatat SYSCTL_DESCR("mbuf cluster low water mark"), 480 1.75 atatat sysctl_kern_mbuf, 0, &mcllowat, 0, 481 1.75 atatat CTL_KERN, KERN_MBUF, MBUF_MCLLOWAT, CTL_EOL); 482 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 483 1.80 atatat CTLFLAG_PERMANENT, 484 1.82 atatat CTLTYPE_STRUCT, "stats", 485 1.82 atatat SYSCTL_DESCR("mbuf allocation statistics"), 486 
1.124 yamt sysctl_kern_mbuf_stats, 0, NULL, 0, 487 1.75 atatat CTL_KERN, KERN_MBUF, MBUF_STATS, CTL_EOL); 488 1.75 atatat #ifdef MBUFTRACE 489 1.129 pooka sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 490 1.80 atatat CTLFLAG_PERMANENT, 491 1.82 atatat CTLTYPE_STRUCT, "mowners", 492 1.82 atatat SYSCTL_DESCR("Information about mbuf owners"), 493 1.75 atatat sysctl_kern_mbuf_mowners, 0, NULL, 0, 494 1.75 atatat CTL_KERN, KERN_MBUF, MBUF_MOWNERS, CTL_EOL); 495 1.199 maxv #endif 496 1.247 msaitoh sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL, 497 1.247 msaitoh CTLFLAG_PERMANENT|CTLFLAG_READONLY, 498 1.247 msaitoh CTLTYPE_INT, "nmbclusters_limit", 499 1.247 msaitoh SYSCTL_DESCR("Limit of nmbclusters"), 500 1.247 msaitoh sysctl_kern_mbuf, 0, NULL, 0, 501 1.247 msaitoh CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS_LIMIT, CTL_EOL); 502 1.28 thorpej } 503 1.28 thorpej 504 1.65 thorpej static int 505 1.116 yamt mb_ctor(void *arg, void *object, int flags) 506 1.65 thorpej { 507 1.65 thorpej struct mbuf *m = object; 508 1.65 thorpej 509 1.65 thorpej #ifdef POOL_VTOPHYS 510 1.65 thorpej m->m_paddr = POOL_VTOPHYS(m); 511 1.65 thorpej #else 512 1.65 thorpej m->m_paddr = M_PADDR_INVALID; 513 1.65 thorpej #endif 514 1.199 maxv return 0; 515 1.1 cgd } 516 1.1 cgd 517 1.150 christos /* 518 1.150 christos * Add mbuf to the end of a chain 519 1.150 christos */ 520 1.150 christos struct mbuf * 521 1.179 maxv m_add(struct mbuf *c, struct mbuf *m) 522 1.179 maxv { 523 1.150 christos struct mbuf *n; 524 1.150 christos 525 1.150 christos if (c == NULL) 526 1.150 christos return m; 527 1.150 christos 528 1.150 christos for (n = c; n->m_next != NULL; n = n->m_next) 529 1.150 christos continue; 530 1.150 christos n->m_next = m; 531 1.150 christos return c; 532 1.150 christos } 533 1.150 christos 534 1.1 cgd struct mbuf * 535 1.212 maxv m_get(int how, int type) 536 1.1 cgd { 537 1.27 matt struct mbuf *m; 538 1.1 cgd 539 1.142 dyoung KASSERT(type != MT_FREE); 540 1.142 dyoung 541 1.124 yamt m = 
pool_cache_get(mb_cache, 542 1.212 maxv how == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : PR_NOWAIT); 543 1.124 yamt if (m == NULL) 544 1.124 yamt return NULL; 545 1.249 riastrad KASSERTMSG(((vaddr_t)m->m_dat & PAGE_MASK) + MLEN <= PAGE_SIZE, 546 1.250 skrll "m=%p m->m_dat=%p" 547 1.249 riastrad " MLEN=%u PAGE_MASK=0x%x PAGE_SIZE=%u", 548 1.249 riastrad m, m->m_dat, 549 1.249 riastrad (unsigned)MLEN, (unsigned)PAGE_MASK, (unsigned)PAGE_SIZE); 550 1.124 yamt 551 1.124 yamt mbstat_type_add(type, 1); 552 1.164 knakahar 553 1.184 maxv mowner_init(m, type); 554 1.184 maxv m->m_ext_ref = m; /* default */ 555 1.184 maxv m->m_type = type; 556 1.184 maxv m->m_len = 0; 557 1.184 maxv m->m_next = NULL; 558 1.184 maxv m->m_nextpkt = NULL; /* default */ 559 1.184 maxv m->m_data = m->m_dat; 560 1.184 maxv m->m_flags = 0; /* default */ 561 1.124 yamt 562 1.124 yamt return m; 563 1.1 cgd } 564 1.1 cgd 565 1.1 cgd struct mbuf * 566 1.212 maxv m_gethdr(int how, int type) 567 1.1 cgd { 568 1.27 matt struct mbuf *m; 569 1.1 cgd 570 1.212 maxv m = m_get(how, type); 571 1.124 yamt if (m == NULL) 572 1.124 yamt return NULL; 573 1.124 yamt 574 1.184 maxv m->m_data = m->m_pktdat; 575 1.184 maxv m->m_flags = M_PKTHDR; 576 1.184 maxv 577 1.184 maxv m_reset_rcvif(m); 578 1.184 maxv m->m_pkthdr.len = 0; 579 1.184 maxv m->m_pkthdr.csum_flags = 0; 580 1.184 maxv m->m_pkthdr.csum_data = 0; 581 1.231 knakahar m->m_pkthdr.segsz = 0; 582 1.231 knakahar m->m_pkthdr.ether_vtag = 0; 583 1.232 knakahar m->m_pkthdr.pkthdr_flags = 0; 584 1.184 maxv SLIST_INIT(&m->m_pkthdr.tags); 585 1.184 maxv 586 1.184 maxv m->m_pkthdr.pattr_class = NULL; 587 1.184 maxv m->m_pkthdr.pattr_af = AF_UNSPEC; 588 1.184 maxv m->m_pkthdr.pattr_hdr = NULL; 589 1.124 yamt 590 1.124 yamt return m; 591 1.1 cgd } 592 1.1 cgd 593 1.251 riastrad struct mbuf * 594 1.251 riastrad m_get_n(int how, int type, size_t alignbytes, size_t nbytes) 595 1.251 riastrad { 596 1.251 riastrad struct mbuf *m; 597 1.251 riastrad 598 1.251 riastrad if (alignbytes 
> MCLBYTES || nbytes > MCLBYTES - alignbytes) 599 1.251 riastrad return NULL; 600 1.251 riastrad if ((m = m_get(how, type)) == NULL) 601 1.251 riastrad return NULL; 602 1.251 riastrad if (nbytes + alignbytes > MLEN) { 603 1.251 riastrad m_clget(m, how); 604 1.251 riastrad if ((m->m_flags & M_EXT) == 0) { 605 1.251 riastrad m_free(m); 606 1.251 riastrad return NULL; 607 1.251 riastrad } 608 1.251 riastrad } 609 1.251 riastrad m->m_len = alignbytes + nbytes; 610 1.251 riastrad m_adj(m, alignbytes); 611 1.251 riastrad 612 1.251 riastrad return m; 613 1.251 riastrad } 614 1.251 riastrad 615 1.251 riastrad struct mbuf * 616 1.251 riastrad m_gethdr_n(int how, int type, size_t alignbytes, size_t nbytes) 617 1.251 riastrad { 618 1.251 riastrad struct mbuf *m; 619 1.251 riastrad 620 1.251 riastrad if (nbytes > MCLBYTES || nbytes > MCLBYTES - alignbytes) 621 1.251 riastrad return NULL; 622 1.251 riastrad if ((m = m_gethdr(how, type)) == NULL) 623 1.251 riastrad return NULL; 624 1.251 riastrad if (alignbytes + nbytes > MHLEN) { 625 1.251 riastrad m_clget(m, how); 626 1.251 riastrad if ((m->m_flags & M_EXT) == 0) { 627 1.251 riastrad m_free(m); 628 1.251 riastrad return NULL; 629 1.251 riastrad } 630 1.251 riastrad } 631 1.251 riastrad m->m_len = m->m_pkthdr.len = alignbytes + nbytes; 632 1.251 riastrad m_adj(m, alignbytes); 633 1.251 riastrad 634 1.251 riastrad return m; 635 1.251 riastrad } 636 1.251 riastrad 637 1.64 matt void 638 1.212 maxv m_clget(struct mbuf *m, int how) 639 1.64 matt { 640 1.206 maxv m->m_ext_storage.ext_buf = (char *)pool_cache_get_paddr(mcl_cache, 641 1.212 maxv how == M_WAIT ? 
(PR_WAITOK|PR_LIMITFAIL) : PR_NOWAIT, 642 1.206 maxv &m->m_ext_storage.ext_paddr); 643 1.71 simonb 644 1.206 maxv if (m->m_ext_storage.ext_buf == NULL) 645 1.206 maxv return; 646 1.206 maxv 647 1.249 riastrad KASSERTMSG((((vaddr_t)m->m_ext_storage.ext_buf & PAGE_MASK) + mclbytes 648 1.249 riastrad <= PAGE_SIZE), 649 1.250 skrll "m=%p m->m_ext_storage.ext_buf=%p" 650 1.249 riastrad " mclbytes=%u PAGE_MASK=0x%x PAGE_SIZE=%u", 651 1.249 riastrad m, m->m_dat, 652 1.249 riastrad (unsigned)mclbytes, (unsigned)PAGE_MASK, (unsigned)PAGE_SIZE); 653 1.239 jdolecek 654 1.206 maxv MCLINITREFERENCE(m); 655 1.206 maxv m->m_data = m->m_ext.ext_buf; 656 1.206 maxv m->m_flags = (m->m_flags & ~M_EXTCOPYFLAGS) | 657 1.206 maxv M_EXT|M_EXT_CLUSTER|M_EXT_RW; 658 1.206 maxv m->m_ext.ext_size = MCLBYTES; 659 1.206 maxv m->m_ext.ext_free = NULL; 660 1.207 maxv m->m_ext.ext_arg = NULL; 661 1.206 maxv /* ext_paddr initialized above */ 662 1.206 maxv 663 1.206 maxv mowner_ref(m, M_EXT|M_EXT_CLUSTER); 664 1.64 matt } 665 1.64 matt 666 1.221 maxv struct mbuf * 667 1.221 maxv m_getcl(int how, int type, int flags) 668 1.221 maxv { 669 1.221 maxv struct mbuf *mp; 670 1.221 maxv 671 1.221 maxv if ((flags & M_PKTHDR) != 0) 672 1.221 maxv mp = m_gethdr(how, type); 673 1.221 maxv else 674 1.221 maxv mp = m_get(how, type); 675 1.221 maxv 676 1.221 maxv if (mp == NULL) 677 1.221 maxv return NULL; 678 1.221 maxv 679 1.221 maxv MCLGET(mp, how); 680 1.221 maxv if ((mp->m_flags & M_EXT) != 0) 681 1.221 maxv return mp; 682 1.221 maxv 683 1.221 maxv m_free(mp); 684 1.221 maxv return NULL; 685 1.221 maxv } 686 1.221 maxv 687 1.1 cgd /* 688 1.194 maxv * Utility function for M_PREPEND. Do *NOT* use it directly. 
689 1.1 cgd */ 690 1.1 cgd struct mbuf * 691 1.62 thorpej m_prepend(struct mbuf *m, int len, int how) 692 1.1 cgd { 693 1.1 cgd struct mbuf *mn; 694 1.1 cgd 695 1.180 maxv if (__predict_false(len > MHLEN)) { 696 1.180 maxv panic("%s: len > MHLEN", __func__); 697 1.180 maxv } 698 1.180 maxv 699 1.156 christos KASSERT(len != M_COPYALL); 700 1.153 christos mn = m_get(how, m->m_type); 701 1.143 plunky if (mn == NULL) { 702 1.1 cgd m_freem(m); 703 1.179 maxv return NULL; 704 1.1 cgd } 705 1.178 maxv 706 1.1 cgd if (m->m_flags & M_PKTHDR) { 707 1.226 maxv m_move_pkthdr(mn, m); 708 1.64 matt } else { 709 1.64 matt MCLAIM(mn, m->m_owner); 710 1.1 cgd } 711 1.1 cgd mn->m_next = m; 712 1.1 cgd m = mn; 713 1.178 maxv 714 1.178 maxv if (m->m_flags & M_PKTHDR) { 715 1.178 maxv if (len < MHLEN) 716 1.229 maxv m_align(m, len); 717 1.178 maxv } else { 718 1.178 maxv if (len < MLEN) 719 1.229 maxv m_align(m, len); 720 1.178 maxv } 721 1.178 maxv 722 1.1 cgd m->m_len = len; 723 1.179 maxv return m; 724 1.1 cgd } 725 1.1 cgd 726 1.1 cgd struct mbuf * 727 1.195 maxv m_copym(struct mbuf *m, int off, int len, int wait) 728 1.1 cgd { 729 1.195 maxv /* Shallow copy on M_EXT. */ 730 1.195 maxv return m_copy_internal(m, off, len, wait, false); 731 1.44 itojun } 732 1.44 itojun 733 1.44 itojun struct mbuf * 734 1.195 maxv m_dup(struct mbuf *m, int off, int len, int wait) 735 1.44 itojun { 736 1.195 maxv /* Deep copy. */ 737 1.195 maxv return m_copy_internal(m, off, len, wait, true); 738 1.44 itojun } 739 1.44 itojun 740 1.154 christos static inline int 741 1.179 maxv m_copylen(int len, int copylen) 742 1.179 maxv { 743 1.219 riastrad return (len == M_COPYALL) ? 
copylen : uimin(len, copylen); 744 1.154 christos } 745 1.154 christos 746 1.44 itojun static struct mbuf * 747 1.195 maxv m_copy_internal(struct mbuf *m, int off0, int len, int wait, bool deep) 748 1.44 itojun { 749 1.249 riastrad struct mbuf *m0 __diagused = m; 750 1.249 riastrad int len0 __diagused = len; 751 1.27 matt struct mbuf *n, **np; 752 1.27 matt int off = off0; 753 1.1 cgd struct mbuf *top; 754 1.1 cgd int copyhdr = 0; 755 1.1 cgd 756 1.154 christos if (off < 0 || (len != M_COPYALL && len < 0)) 757 1.196 maxv panic("%s: off %d, len %d", __func__, off, len); 758 1.1 cgd if (off == 0 && m->m_flags & M_PKTHDR) 759 1.1 cgd copyhdr = 1; 760 1.1 cgd while (off > 0) { 761 1.179 maxv if (m == NULL) 762 1.199 maxv panic("%s: m == NULL, off %d", __func__, off); 763 1.1 cgd if (off < m->m_len) 764 1.1 cgd break; 765 1.1 cgd off -= m->m_len; 766 1.1 cgd m = m->m_next; 767 1.1 cgd } 768 1.179 maxv 769 1.1 cgd np = ⊤ 770 1.179 maxv top = NULL; 771 1.155 skrll while (len == M_COPYALL || len > 0) { 772 1.179 maxv if (m == NULL) { 773 1.1 cgd if (len != M_COPYALL) 774 1.196 maxv panic("%s: m == NULL, len %d [!COPYALL]", 775 1.196 maxv __func__, len); 776 1.1 cgd break; 777 1.1 cgd } 778 1.179 maxv 779 1.153 christos n = m_get(wait, m->m_type); 780 1.1 cgd *np = n; 781 1.179 maxv if (n == NULL) 782 1.1 cgd goto nospace; 783 1.64 matt MCLAIM(n, m->m_owner); 784 1.179 maxv 785 1.1 cgd if (copyhdr) { 786 1.228 maxv m_copy_pkthdr(n, m); 787 1.1 cgd if (len == M_COPYALL) 788 1.1 cgd n->m_pkthdr.len -= off0; 789 1.1 cgd else 790 1.1 cgd n->m_pkthdr.len = len; 791 1.1 cgd copyhdr = 0; 792 1.1 cgd } 793 1.154 christos n->m_len = m_copylen(len, m->m_len - off); 794 1.179 maxv 795 1.1 cgd if (m->m_flags & M_EXT) { 796 1.44 itojun if (!deep) { 797 1.44 itojun n->m_data = m->m_data + off; 798 1.44 itojun MCLADDREFERENCE(m, n); 799 1.44 itojun } else { 800 1.48 itojun /* 801 1.181 maxv * We don't care if MCLGET fails. n->m_len is 802 1.181 maxv * recomputed and handles that. 
803 1.48 itojun */ 804 1.44 itojun MCLGET(n, wait); 805 1.161 mlelstv n->m_len = 0; 806 1.50 itojun n->m_len = M_TRAILINGSPACE(n); 807 1.154 christos n->m_len = m_copylen(len, n->m_len); 808 1.219 riastrad n->m_len = uimin(n->m_len, m->m_len - off); 809 1.119 christos memcpy(mtod(n, void *), mtod(m, char *) + off, 810 1.44 itojun (unsigned)n->m_len); 811 1.44 itojun } 812 1.179 maxv } else { 813 1.119 christos memcpy(mtod(n, void *), mtod(m, char *) + off, 814 1.1 cgd (unsigned)n->m_len); 815 1.179 maxv } 816 1.179 maxv 817 1.1 cgd if (len != M_COPYALL) 818 1.1 cgd len -= n->m_len; 819 1.50 itojun off += n->m_len; 820 1.199 maxv 821 1.249 riastrad KASSERTMSG(off <= m->m_len, 822 1.249 riastrad "m=%p m->m_len=%d off=%d len=%d m0=%p off0=%d len0=%d", 823 1.249 riastrad m, m->m_len, off, len, m0, off0, len0); 824 1.199 maxv 825 1.50 itojun if (off == m->m_len) { 826 1.50 itojun m = m->m_next; 827 1.50 itojun off = 0; 828 1.50 itojun } 829 1.1 cgd np = &n->m_next; 830 1.1 cgd } 831 1.179 maxv 832 1.179 maxv return top; 833 1.179 maxv 834 1.1 cgd nospace: 835 1.1 cgd m_freem(top); 836 1.179 maxv return NULL; 837 1.1 cgd } 838 1.1 cgd 839 1.1 cgd /* 840 1.18 thorpej * Copy an entire packet, including header (which must be present). 841 1.181 maxv * An optimization of the common case 'm_copym(m, 0, M_COPYALL, how)'. 
842 1.18 thorpej */ 843 1.18 thorpej struct mbuf * 844 1.62 thorpej m_copypacket(struct mbuf *m, int how) 845 1.18 thorpej { 846 1.18 thorpej struct mbuf *top, *n, *o; 847 1.18 thorpej 848 1.198 maxv if (__predict_false((m->m_flags & M_PKTHDR) == 0)) { 849 1.198 maxv panic("%s: no header (m = %p)", __func__, m); 850 1.198 maxv } 851 1.198 maxv 852 1.153 christos n = m_get(how, m->m_type); 853 1.18 thorpej top = n; 854 1.18 thorpej if (!n) 855 1.18 thorpej goto nospace; 856 1.18 thorpej 857 1.64 matt MCLAIM(n, m->m_owner); 858 1.228 maxv m_copy_pkthdr(n, m); 859 1.18 thorpej n->m_len = m->m_len; 860 1.18 thorpej if (m->m_flags & M_EXT) { 861 1.18 thorpej n->m_data = m->m_data; 862 1.18 thorpej MCLADDREFERENCE(m, n); 863 1.18 thorpej } else { 864 1.30 perry memcpy(mtod(n, char *), mtod(m, char *), n->m_len); 865 1.18 thorpej } 866 1.18 thorpej 867 1.18 thorpej m = m->m_next; 868 1.18 thorpej while (m) { 869 1.153 christos o = m_get(how, m->m_type); 870 1.18 thorpej if (!o) 871 1.18 thorpej goto nospace; 872 1.18 thorpej 873 1.64 matt MCLAIM(o, m->m_owner); 874 1.18 thorpej n->m_next = o; 875 1.18 thorpej n = n->m_next; 876 1.18 thorpej 877 1.18 thorpej n->m_len = m->m_len; 878 1.18 thorpej if (m->m_flags & M_EXT) { 879 1.18 thorpej n->m_data = m->m_data; 880 1.18 thorpej MCLADDREFERENCE(m, n); 881 1.18 thorpej } else { 882 1.30 perry memcpy(mtod(n, char *), mtod(m, char *), n->m_len); 883 1.18 thorpej } 884 1.18 thorpej 885 1.18 thorpej m = m->m_next; 886 1.18 thorpej } 887 1.18 thorpej return top; 888 1.181 maxv 889 1.18 thorpej nospace: 890 1.18 thorpej m_freem(top); 891 1.71 simonb return NULL; 892 1.18 thorpej } 893 1.18 thorpej 894 1.14 christos void 895 1.200 maxv m_copydata(struct mbuf *m, int off, int len, void *cp) 896 1.1 cgd { 897 1.200 maxv unsigned int count; 898 1.179 maxv struct mbuf *m0 = m; 899 1.179 maxv int len0 = len; 900 1.179 maxv int off0 = off; 901 1.200 maxv void *cp0 = cp; 902 1.1 cgd 903 1.156 christos KASSERT(len != M_COPYALL); 904 1.1 cgd 
if (off < 0 || len < 0) 905 1.90 matt panic("m_copydata: off %d, len %d", off, len); 906 1.1 cgd while (off > 0) { 907 1.94 tron if (m == NULL) 908 1.151 matt panic("m_copydata(%p,%d,%d,%p): m=NULL, off=%d (%d)", 909 1.200 maxv m0, len0, off0, cp0, off, off0 - off); 910 1.1 cgd if (off < m->m_len) 911 1.1 cgd break; 912 1.1 cgd off -= m->m_len; 913 1.1 cgd m = m->m_next; 914 1.1 cgd } 915 1.1 cgd while (len > 0) { 916 1.94 tron if (m == NULL) 917 1.151 matt panic("m_copydata(%p,%d,%d,%p): " 918 1.151 matt "m=NULL, off=%d (%d), len=%d (%d)", 919 1.200 maxv m0, len0, off0, cp0, 920 1.151 matt off, off0 - off, len, len0 - len); 921 1.219 riastrad count = uimin(m->m_len - off, len); 922 1.119 christos memcpy(cp, mtod(m, char *) + off, count); 923 1.1 cgd len -= count; 924 1.119 christos cp = (char *)cp + count; 925 1.1 cgd off = 0; 926 1.1 cgd m = m->m_next; 927 1.1 cgd } 928 1.1 cgd } 929 1.1 cgd 930 1.1 cgd /* 931 1.1 cgd * Concatenate mbuf chain n to m. 932 1.72 itojun * n might be copied into m (when n->m_len is small), therefore data portion of 933 1.72 itojun * n could be copied into an mbuf of different mbuf type. 934 1.1 cgd * Any m_pkthdr is not updated. 
935 1.1 cgd */ 936 1.14 christos void 937 1.62 thorpej m_cat(struct mbuf *m, struct mbuf *n) 938 1.1 cgd { 939 1.73 yamt 940 1.1 cgd while (m->m_next) 941 1.1 cgd m = m->m_next; 942 1.1 cgd while (n) { 943 1.77 itojun if (M_READONLY(m) || n->m_len > M_TRAILINGSPACE(m)) { 944 1.1 cgd /* just join the two chains */ 945 1.1 cgd m->m_next = n; 946 1.1 cgd return; 947 1.1 cgd } 948 1.1 cgd /* splat the data from one into the other */ 949 1.119 christos memcpy(mtod(m, char *) + m->m_len, mtod(n, void *), 950 1.1 cgd (u_int)n->m_len); 951 1.1 cgd m->m_len += n->m_len; 952 1.1 cgd n = m_free(n); 953 1.1 cgd } 954 1.1 cgd } 955 1.1 cgd 956 1.11 mycroft void 957 1.62 thorpej m_adj(struct mbuf *mp, int req_len) 958 1.1 cgd { 959 1.27 matt int len = req_len; 960 1.27 matt struct mbuf *m; 961 1.27 matt int count; 962 1.1 cgd 963 1.1 cgd if ((m = mp) == NULL) 964 1.1 cgd return; 965 1.1 cgd if (len >= 0) { 966 1.1 cgd /* 967 1.1 cgd * Trim from head. 968 1.1 cgd */ 969 1.1 cgd while (m != NULL && len > 0) { 970 1.1 cgd if (m->m_len <= len) { 971 1.1 cgd len -= m->m_len; 972 1.1 cgd m->m_len = 0; 973 1.1 cgd m = m->m_next; 974 1.1 cgd } else { 975 1.1 cgd m->m_len -= len; 976 1.1 cgd m->m_data += len; 977 1.1 cgd len = 0; 978 1.1 cgd } 979 1.1 cgd } 980 1.1 cgd if (mp->m_flags & M_PKTHDR) 981 1.181 maxv mp->m_pkthdr.len -= (req_len - len); 982 1.1 cgd } else { 983 1.1 cgd /* 984 1.1 cgd * Trim from tail. Scan the mbuf chain, 985 1.1 cgd * calculating its length and finding the last mbuf. 986 1.1 cgd * If the adjustment only affects this mbuf, then just 987 1.1 cgd * adjust and return. Otherwise, rescan and truncate 988 1.1 cgd * after the remaining size. 
989 1.1 cgd */ 990 1.1 cgd len = -len; 991 1.1 cgd count = 0; 992 1.1 cgd for (;;) { 993 1.1 cgd count += m->m_len; 994 1.181 maxv if (m->m_next == NULL) 995 1.1 cgd break; 996 1.1 cgd m = m->m_next; 997 1.1 cgd } 998 1.1 cgd if (m->m_len >= len) { 999 1.1 cgd m->m_len -= len; 1000 1.8 deraadt if (mp->m_flags & M_PKTHDR) 1001 1.8 deraadt mp->m_pkthdr.len -= len; 1002 1.1 cgd return; 1003 1.1 cgd } 1004 1.181 maxv 1005 1.1 cgd count -= len; 1006 1.1 cgd if (count < 0) 1007 1.1 cgd count = 0; 1008 1.181 maxv 1009 1.1 cgd /* 1010 1.1 cgd * Correct length for chain is "count". 1011 1.1 cgd * Find the mbuf with last data, adjust its length, 1012 1.1 cgd * and toss data from remaining mbufs on chain. 1013 1.1 cgd */ 1014 1.1 cgd m = mp; 1015 1.1 cgd if (m->m_flags & M_PKTHDR) 1016 1.1 cgd m->m_pkthdr.len = count; 1017 1.1 cgd for (; m; m = m->m_next) { 1018 1.1 cgd if (m->m_len >= count) { 1019 1.1 cgd m->m_len = count; 1020 1.1 cgd break; 1021 1.1 cgd } 1022 1.1 cgd count -= m->m_len; 1023 1.1 cgd } 1024 1.181 maxv if (m) { 1025 1.110 christos while (m->m_next) 1026 1.110 christos (m = m->m_next)->m_len = 0; 1027 1.181 maxv } 1028 1.1 cgd } 1029 1.1 cgd } 1030 1.1 cgd 1031 1.1 cgd /* 1032 1.148 rmind * m_ensure_contig: rearrange an mbuf chain that given length of bytes 1033 1.148 rmind * would be contiguous and in the data area of an mbuf (therefore, mtod() 1034 1.148 rmind * would work for a structure of given length). 1035 1.148 rmind * 1036 1.148 rmind * => On success, returns true and the resulting mbuf chain; false otherwise. 1037 1.148 rmind * => The mbuf chain may change, but is always preserved valid. 
1038 1.1 cgd */ 1039 1.148 rmind bool 1040 1.148 rmind m_ensure_contig(struct mbuf **m0, int len) 1041 1.1 cgd { 1042 1.148 rmind struct mbuf *n = *m0, *m; 1043 1.148 rmind size_t count, space; 1044 1.1 cgd 1045 1.156 christos KASSERT(len != M_COPYALL); 1046 1.1 cgd /* 1047 1.1 cgd * If first mbuf has no cluster, and has room for len bytes 1048 1.1 cgd * without shifting current data, pullup into it, 1049 1.1 cgd * otherwise allocate a new mbuf to prepend to the chain. 1050 1.1 cgd */ 1051 1.1 cgd if ((n->m_flags & M_EXT) == 0 && 1052 1.1 cgd n->m_data + len < &n->m_dat[MLEN] && n->m_next) { 1053 1.148 rmind if (n->m_len >= len) { 1054 1.148 rmind return true; 1055 1.148 rmind } 1056 1.1 cgd m = n; 1057 1.1 cgd n = n->m_next; 1058 1.1 cgd len -= m->m_len; 1059 1.1 cgd } else { 1060 1.148 rmind if (len > MHLEN) { 1061 1.148 rmind return false; 1062 1.148 rmind } 1063 1.153 christos m = m_get(M_DONTWAIT, n->m_type); 1064 1.148 rmind if (m == NULL) { 1065 1.148 rmind return false; 1066 1.148 rmind } 1067 1.64 matt MCLAIM(m, n->m_owner); 1068 1.1 cgd if (n->m_flags & M_PKTHDR) { 1069 1.226 maxv m_move_pkthdr(m, n); 1070 1.1 cgd } 1071 1.1 cgd } 1072 1.1 cgd space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 1073 1.1 cgd do { 1074 1.148 rmind count = MIN(MIN(MAX(len, max_protohdr), space), n->m_len); 1075 1.119 christos memcpy(mtod(m, char *) + m->m_len, mtod(n, void *), 1076 1.1 cgd (unsigned)count); 1077 1.1 cgd len -= count; 1078 1.1 cgd m->m_len += count; 1079 1.1 cgd n->m_len -= count; 1080 1.1 cgd space -= count; 1081 1.1 cgd if (n->m_len) 1082 1.1 cgd n->m_data += count; 1083 1.1 cgd else 1084 1.1 cgd n = m_free(n); 1085 1.1 cgd } while (len > 0 && n); 1086 1.148 rmind 1087 1.148 rmind m->m_next = n; 1088 1.148 rmind *m0 = m; 1089 1.148 rmind 1090 1.148 rmind return len <= 0; 1091 1.148 rmind } 1092 1.148 rmind 1093 1.148 rmind /* 1094 1.148 rmind * m_pullup: same as m_ensure_contig(), but destroys mbuf chain on error. 
1095 1.148 rmind */ 1096 1.148 rmind struct mbuf * 1097 1.148 rmind m_pullup(struct mbuf *n, int len) 1098 1.148 rmind { 1099 1.148 rmind struct mbuf *m = n; 1100 1.148 rmind 1101 1.156 christos KASSERT(len != M_COPYALL); 1102 1.148 rmind if (!m_ensure_contig(&m, len)) { 1103 1.148 rmind KASSERT(m != NULL); 1104 1.148 rmind m_freem(m); 1105 1.148 rmind m = NULL; 1106 1.1 cgd } 1107 1.148 rmind return m; 1108 1.60 thorpej } 1109 1.60 thorpej 1110 1.60 thorpej /* 1111 1.221 maxv * ensure that [off, off + len) is contiguous on the mbuf chain "m". 1112 1.221 maxv * packet chain before "off" is kept untouched. 1113 1.221 maxv * if offp == NULL, the target will start at <retval, 0> on resulting chain. 1114 1.221 maxv * if offp != NULL, the target will start at <retval, *offp> on resulting chain. 1115 1.221 maxv * 1116 1.221 maxv * on error return (NULL return value), original "m" will be freed. 1117 1.221 maxv * 1118 1.221 maxv * XXX M_TRAILINGSPACE/M_LEADINGSPACE on shared cluster (sharedcluster) 1119 1.221 maxv */ 1120 1.221 maxv struct mbuf * 1121 1.221 maxv m_pulldown(struct mbuf *m, int off, int len, int *offp) 1122 1.221 maxv { 1123 1.221 maxv struct mbuf *n, *o; 1124 1.221 maxv int hlen, tlen, olen; 1125 1.221 maxv int sharedcluster; 1126 1.221 maxv 1127 1.221 maxv /* Check invalid arguments. */ 1128 1.221 maxv if (m == NULL) 1129 1.221 maxv panic("%s: m == NULL", __func__); 1130 1.221 maxv if (len > MCLBYTES) { 1131 1.221 maxv m_freem(m); 1132 1.221 maxv return NULL; 1133 1.221 maxv } 1134 1.221 maxv 1135 1.221 maxv n = m; 1136 1.221 maxv while (n != NULL && off > 0) { 1137 1.221 maxv if (n->m_len > off) 1138 1.221 maxv break; 1139 1.221 maxv off -= n->m_len; 1140 1.221 maxv n = n->m_next; 1141 1.221 maxv } 1142 1.221 maxv /* Be sure to point non-empty mbuf. 
*/ 1143 1.221 maxv while (n != NULL && n->m_len == 0) 1144 1.221 maxv n = n->m_next; 1145 1.221 maxv if (!n) { 1146 1.221 maxv m_freem(m); 1147 1.221 maxv return NULL; /* mbuf chain too short */ 1148 1.221 maxv } 1149 1.221 maxv 1150 1.221 maxv sharedcluster = M_READONLY(n); 1151 1.221 maxv 1152 1.221 maxv /* 1153 1.221 maxv * The target data is on <n, off>. If we got enough data on the mbuf 1154 1.221 maxv * "n", we're done. 1155 1.221 maxv */ 1156 1.221 maxv #ifdef __NO_STRICT_ALIGNMENT 1157 1.221 maxv if ((off == 0 || offp) && len <= n->m_len - off && !sharedcluster) 1158 1.221 maxv #else 1159 1.221 maxv if ((off == 0 || offp) && len <= n->m_len - off && !sharedcluster && 1160 1.221 maxv ALIGNED_POINTER((mtod(n, char *) + off), uint32_t)) 1161 1.221 maxv #endif 1162 1.221 maxv goto ok; 1163 1.221 maxv 1164 1.221 maxv /* 1165 1.221 maxv * When (len <= n->m_len - off) and (off != 0), it is a special case. 1166 1.221 maxv * Len bytes from <n, off> sit in single mbuf, but the caller does 1167 1.221 maxv * not like the starting position (off). 1168 1.221 maxv * 1169 1.221 maxv * Chop the current mbuf into two pieces, set off to 0. 
1170 1.221 maxv */ 1171 1.221 maxv if (len <= n->m_len - off) { 1172 1.221 maxv struct mbuf *mlast; 1173 1.221 maxv 1174 1.221 maxv o = m_dup(n, off, n->m_len - off, M_DONTWAIT); 1175 1.221 maxv if (o == NULL) { 1176 1.221 maxv m_freem(m); 1177 1.221 maxv return NULL; /* ENOBUFS */ 1178 1.221 maxv } 1179 1.249 riastrad KASSERTMSG(o->m_len >= len, "o=%p o->m_len=%d len=%d", 1180 1.249 riastrad o, o->m_len, len); 1181 1.221 maxv for (mlast = o; mlast->m_next != NULL; mlast = mlast->m_next) 1182 1.221 maxv ; 1183 1.221 maxv n->m_len = off; 1184 1.221 maxv mlast->m_next = n->m_next; 1185 1.221 maxv n->m_next = o; 1186 1.221 maxv n = o; 1187 1.221 maxv off = 0; 1188 1.221 maxv goto ok; 1189 1.221 maxv } 1190 1.221 maxv 1191 1.221 maxv /* 1192 1.221 maxv * We need to take hlen from <n, off> and tlen from <n->m_next, 0>, 1193 1.221 maxv * and construct contiguous mbuf with m_len == len. 1194 1.221 maxv * 1195 1.221 maxv * Note that hlen + tlen == len, and tlen > 0. 1196 1.221 maxv */ 1197 1.221 maxv hlen = n->m_len - off; 1198 1.221 maxv tlen = len - hlen; 1199 1.221 maxv 1200 1.221 maxv /* 1201 1.221 maxv * Ensure that we have enough trailing data on mbuf chain. If not, 1202 1.221 maxv * we can do nothing about the chain. 1203 1.221 maxv */ 1204 1.221 maxv olen = 0; 1205 1.221 maxv for (o = n->m_next; o != NULL; o = o->m_next) 1206 1.221 maxv olen += o->m_len; 1207 1.221 maxv if (hlen + olen < len) { 1208 1.221 maxv m_freem(m); 1209 1.221 maxv return NULL; /* mbuf chain too short */ 1210 1.221 maxv } 1211 1.221 maxv 1212 1.221 maxv /* 1213 1.221 maxv * Easy cases first. We need to use m_copydata() to get data from 1214 1.221 maxv * <n->m_next, 0>. 
1215 1.221 maxv */ 1216 1.221 maxv if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen && 1217 1.221 maxv !sharedcluster) { 1218 1.221 maxv m_copydata(n->m_next, 0, tlen, mtod(n, char *) + n->m_len); 1219 1.221 maxv n->m_len += tlen; 1220 1.221 maxv m_adj(n->m_next, tlen); 1221 1.221 maxv goto ok; 1222 1.221 maxv } 1223 1.221 maxv if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen && 1224 1.221 maxv #ifndef __NO_STRICT_ALIGNMENT 1225 1.221 maxv ALIGNED_POINTER((n->m_next->m_data - hlen), uint32_t) && 1226 1.221 maxv #endif 1227 1.221 maxv !sharedcluster && n->m_next->m_len >= tlen) { 1228 1.221 maxv n->m_next->m_data -= hlen; 1229 1.221 maxv n->m_next->m_len += hlen; 1230 1.221 maxv memcpy(mtod(n->m_next, void *), mtod(n, char *) + off, hlen); 1231 1.221 maxv n->m_len -= hlen; 1232 1.221 maxv n = n->m_next; 1233 1.221 maxv off = 0; 1234 1.221 maxv goto ok; 1235 1.221 maxv } 1236 1.221 maxv 1237 1.221 maxv /* 1238 1.221 maxv * Now, we need to do the hard way. Don't copy as there's no room 1239 1.221 maxv * on both ends. 
1240 1.221 maxv */ 1241 1.221 maxv o = m_get(M_DONTWAIT, m->m_type); 1242 1.221 maxv if (o && len > MLEN) { 1243 1.221 maxv MCLGET(o, M_DONTWAIT); 1244 1.221 maxv if ((o->m_flags & M_EXT) == 0) { 1245 1.221 maxv m_free(o); 1246 1.221 maxv o = NULL; 1247 1.221 maxv } 1248 1.221 maxv } 1249 1.221 maxv if (!o) { 1250 1.221 maxv m_freem(m); 1251 1.221 maxv return NULL; /* ENOBUFS */ 1252 1.221 maxv } 1253 1.221 maxv /* get hlen from <n, off> into <o, 0> */ 1254 1.221 maxv o->m_len = hlen; 1255 1.221 maxv memcpy(mtod(o, void *), mtod(n, char *) + off, hlen); 1256 1.221 maxv n->m_len -= hlen; 1257 1.221 maxv /* get tlen from <n->m_next, 0> into <o, hlen> */ 1258 1.221 maxv m_copydata(n->m_next, 0, tlen, mtod(o, char *) + o->m_len); 1259 1.221 maxv o->m_len += tlen; 1260 1.221 maxv m_adj(n->m_next, tlen); 1261 1.221 maxv o->m_next = n->m_next; 1262 1.221 maxv n->m_next = o; 1263 1.221 maxv n = o; 1264 1.221 maxv off = 0; 1265 1.221 maxv 1266 1.221 maxv ok: 1267 1.221 maxv if (offp) 1268 1.221 maxv *offp = off; 1269 1.221 maxv return n; 1270 1.221 maxv } 1271 1.221 maxv 1272 1.221 maxv /* 1273 1.60 thorpej * Like m_pullup(), except a new mbuf is always allocated, and we allow 1274 1.60 thorpej * the amount of empty space before the data in the new mbuf to be specified 1275 1.60 thorpej * (in the event that the caller expects to prepend later). 
1276 1.60 thorpej */ 1277 1.60 thorpej struct mbuf * 1278 1.60 thorpej m_copyup(struct mbuf *n, int len, int dstoff) 1279 1.60 thorpej { 1280 1.60 thorpej struct mbuf *m; 1281 1.60 thorpej int count, space; 1282 1.60 thorpej 1283 1.156 christos KASSERT(len != M_COPYALL); 1284 1.193 maxv if (len > ((int)MHLEN - dstoff)) 1285 1.60 thorpej goto bad; 1286 1.153 christos m = m_get(M_DONTWAIT, n->m_type); 1287 1.60 thorpej if (m == NULL) 1288 1.60 thorpej goto bad; 1289 1.64 matt MCLAIM(m, n->m_owner); 1290 1.60 thorpej if (n->m_flags & M_PKTHDR) { 1291 1.226 maxv m_move_pkthdr(m, n); 1292 1.60 thorpej } 1293 1.60 thorpej m->m_data += dstoff; 1294 1.60 thorpej space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 1295 1.60 thorpej do { 1296 1.219 riastrad count = uimin(uimin(uimax(len, max_protohdr), space), n->m_len); 1297 1.119 christos memcpy(mtod(m, char *) + m->m_len, mtod(n, void *), 1298 1.60 thorpej (unsigned)count); 1299 1.60 thorpej len -= count; 1300 1.60 thorpej m->m_len += count; 1301 1.60 thorpej n->m_len -= count; 1302 1.60 thorpej space -= count; 1303 1.60 thorpej if (n->m_len) 1304 1.60 thorpej n->m_data += count; 1305 1.60 thorpej else 1306 1.60 thorpej n = m_free(n); 1307 1.60 thorpej } while (len > 0 && n); 1308 1.60 thorpej if (len > 0) { 1309 1.60 thorpej (void) m_free(m); 1310 1.60 thorpej goto bad; 1311 1.60 thorpej } 1312 1.60 thorpej m->m_next = n; 1313 1.199 maxv return m; 1314 1.60 thorpej bad: 1315 1.60 thorpej m_freem(n); 1316 1.199 maxv return NULL; 1317 1.9 mycroft } 1318 1.9 mycroft 1319 1.9 mycroft struct mbuf * 1320 1.195 maxv m_split(struct mbuf *m0, int len, int wait) 1321 1.9 mycroft { 1322 1.195 maxv return m_split_internal(m0, len, wait, true); 1323 1.85 yamt } 1324 1.85 yamt 1325 1.85 yamt static struct mbuf * 1326 1.195 maxv m_split_internal(struct mbuf *m0, int len0, int wait, bool copyhdr) 1327 1.85 yamt { 1328 1.27 matt struct mbuf *m, *n; 1329 1.22 thorpej unsigned len = len0, remain, len_save; 1330 1.9 mycroft 1331 1.156 
christos KASSERT(len0 != M_COPYALL); 1332 1.9 mycroft for (m = m0; m && len > m->m_len; m = m->m_next) 1333 1.9 mycroft len -= m->m_len; 1334 1.181 maxv if (m == NULL) 1335 1.181 maxv return NULL; 1336 1.181 maxv 1337 1.9 mycroft remain = m->m_len - len; 1338 1.85 yamt if (copyhdr && (m0->m_flags & M_PKTHDR)) { 1339 1.153 christos n = m_gethdr(wait, m0->m_type); 1340 1.153 christos if (n == NULL) 1341 1.153 christos return NULL; 1342 1.181 maxv 1343 1.112 pavel MCLAIM(n, m0->m_owner); 1344 1.167 ozaki m_copy_rcvif(n, m0); 1345 1.9 mycroft n->m_pkthdr.len = m0->m_pkthdr.len - len0; 1346 1.22 thorpej len_save = m0->m_pkthdr.len; 1347 1.9 mycroft m0->m_pkthdr.len = len0; 1348 1.181 maxv 1349 1.252 ozaki if ((m->m_flags & M_EXT) == 0 && remain > MHLEN) { 1350 1.9 mycroft /* m can't be the lead packet */ 1351 1.230 maxv m_align(n, 0); 1352 1.132 bouyer n->m_len = 0; 1353 1.9 mycroft n->m_next = m_split(m, len, wait); 1354 1.181 maxv if (n->m_next == NULL) { 1355 1.181 maxv (void)m_free(n); 1356 1.22 thorpej m0->m_pkthdr.len = len_save; 1357 1.181 maxv return NULL; 1358 1.181 maxv } 1359 1.181 maxv return n; 1360 1.181 maxv } 1361 1.9 mycroft } else if (remain == 0) { 1362 1.9 mycroft n = m->m_next; 1363 1.181 maxv m->m_next = NULL; 1364 1.181 maxv return n; 1365 1.9 mycroft } else { 1366 1.153 christos n = m_get(wait, m->m_type); 1367 1.181 maxv if (n == NULL) 1368 1.181 maxv return NULL; 1369 1.64 matt MCLAIM(n, m->m_owner); 1370 1.9 mycroft } 1371 1.181 maxv 1372 1.9 mycroft if (m->m_flags & M_EXT) { 1373 1.125 yamt n->m_data = m->m_data + len; 1374 1.18 thorpej MCLADDREFERENCE(m, n); 1375 1.9 mycroft } else { 1376 1.252 ozaki m_align(n, remain); 1377 1.119 christos memcpy(mtod(n, void *), mtod(m, char *) + len, remain); 1378 1.9 mycroft } 1379 1.181 maxv 1380 1.9 mycroft n->m_len = remain; 1381 1.9 mycroft m->m_len = len; 1382 1.9 mycroft n->m_next = m->m_next; 1383 1.181 maxv m->m_next = NULL; 1384 1.181 maxv return n; 1385 1.9 mycroft } 1386 1.181 maxv 1387 1.9 
mycroft /* 1388 1.9 mycroft * Routine to copy from device local memory into mbufs. 1389 1.9 mycroft */ 1390 1.9 mycroft struct mbuf * 1391 1.225 maxv m_devget(char *buf, int totlen, int off, struct ifnet *ifp) 1392 1.9 mycroft { 1393 1.27 matt struct mbuf *m; 1394 1.181 maxv struct mbuf *top = NULL, **mp = ⊤ 1395 1.181 maxv char *cp, *epkt; 1396 1.225 maxv int len; 1397 1.9 mycroft 1398 1.9 mycroft cp = buf; 1399 1.9 mycroft epkt = cp + totlen; 1400 1.9 mycroft if (off) { 1401 1.13 cgd /* 1402 1.13 cgd * If 'off' is non-zero, packet is trailer-encapsulated, 1403 1.13 cgd * so we have to skip the type and length fields. 1404 1.13 cgd */ 1405 1.104 perry cp += off + 2 * sizeof(uint16_t); 1406 1.104 perry totlen -= 2 * sizeof(uint16_t); 1407 1.9 mycroft } 1408 1.181 maxv 1409 1.153 christos m = m_gethdr(M_DONTWAIT, MT_DATA); 1410 1.153 christos if (m == NULL) 1411 1.153 christos return NULL; 1412 1.166 ozaki m_set_rcvif(m, ifp); 1413 1.9 mycroft m->m_pkthdr.len = totlen; 1414 1.9 mycroft m->m_len = MHLEN; 1415 1.9 mycroft 1416 1.9 mycroft while (totlen > 0) { 1417 1.9 mycroft if (top) { 1418 1.153 christos m = m_get(M_DONTWAIT, MT_DATA); 1419 1.181 maxv if (m == NULL) { 1420 1.9 mycroft m_freem(top); 1421 1.181 maxv return NULL; 1422 1.9 mycroft } 1423 1.9 mycroft m->m_len = MLEN; 1424 1.9 mycroft } 1425 1.181 maxv 1426 1.219 riastrad len = uimin(totlen, epkt - cp); 1427 1.181 maxv 1428 1.9 mycroft if (len >= MINCLSIZE) { 1429 1.9 mycroft MCLGET(m, M_DONTWAIT); 1430 1.19 mycroft if ((m->m_flags & M_EXT) == 0) { 1431 1.20 mycroft m_free(m); 1432 1.19 mycroft m_freem(top); 1433 1.181 maxv return NULL; 1434 1.19 mycroft } 1435 1.219 riastrad m->m_len = len = uimin(len, MCLBYTES); 1436 1.9 mycroft } else { 1437 1.9 mycroft /* 1438 1.9 mycroft * Place initial small packet/header at end of mbuf. 
1439 1.9 mycroft */ 1440 1.9 mycroft if (len < m->m_len) { 1441 1.9 mycroft if (top == 0 && len + max_linkhdr <= m->m_len) 1442 1.9 mycroft m->m_data += max_linkhdr; 1443 1.9 mycroft m->m_len = len; 1444 1.9 mycroft } else 1445 1.9 mycroft len = m->m_len; 1446 1.9 mycroft } 1447 1.181 maxv 1448 1.225 maxv memcpy(mtod(m, void *), cp, (size_t)len); 1449 1.181 maxv 1450 1.9 mycroft cp += len; 1451 1.9 mycroft *mp = m; 1452 1.9 mycroft mp = &m->m_next; 1453 1.9 mycroft totlen -= len; 1454 1.9 mycroft if (cp == epkt) 1455 1.9 mycroft cp = buf; 1456 1.9 mycroft } 1457 1.181 maxv 1458 1.181 maxv return top; 1459 1.18 thorpej } 1460 1.18 thorpej 1461 1.18 thorpej /* 1462 1.18 thorpej * Copy data from a buffer back into the indicated mbuf chain, 1463 1.18 thorpej * starting "off" bytes from the beginning, extending the mbuf 1464 1.18 thorpej * chain if necessary. 1465 1.18 thorpej */ 1466 1.18 thorpej void 1467 1.86 yamt m_copyback(struct mbuf *m0, int off, int len, const void *cp) 1468 1.18 thorpej { 1469 1.85 yamt #if defined(DEBUG) 1470 1.85 yamt struct mbuf *origm = m0; 1471 1.85 yamt int error; 1472 1.181 maxv #endif 1473 1.85 yamt 1474 1.85 yamt if (m0 == NULL) 1475 1.85 yamt return; 1476 1.85 yamt 1477 1.85 yamt #if defined(DEBUG) 1478 1.85 yamt error = 1479 1.181 maxv #endif 1480 1.196 maxv m_copyback_internal(&m0, off, len, cp, CB_COPYBACK|CB_EXTEND, 1481 1.196 maxv M_DONTWAIT); 1482 1.85 yamt 1483 1.85 yamt #if defined(DEBUG) 1484 1.85 yamt if (error != 0 || (m0 != NULL && origm != m0)) 1485 1.85 yamt panic("m_copyback"); 1486 1.181 maxv #endif 1487 1.85 yamt } 1488 1.85 yamt 1489 1.85 yamt struct mbuf * 1490 1.86 yamt m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how) 1491 1.85 yamt { 1492 1.85 yamt int error; 1493 1.85 yamt 1494 1.85 yamt /* don't support chain expansion */ 1495 1.156 christos KASSERT(len != M_COPYALL); 1496 1.85 yamt KDASSERT(off + len <= m_length(m0)); 1497 1.85 yamt 1498 1.196 maxv error = m_copyback_internal(&m0, off, 
len, cp, CB_COPYBACK|CB_COW, 1499 1.196 maxv how); 1500 1.85 yamt if (error) { 1501 1.85 yamt /* 1502 1.85 yamt * no way to recover from partial success. 1503 1.85 yamt * just free the chain. 1504 1.85 yamt */ 1505 1.85 yamt m_freem(m0); 1506 1.85 yamt return NULL; 1507 1.85 yamt } 1508 1.85 yamt return m0; 1509 1.85 yamt } 1510 1.85 yamt 1511 1.85 yamt int 1512 1.85 yamt m_makewritable(struct mbuf **mp, int off, int len, int how) 1513 1.85 yamt { 1514 1.85 yamt int error; 1515 1.85 yamt #if defined(DEBUG) 1516 1.156 christos int origlen = m_length(*mp); 1517 1.181 maxv #endif 1518 1.85 yamt 1519 1.196 maxv error = m_copyback_internal(mp, off, len, NULL, CB_PRESERVE|CB_COW, 1520 1.196 maxv how); 1521 1.170 christos if (error) 1522 1.170 christos return error; 1523 1.170 christos 1524 1.85 yamt #if defined(DEBUG) 1525 1.156 christos int reslen = 0; 1526 1.156 christos for (struct mbuf *n = *mp; n; n = n->m_next) 1527 1.85 yamt reslen += n->m_len; 1528 1.85 yamt if (origlen != reslen) 1529 1.85 yamt panic("m_makewritable: length changed"); 1530 1.85 yamt if (((*mp)->m_flags & M_PKTHDR) != 0 && reslen != (*mp)->m_pkthdr.len) 1531 1.85 yamt panic("m_makewritable: inconsist"); 1532 1.181 maxv #endif 1533 1.85 yamt 1534 1.170 christos return 0; 1535 1.85 yamt } 1536 1.85 yamt 1537 1.196 maxv static int 1538 1.196 maxv m_copyback_internal(struct mbuf **mp0, int off, int len, const void *vp, 1539 1.196 maxv int flags, int how) 1540 1.85 yamt { 1541 1.27 matt int mlen; 1542 1.85 yamt struct mbuf *m, *n; 1543 1.85 yamt struct mbuf **mp; 1544 1.18 thorpej int totlen = 0; 1545 1.86 yamt const char *cp = vp; 1546 1.18 thorpej 1547 1.85 yamt KASSERT(mp0 != NULL); 1548 1.85 yamt KASSERT(*mp0 != NULL); 1549 1.196 maxv KASSERT((flags & CB_PRESERVE) == 0 || cp == NULL); 1550 1.196 maxv KASSERT((flags & CB_COPYBACK) == 0 || cp != NULL); 1551 1.85 yamt 1552 1.156 christos if (len == M_COPYALL) 1553 1.156 christos len = m_length(*mp0) - off; 1554 1.156 christos 1555 1.106 yamt /* 1556 
1.196 maxv * we don't bother to update "totlen" in the case of CB_COW, 1557 1.196 maxv * assuming that CB_EXTEND and CB_COW are exclusive. 1558 1.106 yamt */ 1559 1.106 yamt 1560 1.196 maxv KASSERT((~flags & (CB_EXTEND|CB_COW)) != 0); 1561 1.106 yamt 1562 1.85 yamt mp = mp0; 1563 1.85 yamt m = *mp; 1564 1.18 thorpej while (off > (mlen = m->m_len)) { 1565 1.18 thorpej off -= mlen; 1566 1.18 thorpej totlen += mlen; 1567 1.109 yamt if (m->m_next == NULL) { 1568 1.109 yamt int tspace; 1569 1.109 yamt extend: 1570 1.196 maxv if ((flags & CB_EXTEND) == 0) 1571 1.85 yamt goto out; 1572 1.109 yamt 1573 1.109 yamt /* 1574 1.109 yamt * try to make some space at the end of "m". 1575 1.109 yamt */ 1576 1.109 yamt 1577 1.109 yamt mlen = m->m_len; 1578 1.109 yamt if (off + len >= MINCLSIZE && 1579 1.109 yamt (m->m_flags & M_EXT) == 0 && m->m_len == 0) { 1580 1.109 yamt MCLGET(m, how); 1581 1.109 yamt } 1582 1.109 yamt tspace = M_TRAILINGSPACE(m); 1583 1.109 yamt if (tspace > 0) { 1584 1.219 riastrad tspace = uimin(tspace, off + len); 1585 1.109 yamt KASSERT(tspace > 0); 1586 1.109 yamt memset(mtod(m, char *) + m->m_len, 0, 1587 1.219 riastrad uimin(off, tspace)); 1588 1.109 yamt m->m_len += tspace; 1589 1.109 yamt off += mlen; 1590 1.109 yamt totlen -= mlen; 1591 1.109 yamt continue; 1592 1.109 yamt } 1593 1.109 yamt 1594 1.109 yamt /* 1595 1.109 yamt * need to allocate an mbuf. 
1596 1.109 yamt */ 1597 1.109 yamt 1598 1.109 yamt if (off + len >= MINCLSIZE) { 1599 1.109 yamt n = m_getcl(how, m->m_type, 0); 1600 1.109 yamt } else { 1601 1.109 yamt n = m_get(how, m->m_type); 1602 1.109 yamt } 1603 1.109 yamt if (n == NULL) { 1604 1.18 thorpej goto out; 1605 1.109 yamt } 1606 1.219 riastrad n->m_len = uimin(M_TRAILINGSPACE(n), off + len); 1607 1.219 riastrad memset(mtod(n, char *), 0, uimin(n->m_len, off)); 1608 1.18 thorpej m->m_next = n; 1609 1.18 thorpej } 1610 1.85 yamt mp = &m->m_next; 1611 1.18 thorpej m = m->m_next; 1612 1.18 thorpej } 1613 1.18 thorpej while (len > 0) { 1614 1.85 yamt mlen = m->m_len - off; 1615 1.85 yamt if (mlen != 0 && M_READONLY(m)) { 1616 1.196 maxv /* 1617 1.196 maxv * This mbuf is read-only. Allocate a new writable 1618 1.196 maxv * mbuf and try again. 1619 1.196 maxv */ 1620 1.85 yamt char *datap; 1621 1.85 yamt int eatlen; 1622 1.85 yamt 1623 1.196 maxv KASSERT((flags & CB_COW) != 0); 1624 1.85 yamt 1625 1.85 yamt /* 1626 1.85 yamt * if we're going to write into the middle of 1627 1.85 yamt * a mbuf, split it first. 1628 1.85 yamt */ 1629 1.137 seanb if (off > 0) { 1630 1.195 maxv n = m_split_internal(m, off, how, false); 1631 1.85 yamt if (n == NULL) 1632 1.85 yamt goto enobufs; 1633 1.85 yamt m->m_next = n; 1634 1.85 yamt mp = &m->m_next; 1635 1.85 yamt m = n; 1636 1.85 yamt off = 0; 1637 1.85 yamt continue; 1638 1.85 yamt } 1639 1.85 yamt 1640 1.85 yamt /* 1641 1.85 yamt * XXX TODO coalesce into the trailingspace of 1642 1.85 yamt * the previous mbuf when possible. 1643 1.85 yamt */ 1644 1.85 yamt 1645 1.85 yamt /* 1646 1.85 yamt * allocate a new mbuf. copy packet header if needed. 
1647 1.85 yamt */ 1648 1.153 christos n = m_get(how, m->m_type); 1649 1.85 yamt if (n == NULL) 1650 1.85 yamt goto enobufs; 1651 1.85 yamt MCLAIM(n, m->m_owner); 1652 1.85 yamt if (off == 0 && (m->m_flags & M_PKTHDR) != 0) { 1653 1.226 maxv m_move_pkthdr(n, m); 1654 1.85 yamt n->m_len = MHLEN; 1655 1.85 yamt } else { 1656 1.85 yamt if (len >= MINCLSIZE) 1657 1.85 yamt MCLGET(n, M_DONTWAIT); 1658 1.85 yamt n->m_len = 1659 1.85 yamt (n->m_flags & M_EXT) ? MCLBYTES : MLEN; 1660 1.85 yamt } 1661 1.85 yamt if (n->m_len > len) 1662 1.85 yamt n->m_len = len; 1663 1.85 yamt 1664 1.85 yamt /* 1665 1.85 yamt * free the region which has been overwritten. 1666 1.85 yamt * copying data from old mbufs if requested. 1667 1.85 yamt */ 1668 1.196 maxv if (flags & CB_PRESERVE) 1669 1.85 yamt datap = mtod(n, char *); 1670 1.85 yamt else 1671 1.85 yamt datap = NULL; 1672 1.85 yamt eatlen = n->m_len; 1673 1.85 yamt while (m != NULL && M_READONLY(m) && 1674 1.85 yamt n->m_type == m->m_type && eatlen > 0) { 1675 1.219 riastrad mlen = uimin(eatlen, m->m_len); 1676 1.85 yamt if (datap) { 1677 1.85 yamt m_copydata(m, 0, mlen, datap); 1678 1.85 yamt datap += mlen; 1679 1.85 yamt } 1680 1.85 yamt m->m_data += mlen; 1681 1.85 yamt m->m_len -= mlen; 1682 1.85 yamt eatlen -= mlen; 1683 1.85 yamt if (m->m_len == 0) 1684 1.85 yamt *mp = m = m_free(m); 1685 1.85 yamt } 1686 1.85 yamt if (eatlen > 0) 1687 1.85 yamt n->m_len -= eatlen; 1688 1.85 yamt n->m_next = m; 1689 1.85 yamt *mp = m = n; 1690 1.85 yamt continue; 1691 1.85 yamt } 1692 1.219 riastrad mlen = uimin(mlen, len); 1693 1.196 maxv if (flags & CB_COPYBACK) { 1694 1.119 christos memcpy(mtod(m, char *) + off, cp, (unsigned)mlen); 1695 1.85 yamt cp += mlen; 1696 1.85 yamt } 1697 1.18 thorpej len -= mlen; 1698 1.18 thorpej mlen += off; 1699 1.18 thorpej off = 0; 1700 1.18 thorpej totlen += mlen; 1701 1.18 thorpej if (len == 0) 1702 1.18 thorpej break; 1703 1.109 yamt if (m->m_next == NULL) { 1704 1.109 yamt goto extend; 1705 1.18 thorpej } 
		/*
		 * Tail of the copyback/extend helper whose head lies
		 * above this view: keep walking the chain.
		 */
		mp = &m->m_next;
		m = m->m_next;
	}

out:
	/*
	 * If the chain carries a packet header and we wrote past the
	 * recorded length, bring m_pkthdr.len up to date.  Growing the
	 * packet this way is only legal in CB_EXTEND mode.
	 */
	if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) {
		KASSERT((flags & CB_EXTEND) != 0);
		m->m_pkthdr.len = totlen;
	}

	return 0;

enobufs:
	return SET_ERROR(ENOBUFS);
}

/*
 * Compress the mbuf chain. Return the new mbuf chain on success, NULL on
 * failure. The first mbuf is preserved, and on success the pointer returned
 * is the same as the one passed.
 */
struct mbuf *
m_defrag(struct mbuf *m, int how)
{
	struct mbuf *m0, *mn, *n;
	int sz;

	/* Only packet-header mbufs may be defragmented. */
	KASSERT((m->m_flags & M_PKTHDR) != 0);

	/* Single-mbuf chain: nothing to compress. */
	if (m->m_next == NULL)
		return m;

	/* Defrag to single mbuf if at all possible */
	if ((m->m_flags & M_EXT) == 0 && m->m_pkthdr.len <= MCLBYTES) {
		if (m->m_pkthdr.len <= MHLEN) {
			/*
			 * Whole packet fits in the header mbuf's internal
			 * storage.  If the trailing space is short, slide
			 * the existing data back to the start of m_pktdat
			 * to make room for the rest of the chain.
			 */
			if (M_TRAILINGSPACE(m) < (m->m_pkthdr.len - m->m_len)) {
				KASSERTMSG(M_LEADINGSPACE(m) +
				    M_TRAILINGSPACE(m) >=
				    (m->m_pkthdr.len - m->m_len),
				    "too small leading %d trailing %d ro? %d"
				    " pkthdr.len %d mlen %d",
				    (int)M_LEADINGSPACE(m),
				    (int)M_TRAILINGSPACE(m),
				    M_READONLY(m),
				    m->m_pkthdr.len, m->m_len);

				/* Regions may overlap: memmove, not memcpy. */
				memmove(m->m_pktdat, m->m_data, m->m_len);
				m->m_data = m->m_pktdat;

				KASSERT(M_TRAILINGSPACE(m) >=
				    (m->m_pkthdr.len - m->m_len));
			}
		} else {
			/* Must copy data before adding cluster */
			/*
			 * MCLGET replaces the internal buffer, so park the
			 * current payload in a temporary mbuf first.
			 */
			m0 = m_get(how, MT_DATA);
			if (m0 == NULL)
				return NULL;
			KASSERTMSG(m->m_len <= MHLEN,
			    "m=%p m->m_len=%d MHLEN=%u",
			    m, m->m_len, (unsigned)MHLEN);
			m_copydata(m, 0, m->m_len, mtod(m0, void *));

			MCLGET(m, how);
			if ((m->m_flags & M_EXT) == 0) {
				/* Cluster allocation failed. */
				m_free(m0);
				return NULL;
			}
			memcpy(m->m_data, mtod(m0, void *), m->m_len);
			m_free(m0);
		}
		/*
		 * Append the rest of the chain's payload after the data
		 * already in 'm', then drop the now-redundant tail.
		 */
		KASSERTMSG(M_TRAILINGSPACE(m) >= (m->m_pkthdr.len - m->m_len),
		    "m=%p M_TRAILINGSPACE(m)=%zd m->m_pkthdr.len=%d"
		    " m->m_len=%d",
		    m, M_TRAILINGSPACE(m), m->m_pkthdr.len, m->m_len);
		m_copydata(m->m_next, 0, m->m_pkthdr.len - m->m_len,
		    mtod(m, char *) + m->m_len);
		m->m_len = m->m_pkthdr.len;
		m_freem(m->m_next);
		m->m_next = NULL;
		return m;
	}

	/*
	 * General case: rebuild everything after the first mbuf into a
	 * fresh chain 'm0' of cluster-backed mbufs, then swap it in.
	 */
	m0 = m_get(how, MT_DATA);
	if (m0 == NULL)
		return NULL;
	mn = m0;

	/* Bytes that live beyond the first mbuf. */
	sz = m->m_pkthdr.len - m->m_len;
	KASSERT(sz >= 0);

	do {
		/* Use a cluster whenever the remainder exceeds MLEN. */
		if (sz > MLEN) {
			MCLGET(mn, how);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(m0);
				return NULL;
			}
		}

		mn->m_len = MIN(sz, MCLBYTES);

		/* Copy from the original chain at the current offset. */
		m_copydata(m, m->m_pkthdr.len - sz, mn->m_len,
		    mtod(mn, void *));

		sz -= mn->m_len;

		if (sz > 0) {
			/* need more mbufs */
			n = m_get(how, MT_DATA);
			if (n == NULL) {
				m_freem(m0);
				return NULL;
			}

			mn->m_next = n;
			mn = n;
		}
	} while (sz > 0);

	/* Replace the old tail with the compacted chain. */
	m_freem(m->m_next);
	m->m_next = m0;

	return m;
}

/*
 * Strip the packet header from an mbuf: delete any attached tags and
 * clear all M_PKTHDR state.
 */
void
m_remove_pkthdr(struct mbuf *m)
{
	KASSERT(m->m_flags & M_PKTHDR);

	m_tag_delete_chain(m);
	m->m_flags &= ~M_PKTHDR;
	memset(&m->m_pkthdr, 0, sizeof(m->m_pkthdr));
}

/*
 * Copy the packet header from 'from' to 'to', including a deep copy of
 * the tag chain.  'to' must not use external storage and must not
 * already carry tags (asserted below).
 */
void
m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_EXT) == 0);
	KASSERT((to->m_flags & M_PKTHDR) == 0 ||
	    SLIST_FIRST(&to->m_pkthdr.tags) == NULL);
	KASSERT((from->m_flags & M_PKTHDR) != 0);

	to->m_pkthdr = from->m_pkthdr;
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;

	/*
	 * The struct assignment above copied 'from's SLIST head too;
	 * reset it so the tags are duplicated rather than shared.
	 */
	SLIST_INIT(&to->m_pkthdr.tags);
	m_tag_copy_chain(to, from);
}

/*
 * Move the packet header from 'from' to 'to'.  Unlike m_copy_pkthdr(),
 * ownership of the tag chain transfers with the header: 'from' loses its
 * M_PKTHDR flag.  'to' must not use external storage and must not
 * already carry tags (asserted below).
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_EXT) == 0);
	KASSERT((to->m_flags & M_PKTHDR) == 0 ||
	    SLIST_FIRST(&to->m_pkthdr.tags) == NULL);
	KASSERT((from->m_flags & M_PKTHDR) != 0);

	to->m_pkthdr = from->m_pkthdr;
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;

	/* 'from' keeps its data, but no longer owns the header or tags. */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Set the m_data pointer of a newly-allocated mbuf to place an object of the
 * specified size at the end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int buflen, adjust;

	KASSERT(len != M_COPYALL);
	/* Only valid on a fresh mbuf whose data pointer is untouched. */
	KASSERTMSG(M_LEADINGSPACE(m) == 0, "m=%p M_LEADINGSPACE(m)=%zd",
	    m, M_LEADINGSPACE(m));

	buflen = M_BUFSIZE(m);

	KASSERTMSG(len <= buflen, "m=%p len=%d buflen=%d", m, len, buflen);
	adjust = buflen - len;
	/* Round the adjustment down to a long-word boundary. */
	m->m_data += adjust &~ (sizeof(long)-1);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from the
 * beginning, continuing for "len" bytes.
 *
 * f is called as (*f)(arg, data, count) for each contiguous region and
 * its first non-zero return value is propagated to the caller; 0 is
 * returned when the whole range has been processed.  The range must lie
 * entirely within the chain (asserted per-mbuf).
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, unsigned int), void *arg)
{
	unsigned int count;
	int rval;

	KASSERT(len != M_COPYALL);
	KASSERT(len >= 0);
	KASSERT(off >= 0);

	/* Skip whole mbufs until 'off' falls inside 'm'. */
	while (off > 0) {
		KASSERT(m != NULL);
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	/* Visit each contiguous region in turn. */
	while (len > 0) {
		KASSERT(m != NULL);
		count = uimin(m->m_len - off, len);

		rval = (*f)(arg, mtod(m, char *) + off, count);
		if (rval)
			return rval;

		len -= count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 *
 * On success *off is set to the offset within the returned mbuf.  A
 * location exactly at the end of the chain yields the last mbuf with
 * *off == m_len; anything past that returns NULL.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search */
		if (m->m_len > loc) {
			*off = loc;
			return m;
		}

		loc -= m->m_len;

		if (m->m_next == NULL) {
			if (loc == 0) {
				/* Point at the end of valid data */
				*off = m->m_len;
				return m;
			}
			/* Location is beyond the end of the chain. */
			return NULL;
		} else {
			m = m->m_next;
		}
	}

	return NULL;
}

/*
 * Release a reference to the mbuf external storage.
 *
 * => free the mbuf m itself as well.
 */
static void
m_ext_free(struct mbuf *m)
{
	const bool embedded = MEXT_ISEMBEDDED(m);
	bool dofree = true;
	u_int refcnt;

	KASSERT((m->m_flags & M_EXT) != 0);
	KASSERT(MEXT_ISEMBEDDED(m->m_ext_ref));
	KASSERT((m->m_ext_ref->m_flags & M_EXT) != 0);
	KASSERT((m->m_flags & M_EXT_CLUSTER) ==
	    (m->m_ext_ref->m_flags & M_EXT_CLUSTER));

	if (__predict_false(m->m_type == MT_FREE)) {
		panic("mbuf %p already freed", m);
	}

	/*
	 * Fast path: sole owner, no atomics needed.  Otherwise drop our
	 * reference atomically, with a release barrier so prior stores
	 * to the storage are visible before the count reaches zero.
	 */
	if (__predict_true(m->m_ext.ext_refcnt == 1)) {
		refcnt = m->m_ext.ext_refcnt = 0;
	} else {
		membar_release();
		refcnt = atomic_dec_uint_nv(&m->m_ext.ext_refcnt);
	}

	if (refcnt > 0) {
		/* Storage survives: other references remain. */
		if (embedded) {
			/*
			 * other mbuf's m_ext_ref still points to us.
			 */
			dofree = false;
		} else {
			m->m_ext_ref = m;
		}
	} else {
		/*
		 * dropping the last reference
		 */
		/* Pairs with the release barrier above. */
		membar_acquire();
		if (!embedded) {
			/*
			 * Indirect reference: hand the storage back to
			 * the embedding mbuf and recurse on it.
			 */
			m->m_ext.ext_refcnt++; /* XXX */
			m_ext_free(m->m_ext_ref);
			m->m_ext_ref = m;
		} else if ((m->m_flags & M_EXT_CLUSTER) != 0) {
			/* Cluster storage returns to the cluster pool. */
			pool_cache_put_paddr(mcl_cache,
			    m->m_ext.ext_buf, m->m_ext.ext_paddr);
		} else if (m->m_ext.ext_free) {
			(*m->m_ext.ext_free)(m,
			    m->m_ext.ext_buf, m->m_ext.ext_size,
			    m->m_ext.ext_arg);
			/*
			 * 'm' is already freed by the ext_free callback.
			 */
			dofree = false;
		} else {
			free(m->m_ext.ext_buf, 0);
		}
	}

	/* Finally return the mbuf itself to its pool. */
	if (dofree) {
		m->m_type = MT_FREE;
		m->m_data = NULL;
		pool_cache_put(mb_cache, m);
	}
}
/*
 * Free a single mbuf and associated external storage. Return the
 * successor, if any.
 */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	/* MBUFTRACE/statistics bookkeeping. */
	mowner_revoke(m, 1, m->m_flags);
	mbstat_type_add(m->m_type, -1);

	/* A packet header owns its tag chain; release it first. */
	if (m->m_flags & M_PKTHDR)
		m_tag_delete_chain(m);

	n = m->m_next;

	if (m->m_flags & M_EXT) {
		/* m_ext_free() also returns 'm' itself to the pool. */
		m_ext_free(m);
	} else {
		if (__predict_false(m->m_type == MT_FREE)) {
			panic("mbuf %p already freed", m);
		}
		m->m_type = MT_FREE;
		m->m_data = NULL;
		pool_cache_put(mb_cache, m);
	}

	return n;
}

/*
 * Free an entire mbuf chain.  NULL is accepted and ignored.
 */
void
m_freem(struct mbuf *m)
{
	if (m == NULL)
		return;
	do {
		m = m_free(m);
	} while (m);
}

#if defined(DDB)
/*
 * DDB helper: dump one mbuf (or, with modifier 'c', a whole chain).
 * Modifiers: 'c' follow m_next chain, 'd' hex-dump payload,
 * 'v' (NETHER only) additionally decode as an Ethernet frame.
 */
void
m_print(const struct mbuf *m, const char *modif, void (*pr)(const char *, ...))
{
	char ch;
	bool opt_c = false;
	bool opt_d = false;
#if NETHER > 0
	bool opt_v = false;
	const struct mbuf *m0 = NULL;
#endif
	int no = 0;		/* index of the mbuf within the chain */
	char buf[512];

	/* Parse the DDB modifier characters. */
	while ((ch = *(modif++)) != '\0') {
		switch (ch) {
		case 'c':
			opt_c = true;
			break;
		case 'd':
			opt_d = true;
			break;
#if NETHER > 0
		case 'v':
			opt_v = true;
			m0 = m;
			break;
#endif
		default:
			break;
		}
	}

nextchain:
	(*pr)("MBUF(%d) %p\n", no, m);
	snprintb(buf, sizeof(buf), M_FLAGS_BITS, (u_int)m->m_flags);
	(*pr)("  data=%p, len=%d, type=%d, flags=%s\n",
	    m->m_data, m->m_len, m->m_type, buf);
	if (opt_d) {
		int i;
		unsigned char *p = m->m_data;

		(*pr)("  data:");

		/* 16 bytes per row. */
		for (i = 0; i < m->m_len; i++) {
			if (i % 16 == 0)
				(*pr)("\n");
			(*pr)(" %02x", p[i]);
		}

		(*pr)("\n");
	}
	(*pr)("  owner=%p, next=%p, nextpkt=%p\n", m->m_owner, m->m_next,
	    m->m_nextpkt);
	(*pr)("  leadingspace=%u, trailingspace=%u, readonly=%u\n",
	    (int)M_LEADINGSPACE(m), (int)M_TRAILINGSPACE(m),
	    (int)M_READONLY(m));
	if ((m->m_flags & M_PKTHDR) != 0) {
		snprintb(buf, sizeof(buf), M_CSUM_BITS, m->m_pkthdr.csum_flags);
		(*pr)("  pktlen=%d, rcvif=%p, csum_flags=%s, csum_data=0x%"
		    PRIx32 ", segsz=%u\n",
		    m->m_pkthdr.len, m_get_rcvif_NOMPSAFE(m),
		    buf, m->m_pkthdr.csum_data, m->m_pkthdr.segsz);
	}
	if ((m->m_flags & M_EXT)) {
		(*pr)("  ext_refcnt=%u, ext_buf=%p, ext_size=%zd, "
		    "ext_free=%p, ext_arg=%p\n",
		    m->m_ext.ext_refcnt,
		    m->m_ext.ext_buf, m->m_ext.ext_size,
		    m->m_ext.ext_free, m->m_ext.ext_arg);
	}
	/* Page list is only meaningful when both M_EXT and M_EXT_PAGES set. */
	if ((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0) {
		vaddr_t sva = (vaddr_t)m->m_ext.ext_buf;
		vaddr_t eva = sva + m->m_ext.ext_size;
		int n = (round_page(eva) - trunc_page(sva)) >> PAGE_SHIFT;
		int i;

		(*pr)("  pages:");
		for (i = 0; i < n; i ++) {
			(*pr)(" %p", m->m_ext.ext_pgs[i]);
		}
		(*pr)("\n");
	}

	if (opt_c) {
		m = m->m_next;
		if (m != NULL) {
			no++;
			goto nextchain;
		}
	}

#if NETHER > 0
	if (opt_v && m0)
		m_examine(m0, AF_ETHER, modif, pr);
#endif
}
#endif /* defined(DDB) */

#if defined(MBUFTRACE)
/*
 * Initialize an mbuf owner record with the given name/description.
 */
void
mowner_init_owner(struct mowner *mo, const char *name, const char *descr)
{
	memset(mo, 0, sizeof(*mo));
	strlcpy(mo->mo_name, name, sizeof(mo->mo_name));
	strlcpy(mo->mo_descr, descr, sizeof(mo->mo_descr));
}

/*
 * Register an owner record: allocate its per-CPU counters and link it
 * onto the global mowners list.
 */
void
mowner_attach(struct mowner *mo)
{

	KASSERT(mo->mo_counters == NULL);
	mo->mo_counters = percpu_alloc(sizeof(struct mowner_counter));

	/* XXX lock */
	LIST_INSERT_HEAD(&mowners, mo, mo_link);
}

/*
 * Unregister an owner record and release its per-CPU counters.
 */
void
mowner_detach(struct mowner *mo)
{

	KASSERT(mo->mo_counters != NULL);

	/* XXX lock */
	LIST_REMOVE(mo, mo_link);

	percpu_free(mo->mo_counters, sizeof(struct mowner_counter));
	mo->mo_counters = NULL;
}

/*
 * Assign a freshly-allocated mbuf to the per-type "unknown" owner and
 * count the claim on this CPU's counter.
 */
void
mowner_init(struct mbuf *m, int type)
{
	struct mowner_counter *mc;
	struct mowner *mo;
	int s;

	m->m_owner = mo = &unknown_mowners[type];
	/* splvm + percpu_getref bracket the per-CPU counter update. */
	s = splvm();
	mc = percpu_getref(mo->mo_counters);
	mc->mc_counter[MOWNER_COUNTER_CLAIMS]++;
	percpu_putref(mo->mo_counters);
	splx(s);
}

/*
 * Count additional claims (external storage / cluster) for the mbuf's
 * current owner, as selected by 'flags'.
 */
void
mowner_ref(struct mbuf *m, int flags)
{
	struct mowner *mo = m->m_owner;
	struct mowner_counter *mc;
	int s;

	s = splvm();
	mc = percpu_getref(mo->mo_counters);
	if ((flags & M_EXT) != 0)
		mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
	if ((flags & M_EXT_CLUSTER) != 0)
		mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
	percpu_putref(mo->mo_counters);
	splx(s);
}

/*
 * Count releases for the mbuf's current owner.  When 'all' is set the
 * mbuf itself is being released, and ownership moves to the global
 * "revoked" owner.
 */
void
mowner_revoke(struct mbuf *m, bool all, int flags)
{
	struct mowner *mo = m->m_owner;
	struct mowner_counter *mc;
	int s;

	s = splvm();
	mc = percpu_getref(mo->mo_counters);
	if ((flags & M_EXT) != 0)
		mc->mc_counter[MOWNER_COUNTER_EXT_RELEASES]++;
	if ((flags & M_EXT_CLUSTER) != 0)
		mc->mc_counter[MOWNER_COUNTER_CLUSTER_RELEASES]++;
	if (all)
		mc->mc_counter[MOWNER_COUNTER_RELEASES]++;
	percpu_putref(mo->mo_counters);
	splx(s);
	if (all)
		m->m_owner = &revoked_mowner;
}

/*
 * Record a claim of mbuf 'm' by owner 'mo' (including any external
 * storage/cluster it carries) and assign ownership.
 */
static void
mowner_claim(struct mbuf *m, struct mowner *mo)
{
	struct mowner_counter *mc;
	int flags = m->m_flags;
	int s;

	s = splvm();
	mc = percpu_getref(mo->mo_counters);
	mc->mc_counter[MOWNER_COUNTER_CLAIMS]++;
	if ((flags & M_EXT) != 0)
		mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
	if ((flags & M_EXT_CLUSTER) != 0)
		mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
	percpu_putref(mo->mo_counters);
	splx(s);
	m->m_owner = mo;
}

/*
 * Transfer ownership of a single mbuf to 'mo': revoke from the current
 * owner, then claim for the new one.  No-op if unchanged or mo is NULL.
 */
void
m_claim(struct mbuf *m, struct mowner *mo)
{

	if (m->m_owner == mo || mo == NULL)
		return;

	mowner_revoke(m, true, m->m_flags);
	mowner_claim(m, mo);
}

/*
 * Transfer ownership of an entire mbuf chain to 'mo'.
 */
void
m_claimm(struct mbuf *m, struct mowner *mo)
{

	for (; m != NULL; m = m->m_next)
		m_claim(m, mo);
}
#endif /* defined(MBUFTRACE) */

#ifdef DIAGNOSTIC
/*
 * Verify that the mbuf chain is not malformed. Used only for diagnostic.
 * Panics on error.
 */
void
m_verify_packet(struct mbuf *m)
{
	struct mbuf *n = m;
	char *low, *high, *dat;
	int totlen = 0, len;

	if (__predict_false((m->m_flags & M_PKTHDR) == 0)) {
		panic("%s: mbuf doesn't have M_PKTHDR", __func__);
	}

	while (n != NULL) {
		if (__predict_false(n->m_type == MT_FREE)) {
			panic("%s: mbuf already freed (n = %p)", __func__, n);
		}
#if 0
		/*
		 * This ought to be a rule of the mbuf API. Unfortunately,
		 * many places don't respect that rule.
		 */
		if (__predict_false((n != m) && (n->m_flags & M_PKTHDR) != 0)) {
			panic("%s: M_PKTHDR set on secondary mbuf", __func__);
		}
#endif
		if (__predict_false(n->m_nextpkt != NULL)) {
			panic("%s: m_nextpkt not null (m_nextpkt = %p)",
			    __func__, n->m_nextpkt);
		}

		dat = n->m_data;
		len = n->m_len;
		if (__predict_false(len < 0)) {
			panic("%s: incorrect length (len = %d)", __func__, len);
		}

		/* m_data/m_len must lie within this mbuf's own buffer. */
		low = M_BUFADDR(n);
		high = low + M_BUFSIZE(n);
		if (__predict_false((dat < low) || (dat + len > high))) {
			panic("%s: m_data not in packet"
			    "(dat = %p, len = %d, low = %p, high = %p)",
			    __func__, dat, len, low, high);
		}

		totlen += len;
		n = n->m_next;
	}

	/* The header's pkthdr.len must equal the sum of the m_lens. */
	if (__predict_false(totlen != m->m_pkthdr.len)) {
		panic("%s: inconsistent mbuf length (%d != %d)", __func__,
		    totlen, m->m_pkthdr.len);
	}
}
#endif

/*
 * Allocate a packet tag of the given type with 'len' bytes of payload
 * following the header.  Returns NULL on bad length or allocation
 * failure.  'wait' is an M_WAITOK/M_NOWAIT-style malloc flag.
 */
struct m_tag *
m_tag_get(int type, int len, int wait)
{
	struct m_tag *t;

	if (len < 0)
		return NULL;
	t = malloc(len + sizeof(struct m_tag), M_PACKET_TAGS, wait);
	if (t == NULL)
		return NULL;
	t->m_tag_id = type;
	t->m_tag_len = len;
	return t;
}

/*
 * Release a packet tag (must already be unlinked from any mbuf).
 */
void
m_tag_free(struct m_tag *t)
{
	free(t, M_PACKET_TAGS);
}

/*
 * Link a tag at the head of the packet-header mbuf's tag list.
 */
void
m_tag_prepend(struct mbuf *m, struct m_tag *t)
{
	KASSERT((m->m_flags & M_PKTHDR) != 0);
	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
}

/*
 * Unlink a tag from the mbuf's tag list without freeing it.
 */
void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{
	KASSERT((m->m_flags & M_PKTHDR) != 0);
	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}

/*
 * Unlink and free a tag.
 */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
	m_tag_unlink(m, t);
	m_tag_free(t);
}

/*
 * Delete every tag attached to the packet-header mbuf.
 */
void
m_tag_delete_chain(struct mbuf *m)
{
	struct m_tag *p, *q;

	KASSERT((m->m_flags & M_PKTHDR) != 0);

	p = SLIST_FIRST(&m->m_pkthdr.tags);
	if (p == NULL)
		return;
	/* Delete each successor of the head first, then the head itself. */
	while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
		m_tag_delete(m, q);
	m_tag_delete(m, p);
}

/*
 * Return the first tag of the given type attached to 'm', or NULL.
 */
struct m_tag *
m_tag_find(const struct mbuf *m, int type)
{
	struct m_tag *p;

	KASSERT((m->m_flags & M_PKTHDR) != 0);

	p = SLIST_FIRST(&m->m_pkthdr.tags);
	while (p != NULL) {
		if (p->m_tag_id == type)
			return p;
		p = SLIST_NEXT(p, m_tag_link);
	}
	return NULL;
}

/*
 * Duplicate a single tag (header and payload).  Allocation is M_NOWAIT,
 * so NULL may be returned.
 */
struct m_tag *
m_tag_copy(struct m_tag *t)
{
	struct m_tag *p;

	p = m_tag_get(t->m_tag_id, t->m_tag_len, M_NOWAIT);
	if (p == NULL)
		return NULL;
	/* Payload immediately follows the m_tag header. */
	memcpy(p + 1, t + 1, t->m_tag_len);
	return p;
}

/*
 * Copy two tag chains. The destination mbuf (to) loses any attached
 * tags even if the operation fails. This should not be a problem, as
 * m_tag_copy_chain() is typically called with a newly-allocated
 * destination mbuf.
 *
 * Returns 1 on success, 0 on allocation failure.
 */
int
m_tag_copy_chain(struct mbuf *to, struct mbuf *from)
{
	struct m_tag *p, *t, *tprev = NULL;

	KASSERT((from->m_flags & M_PKTHDR) != 0);

	m_tag_delete_chain(to);
	SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
		t = m_tag_copy(p);
		if (t == NULL) {
			/* Failure: leave 'to' with no tags at all. */
			m_tag_delete_chain(to);
			return 0;
		}
		/* Append in order to preserve the original tag ordering. */
		if (tprev == NULL)
			SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
		else
			SLIST_INSERT_AFTER(tprev, t, m_tag_link);
		tprev = t;
	}
	return 1;
}