/*	$NetBSD: npf_mbuf.c,v 1.18 2016/12/26 23:05:06 christos Exp $	*/

/*-
 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF network buffer management interface.
 *
 * The network buffer in NetBSD is the mbuf.  The internal mbuf structures
 * are abstracted within this source file.
 */

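/*
 * Typical usage (an illustrative sketch only, not code from this file;
 * the names npf, m and ifp are assumed to come from the caller's context):
 *
 *	nbuf_t nbuf;
 *	struct ip *ip;
 *
 *	nbuf_init(npf, &nbuf, m, ifp);
 *	ip = nbuf_ensure_contig(&nbuf, sizeof(struct ip));
 *	if (ip == NULL)
 *		return ENOBUFS;
 */
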
#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.18 2016/12/26 23:05:06 christos Exp $");

#include <sys/param.h>
#include <sys/mbuf.h>
#endif

#include "npf_impl.h"

#if defined(_NPF_STANDALONE)
#define	m_length(m)		(nbuf)->nb_mops->getchainlen(m)
#define	m_buflen(m)		(nbuf)->nb_mops->getlen(m)
#define	m_next_ptr(m)		(nbuf)->nb_mops->getnext(m)
#define	m_ensure_contig(m,t)	(nbuf)->nb_mops->ensure_contig((m), (t))
#define	m_makewritable(m,o,l,f)	(nbuf)->nb_mops->ensure_writable((m), (o+l))
#define	mtod(m,t)		((t)((nbuf)->nb_mops->getdata(m)))
#define	m_flags_p(m,f)		true
#else
#define	m_next_ptr(m)		(m)->m_next
#define	m_buflen(m)		(m)->m_len
#define	m_flags_p(m,f)		(((m)->m_flags & (f)) != 0)
#endif

#define	NBUF_ENSURE_ALIGN	(MAX(COHERENCY_UNIT, 64))
#define	NBUF_ENSURE_MASK	(NBUF_ENSURE_ALIGN - 1)
#define	NBUF_ENSURE_ROUNDUP(x)	(((x) + NBUF_ENSURE_ALIGN) & ~NBUF_ENSURE_MASK)

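/*
 * nbuf_init: initialise the nbuf to describe the given mbuf chain and
 * the interface it was received or is being sent on.
 */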
void
nbuf_init(npf_t *npf, nbuf_t *nbuf, struct mbuf *m, const ifnet_t *ifp)
{
	u_int ifid = npf_ifmap_getid(npf, ifp);

	KASSERT(m_flags_p(m, M_PKTHDR));
	nbuf->nb_mops = npf->mbufops;

	nbuf->nb_mbuf0 = m;
	nbuf->nb_ifp = ifp;
	nbuf->nb_ifid = ifid;
	nbuf_reset(nbuf);
}

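/*
 * nbuf_reset: rewind the nbuf so that its data pointer refers to the
 * beginning of the first mbuf in the chain.
 */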
void
nbuf_reset(nbuf_t *nbuf)
{
	struct mbuf *m = nbuf->nb_mbuf0;

	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, void *);
}

void *
nbuf_dataptr(nbuf_t *nbuf)
{
	KASSERT(nbuf->nb_nptr);
	return nbuf->nb_nptr;
}

size_t
nbuf_offset(const nbuf_t *nbuf)
{
	const struct mbuf *m = nbuf->nb_mbuf;
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;

	return poff;
}

struct mbuf *
nbuf_head_mbuf(nbuf_t *nbuf)
{
	return nbuf->nb_mbuf0;
}

bool
nbuf_flag_p(const nbuf_t *nbuf, int flag)
{
	return (nbuf->nb_flags & flag) != 0;
}

void
nbuf_unset_flag(nbuf_t *nbuf, int flag)
{
	nbuf->nb_flags &= ~flag;
}

/*
 * nbuf_advance: advance in the nbuf or chain by the specified number of
 * bytes and, if requested, ensure that the area *after* the advance is
 * contiguous.
 *
 * => Returns the new pointer to the data in the nbuf, or NULL if the
 *    offset is invalid.
 * => The current nbuf and the offset are stored in the nbuf metadata.
 */
void *
nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
{
	struct mbuf *m = nbuf->nb_mbuf;
	u_int off, wmark;
	uint8_t *d;

	/* Offset with amount to advance. */
	off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
	wmark = m_buflen(m);

	/* Find the mbuf according to offset. */
	while (__predict_false(wmark <= off)) {
		m = m_next_ptr(m);
		if (__predict_false(m == NULL)) {
			/*
			 * If end of the chain, then the offset is
			 * higher than packet length.
			 */
			return NULL;
		}
		wmark += m_buflen(m);
	}
	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Offset in mbuf data. */
	d = mtod(m, uint8_t *);
	KASSERT(off >= (wmark - m_buflen(m)));
	d += (off - (wmark - m_buflen(m)));

	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = d;

	if (ensure) {
		/* Ensure contiguousness (may change nbuf chain). */
		d = nbuf_ensure_contig(nbuf, ensure);
	}
	return d;
}

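/*
 * Example (an illustrative sketch, not code from this file): skip an IPv4
 * header of "hlen" bytes and obtain a contiguous view of the TCP header;
 * "hlen" and "nbuf" are assumed to come from the caller's context.
 *
 *	struct tcphdr *th;
 *
 *	th = nbuf_advance(&nbuf, hlen, sizeof(struct tcphdr));
 *	if (th == NULL)
 *		return ENOBUFS;
 */
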
/*
 * nbuf_ensure_contig: check whether the specified length from the current
 * point in the nbuf is contiguous.  If not, rearrange the chain to make
 * it so.
 *
 * => Returns a pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure, in which case the nbuf becomes invalid.
 */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
	const struct mbuf * const n = nbuf->nb_mbuf;
	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

	KASSERT(off <= m_buflen(n));

	if (__predict_false(m_buflen(n) < (off + len))) {
		struct mbuf *m = nbuf->nb_mbuf0;
		const size_t foff = nbuf_offset(nbuf);
		const size_t plen = m_length(m);
		const size_t mlen = m_buflen(m);
		size_t target;
		bool success;

		//npf_stats_inc(npf, NPF_STAT_NBUF_NONCONTIG);

		/* Attempt to round-up to NBUF_ENSURE_ALIGN bytes. */
		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
			target = foff + len;
		}

		/* Rearrange the chain to be contiguous. */
		KASSERT(m_flags_p(m, M_PKTHDR));
		success = m_ensure_contig(&m, target);
		KASSERT(m != NULL);

		/* If no change in the chain: return what we have. */
		if (m == nbuf->nb_mbuf0 && m_buflen(m) == mlen) {
			return success ? nbuf->nb_nptr : NULL;
		}

		/*
		 * The mbuf chain was re-arranged.  Update the pointers
		 * accordingly and indicate that the references to the data
		 * might need a reset.
		 */
		KASSERT(m_flags_p(m, M_PKTHDR));
		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;

		KASSERT(foff < m_buflen(m) && foff < m_length(m));
		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
		nbuf->nb_flags |= NBUF_DATAREF_RESET;

		if (!success) {
			//npf_stats_inc(npf, NPF_STAT_NBUF_CONTIG_FAIL);
			return NULL;
		}
	}
	return nbuf->nb_nptr;
}

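/*
 * Example (an illustrative sketch, not code from this file): make "hlen"
 * bytes at the current offset contiguous before parsing a header.  After
 * the call, the NBUF_DATAREF_RESET flag indicates that the chain was
 * rearranged and any previously taken data pointers must be re-fetched.
 *
 *	void *hdr;
 *
 *	if ((hdr = nbuf_ensure_contig(&nbuf, hlen)) == NULL)
 *		return ENOBUFS;
 */

/*
 * nbuf_ensure_writable: ensure that the specified length from the current
 * point in the nbuf is writable, reallocating the mbufs if necessary.
 *
 * => Returns a pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure, in which case the nbuf becomes invalid.
 */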
void *
nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
{
	struct mbuf *m = nbuf->nb_mbuf;
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int tlen = off + len;
	bool head_buf;

	KASSERT(off < m_length(nbuf->nb_mbuf0));

	if (!M_UNWRITABLE(m, tlen)) {
		return nbuf->nb_nptr;
	}
	head_buf = (nbuf->nb_mbuf0 == m);
	if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
		memset(nbuf, 0, sizeof(nbuf_t));
		return NULL;
	}
	if (head_buf) {
		KASSERT(m_flags_p(m, M_PKTHDR));
		KASSERT(off < m_length(m));
		nbuf->nb_mbuf0 = m;
	}
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, uint8_t *) + off;

	return nbuf->nb_nptr;
}

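/*
 * nbuf_cksum_barrier: finalise any delayed TCP/UDP checksums on an
 * outbound packet, since NPF may inspect or rewrite the payload.
 *
 * => Returns true if a delayed checksum was computed and its flag cleared.
 */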
bool
nbuf_cksum_barrier(nbuf_t *nbuf, int di)
{
#ifdef _KERNEL
	struct mbuf *m;

	if (di != PFIL_OUT) {
		return false;
	}
	m = nbuf->nb_mbuf0;
	KASSERT(m_flags_p(m, M_PKTHDR));

	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		in_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4 | M_CSUM_UDPv4);
		return true;
	}
#ifdef INET6
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
		in6_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6 | M_CSUM_UDPv6);
		return true;
	}
#endif
#else
	(void)nbuf; (void)di;
#endif
	return false;
}

/*
 * nbuf_add_tag: add a tag to the specified network buffer.
 *
 * => Returns 0 on success or an errno value on failure.
 */
int
nbuf_add_tag(nbuf_t *nbuf, uint32_t val)
{
#ifdef _KERNEL
	struct mbuf *m = nbuf->nb_mbuf0;
	struct m_tag *mt;
	uint32_t *dat;

	KASSERT(m_flags_p(m, M_PKTHDR));

	mt = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
	if (mt == NULL) {
		return ENOMEM;
	}
	dat = (uint32_t *)(mt + 1);
	*dat = val;
	m_tag_prepend(m, mt);
	return 0;
#else
	(void)nbuf; (void)val;
	return ENOTSUP;
#endif
}

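/*
 * Example (an illustrative sketch, not code from this file): tag a packet
 * with a rule-supplied value; "tag_value" is an assumed name.
 *
 *	int error;
 *
 *	error = nbuf_add_tag(&nbuf, tag_value);
 *	if (error)
 *		return error;
 */
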
/*
 * nbuf_find_tag: find a tag in the specified network buffer.
 *
 * => Returns 0 on success or an errno value on failure.
 */
int
nbuf_find_tag(nbuf_t *nbuf, uint32_t *val)
{
#ifdef _KERNEL
	struct mbuf *m = nbuf->nb_mbuf0;
	struct m_tag *mt;

	KASSERT(m_flags_p(m, M_PKTHDR));

	mt = m_tag_find(m, PACKET_TAG_NPF, NULL);
	if (mt == NULL) {
		return EINVAL;
	}
	*val = *(uint32_t *)(mt + 1);
	return 0;
#else
	(void)nbuf; (void)val;
	return ENOTSUP;
#endif
}
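
/*
 * Example (an illustrative sketch, not code from this file): retrieve a
 * previously attached tag and compare it against a rule-supplied value;
 * "expected_tag" is an assumed name.
 *
 *	uint32_t tag;
 *	bool match;
 *
 *	match = (nbuf_find_tag(&nbuf, &tag) == 0 && tag == expected_tag);
 */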
    346