Home | History | Annotate | Line # | Download | only in npf
npf_mbuf.c revision 1.21
      1 /*-
      2  * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
      3  * All rights reserved.
      4  *
      5  * This material is based upon work partially supported by The
      6  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     27  * POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 /*
     31  * NPF network buffer management interface.
     32  *
     33  * Network buffer in NetBSD is mbuf.  Internal mbuf structures are
     34  * abstracted within this source.
     35  */
     36 
     37 #ifdef _KERNEL
     38 #include <sys/cdefs.h>
     39 __KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.21 2018/09/29 14:41:36 rmind Exp $");
     40 
     41 #include <sys/param.h>
     42 #include <sys/mbuf.h>
     43 #include <netinet/in_offload.h>
     44 #endif
     45 
     46 #include "npf_impl.h"
     47 
     48 #ifdef _KERNEL
     49 #ifdef INET6
     50 #include <netinet6/in6.h>
     51 #include <netinet6/in6_offload.h>
     52 #endif
     53 #endif
     54 
     55 #if defined(_NPF_STANDALONE)
     56 #define	m_length(m)		(nbuf)->nb_mops->getchainlen(m)
     57 #define	m_buflen(m)		(nbuf)->nb_mops->getlen(m)
     58 #define	m_next_ptr(m)		(nbuf)->nb_mops->getnext(m)
     59 #define	m_ensure_contig(m,t)	(nbuf)->nb_mops->ensure_contig((m), (t))
     60 #define	m_makewritable(m,o,l,f)	(nbuf)->nb_mops->ensure_writable((m), (o+l))
     61 #define	mtod(m,t)		((t)((nbuf)->nb_mops->getdata(m)))
     62 #define	m_flags_p(m,f)		true
     63 #else
     64 #define	m_next_ptr(m)		(m)->m_next
     65 #define	m_buflen(m)		(m)->m_len
     66 #define	m_flags_p(m,f)		(((m)->m_flags & (f)) != 0)
     67 #endif
     68 
     69 #define	NBUF_ENSURE_ALIGN	(MAX(COHERENCY_UNIT, 64))
     70 #define	NBUF_ENSURE_MASK	(NBUF_ENSURE_ALIGN - 1)
     71 #define	NBUF_ENSURE_ROUNDUP(x)	(((x) + NBUF_ENSURE_ALIGN) & ~NBUF_ENSURE_MASK)
     72 
     73 void
     74 nbuf_init(npf_t *npf, nbuf_t *nbuf, struct mbuf *m, const ifnet_t *ifp)
     75 {
     76 	u_int ifid = npf_ifmap_getid(npf, ifp);
     77 
     78 	KASSERT(m_flags_p(m, M_PKTHDR));
     79 	nbuf->nb_mops = npf->mbufops;
     80 
     81 	nbuf->nb_mbuf0 = m;
     82 	nbuf->nb_ifp = ifp;
     83 	nbuf->nb_ifid = ifid;
     84 	nbuf_reset(nbuf);
     85 }
     86 
     87 void
     88 nbuf_reset(nbuf_t *nbuf)
     89 {
     90 	struct mbuf *m = nbuf->nb_mbuf0;
     91 
     92 	nbuf->nb_mbuf = m;
     93 	nbuf->nb_nptr = mtod(m, void *);
     94 }
     95 
     96 void *
     97 nbuf_dataptr(nbuf_t *nbuf)
     98 {
     99 	KASSERT(nbuf->nb_nptr);
    100 	return nbuf->nb_nptr;
    101 }
    102 
/*
 * nbuf_offset: return the current offset in bytes, measured from the
 * beginning of the packet (i.e. from the start of the first mbuf).
 */
size_t
nbuf_offset(const nbuf_t *nbuf)
{
	const struct mbuf *m = nbuf->nb_mbuf;
	/* Offset of the nbuf pointer within the current mbuf's data. */
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	/*
	 * Bytes consumed in the mbufs preceding the current one: total
	 * chain length minus the length of the sub-chain starting at 'm'.
	 */
	const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;

	return poff;
}
    112 
    113 struct mbuf *
    114 nbuf_head_mbuf(nbuf_t *nbuf)
    115 {
    116 	return nbuf->nb_mbuf0;
    117 }
    118 
    119 bool
    120 nbuf_flag_p(const nbuf_t *nbuf, int flag)
    121 {
    122 	return (nbuf->nb_flags & flag) != 0;
    123 }
    124 
    125 void
    126 nbuf_unset_flag(nbuf_t *nbuf, int flag)
    127 {
    128 	nbuf->nb_flags &= ~flag;
    129 }
    130 
/*
 * nbuf_advance: advance in nbuf or chain by specified amount of bytes and,
 * if requested, ensure that the area *after* advance is contiguous.
 *
 * => Returns new pointer to data in nbuf or NULL if offset is invalid.
 * => Current nbuf and the offset is stored in the nbuf metadata.
 */
void *
nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
{
	struct mbuf *m = nbuf->nb_mbuf;
	u_int off, wmark;
	uint8_t *d;

	/*
	 * Offset with amount to advance.  Note: 'off' is relative to the
	 * start of the *current* mbuf, not to the start of the packet.
	 */
	off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
	/* 'wmark' accumulates bytes in the chain up to and including 'm'. */
	wmark = m_buflen(m);

	/* Find the mbuf according to offset. */
	while (__predict_false(wmark <= off)) {
		m = m_next_ptr(m);
		if (__predict_false(m == NULL)) {
			/*
			 * If end of the chain, then the offset is
			 * higher than packet length.
			 */
			return NULL;
		}
		wmark += m_buflen(m);
	}
	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/*
	 * Offset in mbuf data: (wmark - m_buflen(m)) is the byte count of
	 * all mbufs preceding 'm', so the remainder indexes into 'm'.
	 */
	d = mtod(m, uint8_t *);
	KASSERT(off >= (wmark - m_buflen(m)));
	d += (off - (wmark - m_buflen(m)));

	/* Commit the new position into the nbuf metadata. */
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = d;

	if (ensure) {
		/* Ensure contiguousness (may change nbuf chain). */
		d = nbuf_ensure_contig(nbuf, ensure);
	}
	return d;
}
    177 
/*
 * nbuf_ensure_contig: check whether the specified length from the current
 * point in the nbuf is contiguous.  If not, rearrange the chain to be so.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure and nbuf becomes invalid.
 * => May set NBUF_DATAREF_RESET: callers holding raw pointers into the
 *    buffer must re-fetch them after a successful return.
 */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
	const struct mbuf * const n = nbuf->nb_mbuf;
	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

	KASSERT(off <= m_buflen(n));

	/* Fast path: the requested range already fits in the current mbuf. */
	if (__predict_false(m_buflen(n) < (off + len))) {
		struct mbuf *m = nbuf->nb_mbuf0;
		const size_t foff = nbuf_offset(nbuf);
		const size_t plen = m_length(m);
		const size_t mlen = m_buflen(m);
		size_t target;
		bool success;

		//npf_stats_inc(npf, NPF_STAT_NBUF_NONCONTIG);

		/*
		 * Attempt to round-up to NBUF_ENSURE_ALIGN bytes, so that
		 * subsequent nearby accesses are also contiguous; fall back
		 * to the exact requirement if that would exceed the packet.
		 */
		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
			target = foff + len;
		}

		/* Rearrange the chain to be contiguous. */
		KASSERT(m_flags_p(m, M_PKTHDR));
		success = m_ensure_contig(&m, target);
		KASSERT(m != NULL);

		/* If no change in the chain: return what we have. */
		if (m == nbuf->nb_mbuf0 && m_buflen(m) == mlen) {
			return success ? nbuf->nb_nptr : NULL;
		}

		/*
		 * The mbuf chain was re-arranged.  Update the pointers
		 * accordingly and indicate that the references to the data
		 * might need a reset.
		 */
		KASSERT(m_flags_p(m, M_PKTHDR));
		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;

		KASSERT(foff < m_buflen(m) && foff < m_length(m));
		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
		nbuf->nb_flags |= NBUF_DATAREF_RESET;

		if (!success) {
			//npf_stats_inc(npf, NPF_STAT_NBUF_CONTIG_FAIL);
			return NULL;
		}
	}
	return nbuf->nb_nptr;
}
    238 
/*
 * nbuf_ensure_writable: ensure that the specified length from the current
 * point in the nbuf is writable (not shared/read-only), re-allocating the
 * mbuf data as necessary.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure; in that case the nbuf is zeroed out and
 *    must not be used any further by the caller.
 */
void *
nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
{
	struct mbuf *m = nbuf->nb_mbuf;
	/* Offset of the nbuf pointer within the current mbuf. */
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int tlen = off + len;
	bool head_buf;

	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Already writable for the full range: nothing to do. */
	if (!M_UNWRITABLE(m, tlen)) {
		return nbuf->nb_nptr;
	}
	/* Remember whether 'm' heads the chain before it may be replaced. */
	head_buf = (nbuf->nb_mbuf0 == m);
	/* Non-zero return indicates failure to make the data writable. */
	if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
		memset(nbuf, 0, sizeof(nbuf_t));
		return NULL;
	}
	if (head_buf) {
		KASSERT(m_flags_p(m, M_PKTHDR));
		KASSERT(off < m_length(m));
		nbuf->nb_mbuf0 = m;
	}
	/* The mbuf (and its data) may have been replaced: re-derive nptr. */
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, uint8_t *) + off;

	return nbuf->nb_nptr;
}
    267 
/*
 * nbuf_cksum_barrier: finalize any deferred (hardware-offloaded) TCP/UDP
 * checksums on an outgoing packet, so that NPF may safely inspect or
 * modify the payload.
 *
 * => Only acts in the PFIL_OUT direction; a no-op otherwise.
 * => Returns true if a deferred checksum was computed, false otherwise.
 */
bool
nbuf_cksum_barrier(nbuf_t *nbuf, int di)
{
#ifdef _KERNEL
	struct mbuf *m;

	if (di != PFIL_OUT) {
		return false;
	}
	m = nbuf->nb_mbuf0;
	KASSERT(m_flags_p(m, M_PKTHDR));

	/* IPv4: compute the deferred checksum and clear the offload flags. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		in_undefer_cksum_tcpudp(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4 | M_CSUM_UDPv4);
		return true;
	}
#ifdef INET6
	/* IPv6: same, via the IPv6 offload path. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
		in6_undefer_cksum_tcpudp(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6 | M_CSUM_UDPv6);
		return true;
	}
#endif
#else
	(void)nbuf; (void)di;
#endif
	return false;
}
    297 
    298 /*
    299  * nbuf_add_tag: add a tag to specified network buffer.
    300  *
    301  * => Returns 0 on success or errno on failure.
    302  */
    303 int
    304 nbuf_add_tag(nbuf_t *nbuf, uint32_t val)
    305 {
    306 #ifdef _KERNEL
    307 	struct mbuf *m = nbuf->nb_mbuf0;
    308 	struct m_tag *mt;
    309 	uint32_t *dat;
    310 
    311 	KASSERT(m_flags_p(m, M_PKTHDR));
    312 
    313 	mt = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
    314 	if (mt == NULL) {
    315 		return ENOMEM;
    316 	}
    317 	dat = (uint32_t *)(mt + 1);
    318 	*dat = val;
    319 	m_tag_prepend(m, mt);
    320 	return 0;
    321 #else
    322 	(void)nbuf; (void)val;
    323 	return ENOTSUP;
    324 #endif
    325 }
    326 
    327 /*
    328  * nbuf_find_tag: find a tag in specified network buffer.
    329  *
    330  * => Returns 0 on success or errno on failure.
    331  */
    332 int
    333 nbuf_find_tag(nbuf_t *nbuf, uint32_t *val)
    334 {
    335 #ifdef _KERNEL
    336 	struct mbuf *m = nbuf->nb_mbuf0;
    337 	struct m_tag *mt;
    338 
    339 	KASSERT(m_flags_p(m, M_PKTHDR));
    340 
    341 	mt = m_tag_find(m, PACKET_TAG_NPF, NULL);
    342 	if (mt == NULL) {
    343 		return EINVAL;
    344 	}
    345 	*val = *(uint32_t *)(mt + 1);
    346 	return 0;
    347 #else
    348 	(void)nbuf; (void)val;
    349 	return ENOTSUP;
    350 #endif
    351 }
    352