/* Home | History | Annotate | Line # | Download | only in npf */
      1 /*-
      2  * Copyright (c) 2009-2020 The NetBSD Foundation, Inc.
      3  * All rights reserved.
      4  *
      5  * This material is based upon work partially supported by The
      6  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     27  * POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 /*
     31  * NPF network buffer management interface.
     32  *
     33  * Network buffer in NetBSD is mbuf.  Internal mbuf structures are
     34  * abstracted within this source.
     35  */
     36 
     37 #ifdef _KERNEL
     38 #include <sys/cdefs.h>
     39 __KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.25 2023/02/12 13:38:37 kardel Exp $");
     40 
     41 #include <sys/param.h>
     42 #include <sys/mbuf.h>
     43 #include <netinet/in_offload.h>
     44 #endif
     45 
     46 #include "npf_impl.h"
     47 
     48 #ifdef _KERNEL
     49 #ifdef INET6
     50 #include <netinet6/in6.h>
     51 #include <netinet6/in6_offload.h>
     52 #endif
     53 #endif
     54 
     55 #if defined(_NPF_STANDALONE)
     56 #define	m_length(m)		(nbuf)->nb_mops->getchainlen(m)
     57 #define	m_buflen(m)		(nbuf)->nb_mops->getlen(m)
     58 #define	m_next_ptr(m)		(nbuf)->nb_mops->getnext(m)
     59 #define	m_ensure_contig(m,t)	(nbuf)->nb_mops->ensure_contig((m), (t))
     60 #define	m_makewritable(m,o,l,f)	(nbuf)->nb_mops->ensure_writable((m), (o+l))
     61 #define	mtod(m,t)		((t)((nbuf)->nb_mops->getdata(m)))
     62 #define	m_flags_p(m,f)		true
     63 #define	M_UNWRITABLE(m, l)	false
     64 #else
     65 #define	m_next_ptr(m)		(m)->m_next
     66 #define	m_buflen(m)		((size_t)(m)->m_len)
     67 #define	m_flags_p(m,f)		(((m)->m_flags & (f)) != 0)
     68 #endif
     69 
     70 #define	NBUF_ENSURE_ALIGN	(MAX(COHERENCY_UNIT, 64))
     71 #define	NBUF_ENSURE_MASK	(NBUF_ENSURE_ALIGN - 1)
     72 #define	NBUF_ENSURE_ROUNDUP(x)	(((x) + NBUF_ENSURE_ALIGN) & ~NBUF_ENSURE_MASK)
     73 
     74 void
     75 nbuf_init(npf_t *npf, nbuf_t *nbuf, struct mbuf *m, const ifnet_t *ifp)
     76 {
     77 	unsigned ifid = npf_ifmap_getid(npf, ifp);
     78 
     79 	KASSERT(m_flags_p(m, M_PKTHDR));
     80 	nbuf->nb_mops = npf->mbufops;
     81 
     82 	nbuf->nb_mbuf0 = m;
     83 	nbuf->nb_ifp = ifp;
     84 	nbuf->nb_ifid = ifid;
     85 	nbuf_reset(nbuf);
     86 }
     87 
     88 void
     89 nbuf_reset(nbuf_t *nbuf)
     90 {
     91 	struct mbuf *m = nbuf->nb_mbuf0;
     92 
     93 	nbuf->nb_mbuf = m;
     94 	nbuf->nb_nptr = mtod(m, void *);
     95 }
     96 
     97 void *
     98 nbuf_dataptr(nbuf_t *nbuf)
     99 {
    100 	KASSERT(nbuf->nb_nptr);
    101 	return nbuf->nb_nptr;
    102 }
    103 
    104 size_t
    105 nbuf_offset(const nbuf_t *nbuf)
    106 {
    107 	const struct mbuf *m = nbuf->nb_mbuf;
    108 	const unsigned off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
    109 	const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;
    110 
    111 	return poff;
    112 }
    113 
    114 struct mbuf *
    115 nbuf_head_mbuf(nbuf_t *nbuf)
    116 {
    117 	return nbuf->nb_mbuf0;
    118 }
    119 
    120 bool
    121 nbuf_flag_p(const nbuf_t *nbuf, int flag)
    122 {
    123 	return (nbuf->nb_flags & flag) != 0;
    124 }
    125 
    126 void
    127 nbuf_unset_flag(nbuf_t *nbuf, int flag)
    128 {
    129 	nbuf->nb_flags &= ~flag;
    130 }
    131 
    132 /*
    133  * nbuf_advance: advance in nbuf or chain by specified amount of bytes and,
    134  * if requested, ensure that the area *after* advance is contiguous.
    135  *
    136  * => Returns new pointer to data in nbuf or NULL if offset is invalid.
    137  * => Current nbuf and the offset is stored in the nbuf metadata.
    138  */
void *
nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
{
	struct mbuf *m = nbuf->nb_mbuf;
	unsigned off, wmark;
	uint8_t *d;

	/*
	 * Offset with amount to advance: 'off' is relative to the start
	 * of the *current* mbuf's data, not the head of the packet.
	 */
	off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
	wmark = m_buflen(m);

	/*
	 * Find the mbuf according to offset: 'wmark' is the watermark,
	 * i.e. the cumulative byte count covered by the mbufs scanned
	 * so far (starting from the current one).
	 */
	while (__predict_false(wmark <= off)) {
		m = m_next_ptr(m);
		if (__predict_false(m == NULL)) {
			/*
			 * If end of the chain, then the offset is
			 * higher than packet length.
			 */
			return NULL;
		}
		wmark += m_buflen(m);
	}
	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Offset in mbuf data: (wmark - m_buflen(m)) is where 'm' begins. */
	d = mtod(m, uint8_t *);
	KASSERT(off >= (wmark - m_buflen(m)));
	d += (off - (wmark - m_buflen(m)));

	/* Commit the new position before any chain rearrangement. */
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = d;

	if (ensure) {
		/* Ensure contiguousness (may change nbuf chain). */
		d = nbuf_ensure_contig(nbuf, ensure);
	}
	return d;
}
    178 
    179 /*
    180  * nbuf_ensure_contig: check whether the specified length from the current
    181  * point in the nbuf is contiguous.  If not, rearrange the chain to be so.
    182  *
    183  * => Returns pointer to the data at the current offset in the buffer.
    184  * => Returns NULL on failure and nbuf becomes invalid.
    185  */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
	const struct mbuf * const n = nbuf->nb_mbuf;
	/* Offset of the current pointer within the current mbuf. */
	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

	KASSERT(off <= m_buflen(n));

	/* Fast path: the requested span already fits in the current mbuf. */
	if (__predict_false(m_buflen(n) < (off + len))) {
		struct mbuf *m = nbuf->nb_mbuf0;
		const size_t foff = nbuf_offset(nbuf);	/* absolute packet offset */
		const size_t plen = m_length(m);	/* total packet length */
		const size_t mlen = m_buflen(m);	/* head mbuf length, pre-rearrange */
		size_t target;
		bool success;

		//npf_stats_inc(npf, NPF_STAT_NBUF_NONCONTIG);

		/*
		 * Attempt to round-up to NBUF_ENSURE_ALIGN bytes, so that
		 * subsequent nearby accesses are contiguous too; fall back
		 * to the exact requirement if that exceeds the packet.
		 */
		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
			target = foff + len;
		}

		/* Rearrange the chain to be contiguous (operates on the head). */
		KASSERT(m_flags_p(m, M_PKTHDR));
		success = m_ensure_contig(&m, target);
		KASSERT(m != NULL);

		/* If no change in the chain: return what we have. */
		if (m == nbuf->nb_mbuf0 && m_buflen(m) == mlen) {
			return success ? nbuf->nb_nptr : NULL;
		}

		/*
		 * The mbuf chain was re-arranged.  Update the pointers
		 * accordingly and indicate that the references to the data
		 * might need a reset.
		 */
		KASSERT(m_flags_p(m, M_PKTHDR));
		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;

		/* Re-derive the data pointer from the (possibly new) head. */
		KASSERT(foff < m_buflen(m) && foff < m_length(m));
		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
		nbuf->nb_flags |= NBUF_DATAREF_RESET;

		/*
		 * Note: even on failure the nbuf pointers were updated
		 * above, since the chain may have been partially changed.
		 */
		if (!success) {
			//npf_stats_inc(npf, NPF_STAT_NBUF_CONTIG_FAIL);
			return NULL;
		}
	}
	return nbuf->nb_nptr;
}
    239 
/*
 * nbuf_ensure_writable: ensure that the specified length from the current
 * point in the nbuf is writable, copying the data if it is shared.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure, in which case the nbuf is zeroed and must
 *    be considered invalid by the caller.
 */
void *
nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
{
	struct mbuf *m = nbuf->nb_mbuf;
	/* Offset of the current pointer within the current mbuf. */
	const unsigned off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int tlen = off + len;
	bool head_buf;

	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Fast path: already writable, nothing to do. */
	if (!M_UNWRITABLE(m, tlen)) {
		return nbuf->nb_nptr;
	}
	/* Remember whether we are at the head: m may be replaced below. */
	head_buf = (nbuf->nb_mbuf0 == m);
	if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
		/* Failure: invalidate the nbuf so stale pointers trap early. */
		memset(nbuf, 0, sizeof(nbuf_t));
		return NULL;
	}
	if (head_buf) {
		KASSERT(m_flags_p(m, M_PKTHDR));
		KASSERT(off < m_length(m));
		nbuf->nb_mbuf0 = m;
	}
	/* Re-derive the data pointer from the (possibly new) mbuf. */
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, uint8_t *) + off;

	return nbuf->nb_nptr;
}
    268 
/*
 * nbuf_cksum_barrier: finalize any deferred (hardware-offloaded) TCP/UDP
 * checksums on an outbound packet, so that NPF may safely inspect or
 * modify the payload.
 *
 * => Only acts on the outgoing direction (PFIL_OUT); no-op otherwise.
 * => Returns true if a checksum was computed (i.e. packet data changed).
 */
bool
nbuf_cksum_barrier(nbuf_t *nbuf, int di)
{
#ifdef _KERNEL
	struct mbuf *m;

	if (di != PFIL_OUT) {
		return false;
	}
	m = nbuf->nb_mbuf0;
	KASSERT(m_flags_p(m, M_PKTHDR));

	/* IPv4: compute the deferred checksum now and clear the flags. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		in_undefer_cksum_tcpudp(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4 | M_CSUM_UDPv4);
		return true;
	}
#ifdef INET6
	/* IPv6: likewise. */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
		in6_undefer_cksum_tcpudp(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6 | M_CSUM_UDPv6);
		return true;
	}
#endif
#else
	/* Standalone build: checksum offload does not apply. */
	(void)nbuf; (void)di;
#endif
	return false;
}
    298 
    299 /*
    300  * npf_mbuf_add_tag: associate a tag with the network buffer.
    301  *
    302  * => Returns 0 on success or error number on failure.
    303  */
int
npf_mbuf_add_tag(nbuf_t *nbuf, struct mbuf *m, uint32_t val)
{
#ifdef _KERNEL
	struct m_tag *mt;
	uint32_t *dat;

	KASSERT(m_flags_p(m, M_PKTHDR));

	/* Allocate an mbuf tag with room for one 32-bit value. */
	mt = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
	if (mt == NULL) {
		return ENOMEM;
	}
	/* The tag payload immediately follows the m_tag header. */
	dat = (uint32_t *)(mt + 1);
	*dat = val;
	m_tag_prepend(m, mt);
	return 0;
#else
	/* Standalone build: delegate to the user-supplied mbuf operations. */
	if (!nbuf->nb_mops->set_tag) {
		return ENOTSUP;
	}
	return nbuf->nb_mops->set_tag(m, val);
#endif
}
    328 
    329 /*
    330  * nbuf_add_tag: associate a tag with the network buffer.
    331  *
    332  * => Returns 0 on success or error number on failure.
    333  */
    334 int
    335 nbuf_add_tag(nbuf_t *nbuf, uint32_t val)
    336 {
    337 	struct mbuf *m = nbuf->nb_mbuf0;
    338 	return npf_mbuf_add_tag(nbuf, m, val);
    339 }
    340 
    341 /*
    342  * nbuf_find_tag: find a tag associated with a network buffer.
    343  *
    344  * => Returns 0 on success or error number on failure.
    345  */
int
nbuf_find_tag(nbuf_t *nbuf, uint32_t *val)
{
	struct mbuf *m = nbuf->nb_mbuf0;
#ifdef _KERNEL
	struct m_tag *mt;

	KASSERT(m_flags_p(m, M_PKTHDR));

	/* Look up the NPF tag on the head mbuf. */
	mt = m_tag_find(m, PACKET_TAG_NPF);
	if (mt == NULL) {
		return EINVAL;
	}
	/* The tag payload immediately follows the m_tag header. */
	*val = *(uint32_t *)(mt + 1);
	return 0;
#else
	/* Standalone build: delegate to the user-supplied mbuf operations. */
	if (!nbuf->nb_mops->get_tag) {
		return ENOTSUP;
	}
	return nbuf->nb_mops->get_tag(m, val);
#endif
}
    368