/*	$NetBSD: npf_mbuf.c,v 1.10 2013/01/20 18:45:56 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF network buffer management interface.
 *
 * The network buffer in NetBSD is the mbuf.  The internal mbuf structures
 * are abstracted within this source file.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.10 2013/01/20 18:45:56 rmind Exp $");

#include <sys/param.h>
#include <sys/mbuf.h>

#include "npf_impl.h"

#define	NBUF_ENSURE_ALIGN	(MAX(COHERENCY_UNIT, 64))
#define	NBUF_ENSURE_MASK	(NBUF_ENSURE_ALIGN - 1)
#define	NBUF_ENSURE_ROUNDUP(x)	(((x) + NBUF_ENSURE_ALIGN) & ~NBUF_ENSURE_MASK)

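/*
 * nbuf_init: initialise the nbuf to cover the given mbuf chain (which
 * must have a packet header) and associate it with the given interface.
 */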
void
nbuf_init(nbuf_t *nbuf, struct mbuf *m, const ifnet_t *ifp)
{
	KASSERT((m->m_flags & M_PKTHDR) != 0);
	KASSERT(ifp != NULL);

	nbuf->nb_mbuf0 = m;
	nbuf->nb_ifp = ifp;
	nbuf_reset(nbuf);
}

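/*
 * nbuf_reset: rewind the nbuf back to the beginning of the first mbuf
 * in the chain.
 */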
void
nbuf_reset(nbuf_t *nbuf)
{
	struct mbuf *m = nbuf->nb_mbuf0;

	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, void *);
}

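/*
 * nbuf_dataptr: return the pointer to the data at the current offset.
 */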
void *
nbuf_dataptr(nbuf_t *nbuf)
{
	KASSERT(nbuf->nb_nptr);
	return nbuf->nb_nptr;
}

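/*
 * nbuf_offset: return the current offset from the beginning of the packet
 * (i.e. from the start of the first mbuf in the chain).
 */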
size_t
nbuf_offset(const nbuf_t *nbuf)
{
	const struct mbuf *m = nbuf->nb_mbuf;
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;

	return poff;
}

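/*
 * nbuf_head_mbuf: return the first mbuf in the chain backing the nbuf.
 */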
struct mbuf *
nbuf_head_mbuf(nbuf_t *nbuf)
{
	return nbuf->nb_mbuf0;
}

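/*
 * nbuf_flag_p: return true if any of the given flags are set on the nbuf.
 */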
bool
nbuf_flag_p(const nbuf_t *nbuf, int flag)
{
	return (nbuf->nb_flags & flag) != 0;
}

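/*
 * nbuf_unset_flag: clear the given flag(s) on the nbuf.
 */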
void
nbuf_unset_flag(nbuf_t *nbuf, int flag)
{
	nbuf->nb_flags &= ~flag;
}

/*
 * nbuf_advance: advance within the nbuf or chain by the specified number
 * of bytes and, if requested, ensure that the area *after* the advance
 * is contiguous.
 *
 * => Returns the new pointer to the data in the nbuf or NULL if the
 *    offset is invalid.
 * => The current mbuf and the offset are stored in the nbuf metadata.
 */
void *
nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
{
	struct mbuf *m = nbuf->nb_mbuf;
	u_int off, wmark;
	uint8_t *d;

	/* Offset with amount to advance. */
	off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
	wmark = m->m_len;

	/* Find the mbuf according to offset. */
	while (__predict_false(wmark <= off)) {
		m = m->m_next;
		if (__predict_false(m == NULL)) {
			/*
			 * We have reached the end of the chain, therefore
			 * the offset is beyond the packet length.
			 */
			return NULL;
		}
		wmark += m->m_len;
	}
	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Offset in mbuf data. */
	d = mtod(m, uint8_t *);
	KASSERT(off >= (wmark - m->m_len));
	d += (off - (wmark - m->m_len));

	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = d;

	if (ensure) {
		/* Ensure contiguousness (may change the nbuf chain). */
		d = nbuf_ensure_contig(nbuf, ensure);
	}
	return d;
}
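
/*
 * Illustrative sketch (not part of NPF itself): a hypothetical caller
 * could advance past an IP header of "hlen" bytes and request that the
 * TCP header which follows be made contiguous.  The variables "m", "ifp"
 * and "hlen" are assumed to be provided by the caller.
 *
 *	nbuf_t nbuf;
 *	struct tcphdr *th;
 *
 *	nbuf_init(&nbuf, m, ifp);
 *	th = nbuf_advance(&nbuf, hlen, sizeof(struct tcphdr));
 *	if (th == NULL) {
 *		return ENOBUFS;
 *	}
 */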

/*
 * nbuf_ensure_contig: check whether the specified amount of data, starting
 * from the current point in the nbuf, is contiguous.  If it is not, then
 * rearrange the chain so that it is.
 *
 * => Returns a pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure; in that case, the nbuf becomes invalid.
 */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
	const struct mbuf * const n = nbuf->nb_mbuf;
	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

	KASSERT(off < n->m_len);

	if (__predict_false(n->m_len < (off + len))) {
		struct mbuf *m = nbuf->nb_mbuf0;
		const size_t foff = nbuf_offset(nbuf);
		const size_t plen = m_length(m);
		const size_t mlen = m->m_len;
		size_t target;
		bool success;

		npf_stats_inc(NPF_STAT_NBUF_NONCONTIG);

		/* Attempt to round up to NBUF_ENSURE_ALIGN bytes. */
		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
			target = foff + len;
		}

		/* Rearrange the chain to be contiguous. */
		KASSERT((m->m_flags & M_PKTHDR) != 0);
		success = m_ensure_contig(&m, target);
		KASSERT(m != NULL);

		/* If there is no change in the chain: return what we have. */
		if (m == nbuf->nb_mbuf0 && m->m_len == mlen) {
			return success ? nbuf->nb_nptr : NULL;
		}

		/*
		 * The mbuf chain was re-arranged.  Update the pointers
		 * accordingly and indicate that the references to the data
		 * might need a reset.
		 */
		KASSERT((m->m_flags & M_PKTHDR) != 0);
		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;

		KASSERT(foff < m->m_len && foff < m_length(m));
		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
		nbuf->nb_flags |= NBUF_DATAREF_RESET;

		if (!success) {
			npf_stats_inc(NPF_STAT_NBUF_CONTIG_FAIL);
			return NULL;
		}
	}
	return nbuf->nb_nptr;
}

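/*
 * nbuf_ensure_writable: ensure that the specified amount of data, starting
 * from the current point in the nbuf, resides in writable mbuf storage,
 * copying the data if necessary.
 *
 * => Returns a pointer to the data at the current offset or NULL on
 *    failure, in which case the nbuf is invalidated.
 */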
void *
nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
{
	struct mbuf *m = nbuf->nb_mbuf;
	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
	const int tlen = off + len;
	bool head_buf;

	KASSERT(off < m_length(nbuf->nb_mbuf0));

	if (!M_UNWRITABLE(m, tlen)) {
		return nbuf->nb_nptr;
	}
	head_buf = (nbuf->nb_mbuf0 == m);
	if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
		memset(nbuf, 0, sizeof(nbuf_t));
		return NULL;
	}
	if (head_buf) {
		KASSERT((m->m_flags & M_PKTHDR) != 0);
		KASSERT(off < m_length(m));
		nbuf->nb_mbuf0 = m;
	}
	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = mtod(m, uint8_t *) + off;

	return nbuf->nb_nptr;
}

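/*
 * nbuf_cksum_barrier: for an outgoing packet, compute any delayed TCP/UDP
 * (IPv4) checksum now and clear the corresponding flags, so that subsequent
 * modifications operate on a packet with a finalised checksum.
 *
 * => Returns true if a delayed checksum was computed, false otherwise.
 */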
bool
nbuf_cksum_barrier(nbuf_t *nbuf, int di)
{
	struct mbuf *m;

	if (di != PFIL_OUT) {
		return false;
	}
	m = nbuf->nb_mbuf0;
	KASSERT((m->m_flags & M_PKTHDR) != 0);

	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		in_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4 | M_CSUM_UDPv4);
		return true;
	}
	return false;
}

/*
 * nbuf_add_tag: add a tag to the specified network buffer.
 *
 * => Returns 0 on success or errno on failure.
 */
int
nbuf_add_tag(nbuf_t *nbuf, uint32_t key, uint32_t val)
{
	struct mbuf *m = nbuf->nb_mbuf0;
	struct m_tag *mt;
	uint32_t *dat;

	KASSERT((m->m_flags & M_PKTHDR) != 0);

	mt = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
	if (mt == NULL) {
		return ENOMEM;
	}
	dat = (uint32_t *)(mt + 1);
	*dat = val;
	m_tag_prepend(m, mt);
	return 0;
}

/*
 * nbuf_find_tag: find a tag in the specified network buffer.
 *
 * => Returns 0 on success or errno on failure.
 */
int
nbuf_find_tag(nbuf_t *nbuf, uint32_t key, void **data)
{
	struct mbuf *m = nbuf->nb_mbuf0;
	struct m_tag *mt;

	KASSERT((m->m_flags & M_PKTHDR) != 0);

	mt = m_tag_find(m, PACKET_TAG_NPF, NULL);
	if (mt == NULL) {
		return EINVAL;
	}
	*data = (void *)(mt + 1);
	return 0;
}