      1 /*	$NetBSD: ieee80211_netbsd.c,v 1.31.2.3 2018/07/16 20:11:11 phil Exp $ */
      2 
      3 /*-
      4  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
      5  *
      6  * Copyright (c) 2003-2009 Sam Leffler, Errno Consulting
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 #include <sys/cdefs.h>
     31 /*  __FBSDID("$FreeBSD$");  */
     32 __KERNEL_RCSID(0, "$NetBSD: ieee80211_netbsd.c,v 1.31.2.3 2018/07/16 20:11:11 phil Exp $");
     33 
     34 /*
     35  * IEEE 802.11 support (NetBSD-specific code)
     36  */
     37 
     38 #include "opt_wlan.h"
     39 
     40 #include <sys/atomic.h>
     41 #include <sys/param.h>
     42 #include <sys/systm.h>
     43 #include <sys/kernel.h>
     44 #include <sys/malloc.h>
     45 #include <sys/mbuf.h>
     46 #include <sys/module.h>
     47 #include <sys/proc.h>
     48 #include <sys/sysctl.h>
     49 #include <sys/syslog.h>
     50 
     51 #include <sys/socket.h>
     52 
     53 #include <net/bpf.h>
     54 #include <net/if.h>
     55 #include <net/if_dl.h>
     56 #include <net/if_ether.h>
     57 #include <net/if_media.h>
     58 #include <net/if_types.h>
     59 #include <net/route.h>
     60 
     61 #include <net80211/ieee80211_var.h>
     62 #include <net80211/ieee80211_input.h>
     63 
     64 #ifdef notyet
     65 SYSCTL_NODE(_net, OID_AUTO, wlan, CTLFLAG_RD, 0, "IEEE 80211 parameters");
     66 
     67 #ifdef IEEE80211_DEBUG
     68 static int	ieee80211_debug = 0;
     69 SYSCTL_INT(_net_wlan, OID_AUTO, debug, CTLFLAG_RW, &ieee80211_debug,
     70 	    0, "debugging printfs");
     71 #endif
     72 
     73 static struct if_clone *wlan_cloner;
     74 #endif
     75 /* notyet */
     76 
     77 static const char wlanname[] = "wlan";
     78 
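/*
 * if_clone create handler for wlan(4) interfaces: copy the
 * ieee80211_clone_params from userland, locate the parent device,
 * validate the requested opmode against the device capabilities and
 * ask the driver to create the VAP via ic_vap_create().
 * (Currently unused in the NetBSD port; see the "notyet" module glue
 * at the end of this file.)
 */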
     79 static __unused int
     80 wlan_clone_create(struct if_clone *ifc, int unit, void * params)
     81 {
     82 	struct ieee80211_clone_params cp;
     83 	struct ieee80211vap *vap;
     84 	struct ieee80211com *ic;
     85 	int error;
     86 
     87 	error = copyin(params, &cp, sizeof(cp));
     88 	if (error)
     89 		return error;
     90 	ic = ieee80211_find_com(cp.icp_parent);
     91 	if (ic == NULL)
     92 		return ENXIO;
     93 	if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) {
     94 		ic_printf(ic, "%s: invalid opmode %d\n", __func__,
     95 		    cp.icp_opmode);
     96 		return EINVAL;
     97 	}
     98 	if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) {
     99 		ic_printf(ic, "%s mode not supported\n",
    100 		    ieee80211_opmode_name[cp.icp_opmode]);
    101 		return EOPNOTSUPP;
    102 	}
    103 	if ((cp.icp_flags & IEEE80211_CLONE_TDMA) &&
    104 #ifdef IEEE80211_SUPPORT_TDMA
    105 	    (ic->ic_caps & IEEE80211_C_TDMA) == 0
    106 #else
    107 	    (1)
    108 #endif
    109 	) {
    110 		ic_printf(ic, "TDMA not supported\n");
    111 		return EOPNOTSUPP;
    112 	}
    113 	vap = ic->ic_vap_create(ic, wlanname, unit,
    114 			cp.icp_opmode, cp.icp_flags, cp.icp_bssid,
    115 			cp.icp_flags & IEEE80211_CLONE_MACADDR ?
    116 			    cp.icp_macaddr : ic->ic_macaddr);
    117 
    118 	return (vap == NULL ? EIO : 0);
    119 }
    120 
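/*
 * if_clone destroy handler: hand the VAP back to the driver via
 * ic_vap_delete().  (Also unused until the clone glue is wired up.)
 */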
    121 static __unused void
    122 wlan_clone_destroy(struct ifnet *ifp)
    123 {
    124 	struct ieee80211vap *vap = ifp->if_softc;
    125 	struct ieee80211com *ic = vap->iv_ic;
    126 
    127 	ic->ic_vap_delete(vap);
    128 }
    129 
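/*
 * Tear down a VAP.  FreeBSD routes this through if_clone_destroyif();
 * the NetBSD path is still a stub.
 */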
    130 void
    131 ieee80211_vap_destroy(struct ieee80211vap *vap)
    132 {
    133 #ifdef notyet
    134 	CURVNET_SET(vap->iv_ifp->if_vnet);
    135 	if_clone_destroyif(wlan_cloner, vap->iv_ifp);
    136 	CURVNET_RESTORE();
    137 #else
     138 	printf("%s: not yet implemented\n", __func__);
    139 #endif
    140 }
    141 
    142 #ifdef notyet
    143 int
    144 ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS)
    145 {
    146 	int msecs = ticks_to_msecs(*(int *)arg1);
    147 	int error, t;
    148 
    149 	error = sysctl_handle_int(oidp, &msecs, 0, req);
    150 	if (error || !req->newptr)
    151 		return error;
    152 	t = msecs_to_ticks(msecs);
    153 	*(int *)arg1 = (t < 1) ? 1 : t;
    154 	return 0;
    155 }
    156 
    157 static int
    158 ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)
    159 {
    160 	int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT;
    161 	int error;
    162 
    163 	error = sysctl_handle_int(oidp, &inact, 0, req);
    164 	if (error || !req->newptr)
    165 		return error;
    166 	*(int *)arg1 = inact / IEEE80211_INACT_WAIT;
    167 	return 0;
    168 }
    169 
    170 static int
    171 ieee80211_sysctl_parent(SYSCTL_HANDLER_ARGS)
    172 {
    173 	struct ieee80211com *ic = arg1;
    174 
    175 	return SYSCTL_OUT_STR(req, ic->ic_name);
    176 }
    177 
    178 static int
    179 ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS)
    180 {
    181 	struct ieee80211com *ic = arg1;
    182 	int t = 0, error;
    183 
    184 	error = sysctl_handle_int(oidp, &t, 0, req);
    185 	if (error || !req->newptr)
    186 		return error;
    187 	IEEE80211_LOCK(ic);
    188 	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
    189 	IEEE80211_UNLOCK(ic);
    190 	return 0;
    191 }
    192 
    193 /*
    194  * For now, just restart everything.
    195  *
     196  * Later on, it'd be nice to have a separate VAP restart in
     197  * addition to the full-device restart.
    198  */
    199 static int
    200 ieee80211_sysctl_vap_restart(SYSCTL_HANDLER_ARGS)
    201 {
    202 	struct ieee80211vap *vap = arg1;
    203 	int t = 0, error;
    204 
    205 	error = sysctl_handle_int(oidp, &t, 0, req);
    206 	if (error || !req->newptr)
    207 		return error;
    208 
    209 	ieee80211_restart_all(vap->iv_ic);
    210 	return 0;
    211 }
    212 #endif /* notyet */
    213 
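/*
 * Sysctl hooks.  These are no-ops in the NetBSD port for now; the
 * FreeBSD sysctl handlers and tree setup are preserved under "notyet"
 * above and below.
 */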
    214 void
    215 ieee80211_sysctl_attach(struct ieee80211com *ic)
    216 {
    217 }
    218 
    219 void
    220 ieee80211_sysctl_detach(struct ieee80211com *ic)
    221 {
    222 }
    223 
    224 void
    225 ieee80211_sysctl_vattach(struct ieee80211vap *vap)
    226 {
    227 #ifdef notyet
    228 	struct ifnet *ifp = vap->iv_ifp;
    229 	struct sysctl_ctx_list *ctx;
    230 	struct sysctl_oid *oid;
    231 	char num[14];			/* sufficient for 32 bits */
    232 
    233 	ctx = (struct sysctl_ctx_list *) IEEE80211_MALLOC(sizeof(struct sysctl_ctx_list),
    234 		M_DEVBUF, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
    235 	if (ctx == NULL) {
    236 		if_printf(ifp, "%s: cannot allocate sysctl context!\n",
    237 			__func__);
    238 		return;
    239 	}
    240 	sysctl_ctx_init(ctx);
    241 	snprintf(num, sizeof(num), "%u", ifp->if_dunit);
    242 	oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan),
    243 		OID_AUTO, num, CTLFLAG_RD, NULL, "");
    244 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    245 		"%parent", CTLTYPE_STRING | CTLFLAG_RD, vap->iv_ic, 0,
    246 		ieee80211_sysctl_parent, "A", "parent device");
    247 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    248 		"driver_caps", CTLFLAG_RW, &vap->iv_caps, 0,
    249 		"driver capabilities");
    250 #ifdef IEEE80211_DEBUG
    251 	vap->iv_debug = ieee80211_debug;
    252 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    253 		"debug", CTLFLAG_RW, &vap->iv_debug, 0,
    254 		"control debugging printfs");
    255 #endif
    256 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    257 		"bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0,
    258 		"consecutive beacon misses before scanning");
    259 	/* XXX inherit from tunables */
    260 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    261 		"inact_run", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_run, 0,
    262 		ieee80211_sysctl_inact, "I",
    263 		"station inactivity timeout (sec)");
    264 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    265 		"inact_probe", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_probe, 0,
    266 		ieee80211_sysctl_inact, "I",
    267 		"station inactivity probe timeout (sec)");
    268 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    269 		"inact_auth", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_auth, 0,
    270 		ieee80211_sysctl_inact, "I",
    271 		"station authentication timeout (sec)");
    272 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    273 		"inact_init", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_init, 0,
    274 		ieee80211_sysctl_inact, "I",
    275 		"station initial state timeout (sec)");
    276 	if (vap->iv_htcaps & IEEE80211_HTC_HT) {
    277 		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    278 			"ampdu_mintraffic_bk", CTLFLAG_RW,
    279 			&vap->iv_ampdu_mintraffic[WME_AC_BK], 0,
    280 			"BK traffic tx aggr threshold (pps)");
    281 		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    282 			"ampdu_mintraffic_be", CTLFLAG_RW,
    283 			&vap->iv_ampdu_mintraffic[WME_AC_BE], 0,
    284 			"BE traffic tx aggr threshold (pps)");
    285 		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    286 			"ampdu_mintraffic_vo", CTLFLAG_RW,
    287 			&vap->iv_ampdu_mintraffic[WME_AC_VO], 0,
    288 			"VO traffic tx aggr threshold (pps)");
    289 		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    290 			"ampdu_mintraffic_vi", CTLFLAG_RW,
    291 			&vap->iv_ampdu_mintraffic[WME_AC_VI], 0,
    292 			"VI traffic tx aggr threshold (pps)");
    293 	}
    294 
    295 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    296 		"force_restart", CTLTYPE_INT | CTLFLAG_RW, vap, 0,
    297 		ieee80211_sysctl_vap_restart, "I",
    298 		"force a VAP restart");
    299 
    300 	if (vap->iv_caps & IEEE80211_C_DFS) {
    301 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
    302 			"radar", CTLTYPE_INT | CTLFLAG_RW, vap->iv_ic, 0,
    303 			ieee80211_sysctl_radar, "I", "simulate radar event");
    304 	}
    305 	vap->iv_sysctl = ctx;
    306 	vap->iv_oid = oid;
    307 #endif
    308 }
    309 
    310 void
    311 ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
    312 {
    313 #ifdef notyet
    314 	if (vap->iv_sysctl != NULL) {
    315 		sysctl_ctx_free(vap->iv_sysctl);
    316 		IEEE80211_FREE(vap->iv_sysctl, M_DEVBUF);
    317 		vap->iv_sysctl = NULL;
    318 	}
    319 #endif
    320 }
    321 
    322 
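/*
 * Decrement a node reference and return non-zero if the count reached
 * zero.  There is no atomic_dec_and_test() equivalent here, so this
 * mimics the FreeBSD trick: after the decrement, compare-and-swap the
 * count from 0 to 1; the swap only succeeds (old value 0) when the
 * last reference was just dropped.
 */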
    323 int
    324 ieee80211_node_dectestref(struct ieee80211_node *ni)
    325 {
    326 	/* XXX need equivalent of atomic_dec_and_test */
    327 	atomic_subtract_int(&ni->ni_refcnt, 1);
    328 	return atomic_cas_uint(&ni->ni_refcnt, 0, 1) == 0;
    329 }
    330 
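/*
 * Drain a queue of 802.11 frames, releasing the node reference
 * recorded in each mbuf's receive-interface pointer.
 */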
    331 void
    332 ieee80211_drain_ifq(struct ifqueue *ifq)
    333 {
    334 	struct ieee80211_node *ni;
    335 	struct mbuf *m;
    336 
    337 	for (;;) {
    338 		IF_DEQUEUE(ifq, m);
    339 		if (m == NULL)
    340 			break;
    341 
    342 		ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
    343 		FBSDKASSERT(ni != NULL, ("frame w/o node"));
    344 		ieee80211_free_node(ni);
    345 		ieee80211_free_mbuf(m);
    346 	}
    347 }
    348 
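/*
 * As above, but remove only the frames that belong to the specified
 * VAP, fixing up the queue's tail pointer afterwards.
 */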
    349 void
    350 ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
    351 {
    352 	struct ieee80211_node *ni;
    353 	struct mbuf *m, **mprev;
    354 
    355 	IFQ_LOCK(ifq);
    356 	mprev = &ifq->ifq_head;
    357 	while ((m = *mprev) != NULL) {
    358 		ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
    359 		if (ni != NULL && ni->ni_vap == vap) {
    360 			*mprev = m->m_nextpkt;		/* remove from list */
    361 			ifq->ifq_len--;
    362 
    363 			ieee80211_free_node(ni);	/* reclaim ref */
    364 			ieee80211_free_mbuf(m);
    365 		} else
    366 			mprev = &m->m_nextpkt;
    367 	}
    368 	/* recalculate tail ptr */
    369 	m = ifq->ifq_head;
    370 	for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
    371 		;
    372 	ifq->ifq_tail = m;
    373 	IFQ_UNLOCK(ifq);
    374 }
    375 
     376 /*
     377  * Set the m_data pointer of a cluster-backed mbuf to place an object
     378  * of the specified size at the end of the cluster, longword aligned.
     379  */
    380 #define	MC_ALIGN(m, len)						\
    381 do {									\
    382 	(m)->m_data += rounddown2(MCLBYTES - (len), sizeof(long));	\
    383 } while (/* CONSTCOND */ 0)
    384 
    385 /*
    386  * Allocate and setup a management frame of the specified
    387  * size.  We return the mbuf and a pointer to the start
    388  * of the contiguous data area that's been reserved based
    389  * on the packet length.  The data area is forced to 32-bit
    390  * alignment and the buffer length to a multiple of 4 bytes.
    391  * This is done mainly so beacon frames (that require this)
    392  * can use this interface too.
    393  */
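/*
 * Illustrative use only (a sketch, not taken from this file): a caller
 * building a management frame might do roughly
 *
 *	uint8_t *frm;
 *	struct mbuf *m;
 *
 *	m = ieee80211_getmgtframe(&frm,
 *	    ic->ic_headroom + sizeof(struct ieee80211_frame), payload_len);
 *	if (m == NULL)
 *		return ENOMEM;
 *	(then write payload_len bytes of information elements at frm)
 *
 * where "payload_len" stands for whatever length the caller computed.
 */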
    394 struct mbuf *
    395 ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
    396 {
    397 	struct mbuf *m;
    398 	u_int len;
    399 
    400 	/*
    401 	 * NB: we know the mbuf routines will align the data area
    402 	 *     so we don't need to do anything special.
    403 	 */
    404 	len = roundup2(headroom + pktlen, 4);
    405 	FBSDKASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
    406 	if (len < MINCLSIZE) {
    407 		m = m_gethdr(M_NOWAIT, MT_DATA);
    408 		/*
    409 		 * Align the data in case additional headers are added.
    410 		 * This should only happen when a WEP header is added
    411 		 * which only happens for shared key authentication mgt
    412 		 * frames which all fit in MHLEN.
    413 		 */
    414 		if (m != NULL)
    415 			M_ALIGN(m, len);
    416 	} else {
    417 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    418 		if (m != NULL)
    419 			MC_ALIGN(m, len);
    420 	}
    421 	if (m != NULL) {
    422 		m->m_data += headroom;
    423 		*frm = m->m_data;
    424 	}
    425 	return m;
    426 }
    427 
    428 #ifndef __NO_STRICT_ALIGNMENT
    429 /*
    430  * Re-align the payload in the mbuf.  This is mainly used (right now)
    431  * to handle IP header alignment requirements on certain architectures.
    432  */
    433 struct mbuf *
    434 ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
    435 {
    436 	int pktlen, space;
    437 	struct mbuf *n;
    438 
    439 	pktlen = m->m_pkthdr.len;
    440 	space = pktlen + align;
    441 	if (space < MINCLSIZE)
    442 		n = m_gethdr(M_NOWAIT, MT_DATA);
    443 	else {
    444 		n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
    445 		    space <= MCLBYTES ?     MCLBYTES :
    446 #if MJUMPAGESIZE != MCLBYTES
    447 		    space <= MJUMPAGESIZE ? MJUMPAGESIZE :
    448 #endif
    449 		    space <= MJUM9BYTES ?   MJUM9BYTES : MJUM16BYTES);
    450 	}
    451 	if (__predict_true(n != NULL)) {
    452 		m_move_pkthdr(n, m);
    453 		n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
    454 		m_copydata(m, 0, pktlen, mtod(n, caddr_t));
    455 		n->m_len = pktlen;
    456 	} else {
    457 		IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
    458 		    mtod(m, const struct ieee80211_frame *), NULL,
    459 		    "%s", "no mbuf to realign");
    460 		vap->iv_stats.is_rx_badalign++;
    461 	}
    462 	m_freem(m);
    463 	return n;
    464 }
    465 #endif /* !__NO_STRICT_ALIGNMENT */
    466 
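/*
 * Attach a tx-completion callback to an mbuf via an m_tag and mark the
 * mbuf M_TXCB; ieee80211_process_callback() below invokes it once the
 * driver reports transmit status.  Returns 1 on success, 0 if the tag
 * could not be allocated.
 */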
    467 int
    468 ieee80211_add_callback(struct mbuf *m,
    469 	void (*func)(struct ieee80211_node *, void *, int), void *arg)
    470 {
    471 	struct m_tag *mtag;
    472 	struct ieee80211_cb *cb;
    473 
    474 	mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_CALLBACK,
    475 			sizeof(struct ieee80211_cb), M_NOWAIT);
    476 	if (mtag == NULL)
    477 		return 0;
    478 
    479 	cb = (struct ieee80211_cb *)(mtag+1);
    480 	cb->func = func;
    481 	cb->arg = arg;
    482 	m_tag_prepend(m, mtag);
    483 	m->m_flags |= M_TXCB;
    484 	return 1;
    485 }
    486 
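/*
 * Record raw-transmit (BPF injection) parameters in an m_tag attached
 * to the mbuf.  Returns 1 on success, 0 on allocation failure.
 */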
    487 int
    488 ieee80211_add_xmit_params(struct mbuf *m,
    489     const struct ieee80211_bpf_params *params)
    490 {
    491 	struct m_tag *mtag;
    492 	struct ieee80211_tx_params *tx;
    493 
    494 	mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_XMIT_PARAMS,
    495 	    sizeof(struct ieee80211_tx_params), M_NOWAIT);
    496 	if (mtag == NULL)
    497 		return (0);
    498 
    499 	tx = (struct ieee80211_tx_params *)(mtag+1);
    500 	memcpy(&tx->params, params, sizeof(struct ieee80211_bpf_params));
    501 	m_tag_prepend(m, mtag);
    502 	return (1);
    503 }
    504 
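/*
 * Fetch transmit parameters stored by ieee80211_add_xmit_params().
 * Returns 0 on success, -1 if no such tag is present.
 */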
    505 int
    506 ieee80211_get_xmit_params(struct mbuf *m,
    507     struct ieee80211_bpf_params *params)
    508 {
    509 	struct m_tag *mtag;
    510 	struct ieee80211_tx_params *tx;
    511 
    512 	mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_XMIT_PARAMS,
    513 	    NULL);
    514 	if (mtag == NULL)
    515 		return (-1);
    516 	tx = (struct ieee80211_tx_params *)(mtag + 1);
    517 	memcpy(params, &tx->params, sizeof(struct ieee80211_bpf_params));
    518 	return (0);
    519 }
    520 
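/*
 * Run the tx-completion callback (if any) attached to the mbuf,
 * passing the driver-reported status.
 */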
    521 void
    522 ieee80211_process_callback(struct ieee80211_node *ni,
    523 	struct mbuf *m, int status)
    524 {
    525 	struct m_tag *mtag;
    526 
    527 	mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_CALLBACK, NULL);
    528 	if (mtag != NULL) {
    529 		struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1);
    530 		cb->func(ni, cb->arg, status);
    531 	}
    532 }
    533 
    534 /*
    535  * Add RX parameters to the given mbuf.
    536  *
    537  * Returns 1 if OK, 0 on error.
    538  */
    539 int
    540 ieee80211_add_rx_params(struct mbuf *m, const struct ieee80211_rx_stats *rxs)
    541 {
    542 	struct m_tag *mtag;
    543 	struct ieee80211_rx_params *rx;
    544 
    545 	mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
    546 	    sizeof(struct ieee80211_rx_stats), M_NOWAIT);
    547 	if (mtag == NULL)
    548 		return (0);
    549 
    550 	rx = (struct ieee80211_rx_params *)(mtag + 1);
    551 	memcpy(&rx->params, rxs, sizeof(*rxs));
    552 	m_tag_prepend(m, mtag);
    553 	return (1);
    554 }
    555 
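/*
 * Copy out the RX parameters attached by ieee80211_add_rx_params().
 * Returns 0 on success, -1 if the mbuf carries no such tag.
 */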
    556 int
    557 ieee80211_get_rx_params(struct mbuf *m, struct ieee80211_rx_stats *rxs)
    558 {
    559 	struct m_tag *mtag;
    560 	struct ieee80211_rx_params *rx;
    561 
    562 	mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
    563 	    NULL);
    564 	if (mtag == NULL)
    565 		return (-1);
    566 	rx = (struct ieee80211_rx_params *)(mtag + 1);
    567 	memcpy(rxs, &rx->params, sizeof(*rxs));
    568 	return (0);
    569 }
    570 
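/*
 * Like the above, but return a pointer into the m_tag itself
 * (or NULL), avoiding the copy.
 */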
    571 const struct ieee80211_rx_stats *
    572 ieee80211_get_rx_params_ptr(struct mbuf *m)
    573 {
    574 	struct m_tag *mtag;
    575 	struct ieee80211_rx_params *rx;
    576 
    577 	mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
    578 	    NULL);
    579 	if (mtag == NULL)
    580 		return (NULL);
    581 	rx = (struct ieee80211_rx_params *)(mtag + 1);
    582 	return (&rx->params);
    583 }
    584 
    585 
    586 /*
    587  * Add TOA parameters to the given mbuf.
    588  */
    589 int
    590 ieee80211_add_toa_params(struct mbuf *m, const struct ieee80211_toa_params *p)
    591 {
    592 	struct m_tag *mtag;
    593 	struct ieee80211_toa_params *rp;
    594 
    595 	mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
    596 	    sizeof(struct ieee80211_toa_params), M_NOWAIT);
    597 	if (mtag == NULL)
    598 		return (0);
    599 
    600 	rp = (struct ieee80211_toa_params *)(mtag + 1);
    601 	memcpy(rp, p, sizeof(*rp));
    602 	m_tag_prepend(m, mtag);
    603 	return (1);
    604 }
    605 
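/*
 * Copy out the TOA parameters attached by ieee80211_add_toa_params().
 * Returns 1 if present (p may be NULL to merely test for presence),
 * 0 otherwise.
 */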
    606 int
    607 ieee80211_get_toa_params(struct mbuf *m, struct ieee80211_toa_params *p)
    608 {
    609 	struct m_tag *mtag;
    610 	struct ieee80211_toa_params *rp;
    611 
    612 	mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
    613 	    NULL);
    614 	if (mtag == NULL)
    615 		return (0);
    616 	rp = (struct ieee80211_toa_params *)(mtag + 1);
    617 	if (p != NULL)
    618 		memcpy(p, rp, sizeof(*p));
    619 	return (1);
    620 }
    621 
    622 /*
    623  * Transmit a frame to the parent interface.
    624  */
    625 int
    626 ieee80211_parent_xmitpkt(struct ieee80211com *ic, struct mbuf *m)
    627 {
    628 	int error;
    629 
    630 	/*
     631 	 * Assert that the IC TX lock is held - this enforces that
     632 	 * the processing -> queuing order is maintained.
    633 	 */
    634 	IEEE80211_TX_LOCK_ASSERT(ic);
    635 	error = ic->ic_transmit(ic, m);
    636 	if (error) {
    637 		struct ieee80211_node *ni;
    638 
    639 		ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
    640 
    641 		/* XXX number of fragments */
    642 		if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
    643 		ieee80211_free_node(ni);
    644 		ieee80211_free_mbuf(m);
    645 	}
    646 	return (error);
    647 }
    648 
    649 /*
    650  * Transmit a frame to the VAP interface.
    651  */
    652 int
    653 ieee80211_vap_xmitpkt(struct ieee80211vap *vap, struct mbuf *m)
    654 {
    655 	struct ifnet *ifp = vap->iv_ifp;
    656 
    657 	/*
    658 	 * When transmitting via the VAP, we shouldn't hold
    659 	 * any IC TX lock as the VAP TX path will acquire it.
    660 	 */
    661 	IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic);
    662 
    663 	return (ifp->if_transmit(ifp, m));
    664 
    665 }
    666 
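/*
 * Fill a buffer with random bytes, one 32-bit arc4random() word
 * at a time.
 */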
    667 void
    668 get_random_bytes(void *p, size_t n)
    669 {
    670 	uint8_t *dp = p;
    671 
    672 	while (n > 0) {
    673 		uint32_t v = arc4random();
    674 		size_t nb = n > sizeof(uint32_t) ? sizeof(uint32_t) : n;
     675 		bcopy(&v, dp, nb);
     676 		dp += nb, n -= nb;
    677 	}
    678 }
    679 
    680 /*
    681  * Helper function for events that pass just a single mac address.
    682  */
    683 static void
    684 notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN])
    685 {
    686 	struct ieee80211_join_event iev;
    687 
    688 	CURVNET_SET(ifp->if_vnet);
    689 	memset(&iev, 0, sizeof(iev));
    690 	IEEE80211_ADDR_COPY(iev.iev_addr, mac);
    691 	rt_ieee80211msg(ifp, op, &iev, sizeof(iev));
    692 	CURVNET_RESTORE();
    693 }
    694 
    695 void
    696 ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
    697 {
    698 	struct ieee80211vap *vap = ni->ni_vap;
    699 	struct ifnet *ifp = vap->iv_ifp;
    700 
    701 	CURVNET_SET_QUIET(ifp->if_vnet);
    702 	IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
    703 	    (ni == vap->iv_bss) ? "bss " : "");
    704 
    705 	if (ni == vap->iv_bss) {
    706 		notify_macaddr(ifp, newassoc ?
    707 		    RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
    708 		if_link_state_change(ifp, LINK_STATE_UP);
    709 	} else {
    710 		notify_macaddr(ifp, newassoc ?
    711 		    RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
    712 	}
    713 	CURVNET_RESTORE();
    714 }
    715 
    716 void
    717 ieee80211_notify_node_leave(struct ieee80211_node *ni)
    718 {
    719 	struct ieee80211vap *vap = ni->ni_vap;
    720 	struct ifnet *ifp = vap->iv_ifp;
    721 
    722 	CURVNET_SET_QUIET(ifp->if_vnet);
    723 	IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
    724 	    (ni == vap->iv_bss) ? "bss " : "");
    725 
    726 	if (ni == vap->iv_bss) {
    727 		rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
    728 		if_link_state_change(ifp, LINK_STATE_DOWN);
    729 	} else {
    730 		/* fire off wireless event station leaving */
    731 		notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
    732 	}
    733 	CURVNET_RESTORE();
    734 }
    735 
    736 void
    737 ieee80211_notify_scan_done(struct ieee80211vap *vap)
    738 {
    739 	struct ifnet *ifp = vap->iv_ifp;
    740 
    741 	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
    742 
    743 	/* dispatch wireless event indicating scan completed */
    744 	CURVNET_SET(ifp->if_vnet);
    745 	rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0);
    746 	CURVNET_RESTORE();
    747 }
    748 
    749 void
    750 ieee80211_notify_replay_failure(struct ieee80211vap *vap,
    751 	const struct ieee80211_frame *wh, const struct ieee80211_key *k,
    752 	u_int64_t rsc, int tid)
    753 {
    754 	struct ifnet *ifp = vap->iv_ifp;
    755 
    756 	IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
    757 	    "%s replay detected tid %d <rsc %ju, csc %ju, keyix %u rxkeyix %u>",
    758 	    k->wk_cipher->ic_name, tid, (intmax_t) rsc,
    759 	    (intmax_t) k->wk_keyrsc[tid],
    760 	    k->wk_keyix, k->wk_rxkeyix);
    761 
    762 	if (ifp != NULL) {		/* NB: for cipher test modules */
    763 		struct ieee80211_replay_event iev;
    764 
    765 		IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
    766 		IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
    767 		iev.iev_cipher = k->wk_cipher->ic_cipher;
    768 		if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE)
    769 			iev.iev_keyix = k->wk_rxkeyix;
    770 		else
    771 			iev.iev_keyix = k->wk_keyix;
    772 		iev.iev_keyrsc = k->wk_keyrsc[tid];
    773 		iev.iev_rsc = rsc;
    774 		CURVNET_SET(ifp->if_vnet);
    775 		rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev));
    776 		CURVNET_RESTORE();
    777 	}
    778 }
    779 
    780 void
    781 ieee80211_notify_michael_failure(struct ieee80211vap *vap,
    782 	const struct ieee80211_frame *wh, u_int keyix)
    783 {
    784 	struct ifnet *ifp = vap->iv_ifp;
    785 
    786 	IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
    787 	    "michael MIC verification failed <keyix %u>", keyix);
    788 	vap->iv_stats.is_rx_tkipmic++;
    789 
    790 	if (ifp != NULL) {		/* NB: for cipher test modules */
    791 		struct ieee80211_michael_event iev;
    792 
    793 		IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
    794 		IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
    795 		iev.iev_cipher = IEEE80211_CIPHER_TKIP;
    796 		iev.iev_keyix = keyix;
    797 		CURVNET_SET(ifp->if_vnet);
    798 		rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev));
    799 		CURVNET_RESTORE();
    800 	}
    801 }
    802 
    803 void
    804 ieee80211_notify_wds_discover(struct ieee80211_node *ni)
    805 {
    806 	struct ieee80211vap *vap = ni->ni_vap;
    807 	struct ifnet *ifp = vap->iv_ifp;
    808 
    809 	notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr);
    810 }
    811 
    812 void
    813 ieee80211_notify_csa(struct ieee80211com *ic,
    814 	const struct ieee80211_channel *c, int mode, int count)
    815 {
    816 	struct ieee80211_csa_event iev;
    817 	struct ieee80211vap *vap;
    818 	struct ifnet *ifp;
    819 
    820 	memset(&iev, 0, sizeof(iev));
    821 	iev.iev_flags = c->ic_flags;
    822 	iev.iev_freq = c->ic_freq;
    823 	iev.iev_ieee = c->ic_ieee;
    824 	iev.iev_mode = mode;
    825 	iev.iev_count = count;
    826 	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
    827 		ifp = vap->iv_ifp;
    828 		CURVNET_SET(ifp->if_vnet);
    829 		rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev));
    830 		CURVNET_RESTORE();
    831 	}
    832 }
    833 
    834 void
    835 ieee80211_notify_radar(struct ieee80211com *ic,
    836 	const struct ieee80211_channel *c)
    837 {
    838 	struct ieee80211_radar_event iev;
    839 	struct ieee80211vap *vap;
    840 	struct ifnet *ifp;
    841 
    842 	memset(&iev, 0, sizeof(iev));
    843 	iev.iev_flags = c->ic_flags;
    844 	iev.iev_freq = c->ic_freq;
    845 	iev.iev_ieee = c->ic_ieee;
    846 	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
    847 		ifp = vap->iv_ifp;
    848 		CURVNET_SET(ifp->if_vnet);
    849 		rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev));
    850 		CURVNET_RESTORE();
    851 	}
    852 }
    853 
    854 void
    855 ieee80211_notify_cac(struct ieee80211com *ic,
    856 	const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type)
    857 {
    858 	struct ieee80211_cac_event iev;
    859 	struct ieee80211vap *vap;
    860 	struct ifnet *ifp;
    861 
    862 	memset(&iev, 0, sizeof(iev));
    863 	iev.iev_flags = c->ic_flags;
    864 	iev.iev_freq = c->ic_freq;
    865 	iev.iev_ieee = c->ic_ieee;
    866 	iev.iev_type = type;
    867 	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
    868 		ifp = vap->iv_ifp;
    869 		CURVNET_SET(ifp->if_vnet);
    870 		rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev));
    871 		CURVNET_RESTORE();
    872 	}
    873 }
    874 
    875 void
    876 ieee80211_notify_node_deauth(struct ieee80211_node *ni)
    877 {
    878 	struct ieee80211vap *vap = ni->ni_vap;
    879 	struct ifnet *ifp = vap->iv_ifp;
    880 
    881 	IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth");
    882 
    883 	notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr);
    884 }
    885 
    886 void
    887 ieee80211_notify_node_auth(struct ieee80211_node *ni)
    888 {
    889 	struct ieee80211vap *vap = ni->ni_vap;
    890 	struct ifnet *ifp = vap->iv_ifp;
    891 
    892 	IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth");
    893 
    894 	notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr);
    895 }
    896 
    897 void
    898 ieee80211_notify_country(struct ieee80211vap *vap,
    899 	const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2])
    900 {
    901 	struct ifnet *ifp = vap->iv_ifp;
    902 	struct ieee80211_country_event iev;
    903 
    904 	memset(&iev, 0, sizeof(iev));
    905 	IEEE80211_ADDR_COPY(iev.iev_addr, bssid);
    906 	iev.iev_cc[0] = cc[0];
    907 	iev.iev_cc[1] = cc[1];
    908 	CURVNET_SET(ifp->if_vnet);
    909 	rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev));
    910 	CURVNET_RESTORE();
    911 }
    912 
    913 void
    914 ieee80211_notify_radio(struct ieee80211com *ic, int state)
    915 {
    916 	struct ieee80211_radio_event iev;
    917 	struct ieee80211vap *vap;
    918 	struct ifnet *ifp;
    919 
    920 	memset(&iev, 0, sizeof(iev));
    921 	iev.iev_state = state;
    922 	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
    923 		ifp = vap->iv_ifp;
    924 		CURVNET_SET(ifp->if_vnet);
    925 		rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev));
    926 		CURVNET_RESTORE();
    927 	}
    928 }
    929 
    930 #ifdef notyet
    931 void
    932 ieee80211_load_module(const char *modname)
    933 {
    934 	struct thread *td = curthread;
    935 
    936 	if (suser(td) == 0 && securelevel_gt(td->td_ucred, 0) == 0) {
    937 		mtx_lock(&Giant);
    938 		(void) linker_load_module(modname, NULL, NULL, NULL, NULL);
    939 		mtx_unlock(&Giant);
    940 	}
    941 }
    942 #endif
    943 
    944 #ifdef notyet
    945 static eventhandler_tag wlan_bpfevent;
    946 static eventhandler_tag wlan_ifllevent;
    947 
    948 static void
    949 bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
    950 {
    951 	/* NB: identify vap's by if_init */
    952 	if (dlt == DLT_IEEE802_11_RADIO &&
    953 	    ifp->if_init == ieee80211_init) {
    954 		struct ieee80211vap *vap = ifp->if_softc;
    955 		/*
    956 		 * Track bpf radiotap listener state.  We mark the vap
    957 		 * to indicate if any listener is present and the com
    958 		 * to indicate if any listener exists on any associated
    959 		 * vap.  This flag is used by drivers to prepare radiotap
    960 		 * state only when needed.
    961 		 */
    962 		if (attach) {
    963 			ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
    964 			if (vap->iv_opmode == IEEE80211_M_MONITOR)
    965 				atomic_add_int(&vap->iv_ic->ic_montaps, 1);
    966 		} else if (!bpf_peers_present(vap->iv_rawbpf)) {
    967 			ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
    968 			if (vap->iv_opmode == IEEE80211_M_MONITOR)
    969 				atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
    970 		}
    971 	}
    972 }
    973 
    974 /*
    975  * Change MAC address on the vap (if was not started).
    976  */
    977 static void
    978 wlan_iflladdr(void *arg __unused, struct ifnet *ifp)
    979 {
    980 	/* NB: identify vap's by if_init */
    981 	if (ifp->if_init == ieee80211_init &&
    982 	    (ifp->if_flags & IFF_UP) == 0) {
    983 		struct ieee80211vap *vap = ifp->if_softc;
    984 
    985 		IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
    986 	}
    987 }
    988 #endif
    989 
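/*
 * Minimal if_inc_counter() for NetBSD: bump the matching if_data
 * statistic directly.  IFCOUNTER_OQDROPS has no NetBSD counterpart
 * and is ignored.
 */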
    990 void
    991 if_inc_counter(struct ifnet *ifp, ift_counter ifc, int64_t value)
    992 {
    993 	switch (ifc) {
    994 	case IFCOUNTER_IPACKETS:
    995 		ifp->if_data.ifi_ipackets += value;
    996 		break;
    997 	case IFCOUNTER_IERRORS:
    998 		ifp->if_data.ifi_ierrors += value;
    999 		break;
   1000 	case IFCOUNTER_OPACKETS:
   1001 		ifp->if_data.ifi_opackets += value;
   1002 		break;
   1003 	case IFCOUNTER_OERRORS:
   1004 		ifp->if_data.ifi_oerrors += value;
   1005 		break;
    1006 	case IFCOUNTER_COLLISIONS:
    1007 		ifp->if_data.ifi_collisions += value;
    1008 		break;
    1009 	case IFCOUNTER_IBYTES:
    1010 		ifp->if_data.ifi_ibytes += value;
    1011 		break;
    1012 	case IFCOUNTER_OBYTES:
    1013 		ifp->if_data.ifi_obytes += value;
    1014 		break;
    1015 	case IFCOUNTER_IMCASTS:
    1016 		ifp->if_data.ifi_imcasts += value;
    1017 		break;
    1018 	case IFCOUNTER_OMCASTS:
    1019 		ifp->if_data.ifi_omcasts += value;
    1020 		break;
    1021 	case IFCOUNTER_IQDROPS:
    1022 		ifp->if_data.ifi_iqdrops += value;
    1023 		break;
    1024 	case IFCOUNTER_OQDROPS:
    1025 		/* no ifi_oqdrops field in NetBSD's if_data; silently ignore */
    1026 		break;
    1027 	case IFCOUNTER_NOPROTO:
    1028 		ifp->if_data.ifi_noproto += value;
    1029 		break;
   1030 	default:
    1031 		panic("if_inc_counter: non-existent counter");
   1032 	}
   1033 }
   1034 
   1035 
   1036 #ifdef notyet
   1037 /*
   1038  * Module glue.
   1039  *
   1040  * NB: the module name is "wlan" for compatibility with NetBSD.
   1041  */
   1042 static int
   1043 wlan_modevent(module_t mod, int type, void *unused)
   1044 {
   1045 	switch (type) {
   1046 	case MOD_LOAD:
   1047 		if (bootverbose)
   1048 			printf("wlan: <802.11 Link Layer>\n");
   1049 		wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track,
   1050 		    bpf_track, 0, EVENTHANDLER_PRI_ANY);
   1051 		wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
   1052 		    wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
   1053 		wlan_cloner = if_clone_simple(wlanname, wlan_clone_create,
   1054 		    wlan_clone_destroy, 0);
   1055 		return 0;
   1056 	case MOD_UNLOAD:
   1057 		if_clone_detach(wlan_cloner);
   1058 		EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
   1059 		EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent);
   1060 		return 0;
   1061 	}
   1062 	return EINVAL;
   1063 }
   1064 
   1065 static moduledata_t wlan_mod = {
   1066 	wlanname,
   1067 	wlan_modevent,
   1068 	0
   1069 };
   1070 DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
   1071 MODULE_VERSION(wlan, 1);
   1072 MODULE_DEPEND(wlan, ether, 1, 1, 1);
   1073 #endif
   1074 
   1075 #ifdef	IEEE80211_ALQ
   1076 MODULE_DEPEND(wlan, alq, 1, 1, 1);
   1077 #endif	/* IEEE80211_ALQ */
   1078 
   1079 /* Missing support for if_printf in NetBSD ... */
   1080 int
   1081 if_printf(struct ifnet *ifp, const char *fmt, ...)
   1082 {
    1083 	char if_fmt[256];
    1084 	va_list ap;
    1085 
    1086 	snprintf(if_fmt, sizeof(if_fmt), "%s: %s", ifp->if_xname, fmt);
    1087 	va_start(ap, fmt);
    1088 	vlog(LOG_INFO, if_fmt, ap);
    1089 	va_end(ap);
    1090 	return (0);
   1091 }
   1092 
   1093 /*
   1094  * Set the m_data pointer of a newly-allocated mbuf
   1095  * to place an object of the specified size at the
   1096  * end of the mbuf, longword aligned.
   1097  */
   1098 void
   1099 m_align(struct mbuf *m, int len)
   1100 {
   1101 	int adjust;
   1102 
   1103 	KASSERT(len != M_COPYALL);
   1104 
   1105 	if (m->m_flags & M_EXT)
   1106 		adjust = m->m_ext.ext_size - len;
   1107 	else if (m->m_flags & M_PKTHDR)
   1108 		adjust = MHLEN - len;
   1109 	else
   1110 		adjust = MLEN - len;
   1111 	m->m_data += adjust &~ (sizeof(long)-1);
   1112 }
   1113 
   1114 /*
   1115  * Append the specified data to the indicated mbuf chain,
    1116  * extending the mbuf chain if the new data does not fit in
   1117  * existing space.
   1118  *
   1119  * Return 1 if able to complete the job; otherwise 0.
   1120  */
   1121 int
   1122 m_append(struct mbuf *m0, int len, const void *cpv)
   1123 {
   1124 	struct mbuf *m, *n;
   1125 	int remainder, space;
   1126 	const char *cp = cpv;
   1127 
   1128 	KASSERT(len != M_COPYALL);
   1129 	for (m = m0; m->m_next != NULL; m = m->m_next)
   1130 		continue;
   1131 	remainder = len;
   1132 	space = M_TRAILINGSPACE(m);
   1133 	if (space > 0) {
   1134 		/*
   1135 		 * Copy into available space.
   1136 		 */
   1137 		if (space > remainder)
   1138 			space = remainder;
   1139 		memmove(mtod(m, char *) + m->m_len, cp, space);
   1140 		m->m_len += space;
   1141 		cp = cp + space, remainder -= space;
   1142 	}
   1143 	while (remainder > 0) {
   1144 		/*
   1145 		 * Allocate a new mbuf; could check space
   1146 		 * and allocate a cluster instead.
   1147 		 */
   1148 		n = m_get(M_DONTWAIT, m->m_type);
   1149 		if (n == NULL)
   1150 			break;
   1151 		n->m_len = min(MLEN, remainder);
   1152 		memmove(mtod(n, void *), cp, n->m_len);
   1153 		cp += n->m_len, remainder -= n->m_len;
   1154 		m->m_next = n;
   1155 		m = n;
   1156 	}
   1157 	if (m0->m_flags & M_PKTHDR)
   1158 		m0->m_pkthdr.len += len - remainder;
   1159 	return (remainder == 0);
   1160 }
   1161 
   1162 /*
   1163  * Create a writable copy of the mbuf chain.  While doing this
   1164  * we compact the chain with a goal of producing a chain with
   1165  * at most two mbufs.  The second mbuf in this chain is likely
   1166  * to be a cluster.  The primary purpose of this work is to create
   1167  * a writable packet for encryption, compression, etc.  The
   1168  * secondary goal is to linearize the data so the data can be
   1169  * passed to crypto hardware in the most efficient manner possible.
   1170  */
   1171 struct mbuf *
   1172 m_unshare(struct mbuf *m0, int how)
   1173 {
   1174 	struct mbuf *m, *mprev;
   1175 	struct mbuf *n, *mfirst, *mlast;
   1176 	int len, off;
   1177 
   1178 	mprev = NULL;
   1179 	for (m = m0; m != NULL; m = mprev->m_next) {
   1180 		/*
   1181 		 * Regular mbufs are ignored unless there's a cluster
    1182 		 * in front of them that we can use to coalesce.  We do
   1183 		 * the latter mainly so later clusters can be coalesced
   1184 		 * also w/o having to handle them specially (i.e. convert
   1185 		 * mbuf+cluster -> cluster).  This optimization is heavily
   1186 		 * influenced by the assumption that we're running over
   1187 		 * Ethernet where MCLBYTES is large enough that the max
   1188 		 * packet size will permit lots of coalescing into a
   1189 		 * single cluster.  This in turn permits efficient
   1190 		 * crypto operations, especially when using hardware.
   1191 		 */
   1192 		if ((m->m_flags & M_EXT) == 0) {
   1193 			if (mprev && (mprev->m_flags & M_EXT) &&
   1194 			    m->m_len <= M_TRAILINGSPACE(mprev)) {
   1195 				/* XXX: this ignores mbuf types */
   1196 				memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
   1197 				    mtod(m, __uint8_t *), m->m_len);
   1198 				mprev->m_len += m->m_len;
   1199 				mprev->m_next = m->m_next;	/* unlink from chain */
   1200 				m_free(m);			/* reclaim mbuf */
   1201 			} else {
   1202 				mprev = m;
   1203 			}
   1204 			continue;
   1205 		}
   1206 		/*
   1207 		 * Writable mbufs are left alone (for now).
   1208 		 */
   1209 		if (!M_READONLY(m)) {
   1210 			mprev = m;
   1211 			continue;
   1212 		}
   1213 
   1214 		/*
   1215 		 * Not writable, replace with a copy or coalesce with
   1216 		 * the previous mbuf if possible (since we have to copy
   1217 		 * it anyway, we try to reduce the number of mbufs and
   1218 		 * clusters so that future work is easier).
   1219 		 */
   1220 		FBSDKASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
   1221 		/* NB: we only coalesce into a cluster or larger */
   1222 		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
   1223 		    m->m_len <= M_TRAILINGSPACE(mprev)) {
   1224 			/* XXX: this ignores mbuf types */
   1225 			memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
   1226 			    mtod(m, __uint8_t *), m->m_len);
   1227 			mprev->m_len += m->m_len;
   1228 			mprev->m_next = m->m_next;	/* unlink from chain */
   1229 			m_free(m);			/* reclaim mbuf */
   1230 			continue;
   1231 		}
   1232 
   1233 		/*
   1234 		 * Allocate new space to hold the copy and copy the data.
   1235 		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
   1236 		 * splitting them into clusters.  We could just malloc a
   1237 		 * buffer and make it external but too many device drivers
   1238 		 * don't know how to break up the non-contiguous memory when
   1239 		 * doing DMA.
   1240 		 */
   1241 		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
   1242 		if (n == NULL) {
   1243 			m_freem(m0);
   1244 			return (NULL);
   1245 		}
   1246 		if (m->m_flags & M_PKTHDR) {
   1247 			FBSDKASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
   1248 			    __func__, m0, m));
   1249 			m_move_pkthdr(n, m);
   1250 		}
   1251 		len = m->m_len;
   1252 		off = 0;
   1253 		mfirst = n;
   1254 		mlast = NULL;
   1255 		for (;;) {
   1256 			int cc = min(len, MCLBYTES);
   1257 			memcpy(mtod(n, __uint8_t *), mtod(m, __uint8_t *) + off, cc);
   1258 			n->m_len = cc;
   1259 			if (mlast != NULL)
   1260 				mlast->m_next = n;
   1261 			mlast = n;
   1262 #if 0
   1263 			newipsecstat.ips_clcopied++;
   1264 #endif
   1265 
   1266 			len -= cc;
   1267 			if (len <= 0)
   1268 				break;
   1269 			off += cc;
   1270 
   1271 			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
   1272 			if (n == NULL) {
   1273 				m_freem(mfirst);
   1274 				m_freem(m0);
   1275 				return (NULL);
   1276 			}
   1277 		}
   1278 		n->m_next = m->m_next;
   1279 		if (mprev == NULL)
   1280 			m0 = mfirst;		/* new head of chain */
   1281 		else
   1282 			mprev->m_next = mfirst;	/* replace old mbuf */
   1283 		m_free(m);			/* release old mbuf */
   1284 		mprev = mfirst;
   1285 	}
   1286 	return (m0);
   1287 }
   1288