 1 /* $NetBSD: ieee80211_netbsd.c,v 1.31.2.2 2018/07/12 16:35:34 phil Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 2003-2009 Sam Leffler, Errno Consulting
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 /* __FBSDID("$FreeBSD$"); */
32 __KERNEL_RCSID(0, "$NetBSD: ieee80211_netbsd.c,v 1.31.2.2 2018/07/12 16:35:34 phil Exp $");
33
34 /*
35 * IEEE 802.11 support (NetBSD-specific code)
36 */
37
38 #include "opt_wlan.h"
39
40 #include <sys/atomic.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/module.h>
47 #include <sys/proc.h>
48 #include <sys/sysctl.h>
49 #include <sys/syslog.h>
50
51 #include <sys/socket.h>
52
53 #include <net/bpf.h>
54 #include <net/if.h>
55 #include <net/if_dl.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <net/route.h>
60
61 #include <net80211/ieee80211_var.h>
62 #include <net80211/ieee80211_input.h>
63
64 #ifdef notyet
65 SYSCTL_NODE(_net, OID_AUTO, wlan, CTLFLAG_RD, 0, "IEEE 80211 parameters");
66
67 #ifdef IEEE80211_DEBUG
68 static int ieee80211_debug = 0;
69 SYSCTL_INT(_net_wlan, OID_AUTO, debug, CTLFLAG_RW, &ieee80211_debug,
70 0, "debugging printfs");
71 #endif
72 #endif /* notyet */
73
74 /* static MALLOC_DEFINE(M_80211_COM, "80211com", "802.11 com state"); NNN */
75
76 #ifdef notyet
77 static const char wlanname[] = "wlan";
78 static struct if_clone *wlan_cloner;
79
80 static int
81 wlan_clone_create(struct if_clone *ifc, int unit, caddr_t params)
82 {
83 struct ieee80211_clone_params cp;
84 struct ieee80211vap *vap;
85 struct ieee80211com *ic;
86 int error;
87
88 error = copyin(params, &cp, sizeof(cp));
89 if (error)
90 return error;
91 ic = ieee80211_find_com(cp.icp_parent);
92 if (ic == NULL)
93 return ENXIO;
94 if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) {
95 ic_printf(ic, "%s: invalid opmode %d\n", __func__,
96 cp.icp_opmode);
97 return EINVAL;
98 }
99 if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) {
100 ic_printf(ic, "%s mode not supported\n",
101 ieee80211_opmode_name[cp.icp_opmode]);
102 return EOPNOTSUPP;
103 }
104 if ((cp.icp_flags & IEEE80211_CLONE_TDMA) &&
105 #ifdef IEEE80211_SUPPORT_TDMA
106 (ic->ic_caps & IEEE80211_C_TDMA) == 0
107 #else
108 (1)
109 #endif
110 ) {
111 ic_printf(ic, "TDMA not supported\n");
112 return EOPNOTSUPP;
113 }
114 vap = ic->ic_vap_create(ic, wlanname, unit,
115 cp.icp_opmode, cp.icp_flags, cp.icp_bssid,
116 cp.icp_flags & IEEE80211_CLONE_MACADDR ?
117 cp.icp_macaddr : ic->ic_macaddr);
118
119 return (vap == NULL ? EIO : 0);
120 }
121
122 static void
123 wlan_clone_destroy(struct ifnet *ifp)
124 {
125 struct ieee80211vap *vap = ifp->if_softc;
126 struct ieee80211com *ic = vap->iv_ic;
127
128 ic->ic_vap_delete(vap);
129 }
130 #endif
131
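/*
 * Destroying a vap normally goes through the wlan cloner
 * (if_clone_destroyif); that path is still under #ifdef notyet in
 * this port, so reaching this function without it panics.
 */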
132 void
133 ieee80211_vap_destroy(struct ieee80211vap *vap)
134 {
135 #ifdef notyet
136 CURVNET_SET(vap->iv_ifp->if_vnet);
137 if_clone_destroyif(wlan_cloner, vap->iv_ifp);
138 CURVNET_RESTORE();
139 #else
140 panic("ieee80211_vap_destroy");
141 #endif
142 }
143
144 #ifdef notyet
145 int
146 ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS)
147 {
148 int msecs = ticks_to_msecs(*(int *)arg1);
149 int error, t;
150
151 error = sysctl_handle_int(oidp, &msecs, 0, req);
152 if (error || !req->newptr)
153 return error;
154 t = msecs_to_ticks(msecs);
155 *(int *)arg1 = (t < 1) ? 1 : t;
156 return 0;
157 }
158
159 static int
160 ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)
161 {
162 int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT;
163 int error;
164
165 error = sysctl_handle_int(oidp, &inact, 0, req);
166 if (error || !req->newptr)
167 return error;
168 *(int *)arg1 = inact / IEEE80211_INACT_WAIT;
169 return 0;
170 }
171
172 static int
173 ieee80211_sysctl_parent(SYSCTL_HANDLER_ARGS)
174 {
175 struct ieee80211com *ic = arg1;
176
177 return SYSCTL_OUT_STR(req, ic->ic_name);
178 }
179
180 static int
181 ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS)
182 {
183 struct ieee80211com *ic = arg1;
184 int t = 0, error;
185
186 error = sysctl_handle_int(oidp, &t, 0, req);
187 if (error || !req->newptr)
188 return error;
189 IEEE80211_LOCK(ic);
190 ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
191 IEEE80211_UNLOCK(ic);
192 return 0;
193 }
194
195 /*
196 * For now, just restart everything.
197 *
 198 * Later on, it'd be nice to have a separate VAP restart in
 199 * addition to the full-device restart.
200 */
201 static int
202 ieee80211_sysctl_vap_restart(SYSCTL_HANDLER_ARGS)
203 {
204 struct ieee80211vap *vap = arg1;
205 int t = 0, error;
206
207 error = sysctl_handle_int(oidp, &t, 0, req);
208 if (error || !req->newptr)
209 return error;
210
211 ieee80211_restart_all(vap->iv_ic);
212 return 0;
213 }
214 #endif /* notyet */
215
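/*
 * The per-com and per-vap sysctl trees are not wired up on NetBSD yet
 * (see the #ifdef notyet blocks above and below), so the attach,
 * detach, vattach and vdetach hooks are stubs or compile to no-ops.
 */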
216 void
217 ieee80211_sysctl_attach(struct ieee80211com *ic)
218 {
219 }
220
221 void
222 ieee80211_sysctl_detach(struct ieee80211com *ic)
223 {
224 }
225
226 void
227 ieee80211_sysctl_vattach(struct ieee80211vap *vap)
228 {
229 #ifdef notyet
230 struct ifnet *ifp = vap->iv_ifp;
231 struct sysctl_ctx_list *ctx;
232 struct sysctl_oid *oid;
233 char num[14]; /* sufficient for 32 bits */
234
235 ctx = (struct sysctl_ctx_list *) IEEE80211_MALLOC(sizeof(struct sysctl_ctx_list),
236 M_DEVBUF, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
237 if (ctx == NULL) {
238 if_printf(ifp, "%s: cannot allocate sysctl context!\n",
239 __func__);
240 return;
241 }
242 sysctl_ctx_init(ctx);
243 snprintf(num, sizeof(num), "%u", ifp->if_dunit);
244 oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan),
245 OID_AUTO, num, CTLFLAG_RD, NULL, "");
246 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
247 "%parent", CTLTYPE_STRING | CTLFLAG_RD, vap->iv_ic, 0,
248 ieee80211_sysctl_parent, "A", "parent device");
249 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
250 "driver_caps", CTLFLAG_RW, &vap->iv_caps, 0,
251 "driver capabilities");
252 #ifdef IEEE80211_DEBUG
253 vap->iv_debug = ieee80211_debug;
254 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
255 "debug", CTLFLAG_RW, &vap->iv_debug, 0,
256 "control debugging printfs");
257 #endif
258 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
259 "bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0,
260 "consecutive beacon misses before scanning");
261 /* XXX inherit from tunables */
262 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
263 "inact_run", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_run, 0,
264 ieee80211_sysctl_inact, "I",
265 "station inactivity timeout (sec)");
266 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
267 "inact_probe", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_probe, 0,
268 ieee80211_sysctl_inact, "I",
269 "station inactivity probe timeout (sec)");
270 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
271 "inact_auth", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_auth, 0,
272 ieee80211_sysctl_inact, "I",
273 "station authentication timeout (sec)");
274 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
275 "inact_init", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_init, 0,
276 ieee80211_sysctl_inact, "I",
277 "station initial state timeout (sec)");
278 if (vap->iv_htcaps & IEEE80211_HTC_HT) {
279 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
280 "ampdu_mintraffic_bk", CTLFLAG_RW,
281 &vap->iv_ampdu_mintraffic[WME_AC_BK], 0,
282 "BK traffic tx aggr threshold (pps)");
283 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
284 "ampdu_mintraffic_be", CTLFLAG_RW,
285 &vap->iv_ampdu_mintraffic[WME_AC_BE], 0,
286 "BE traffic tx aggr threshold (pps)");
287 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
288 "ampdu_mintraffic_vo", CTLFLAG_RW,
289 &vap->iv_ampdu_mintraffic[WME_AC_VO], 0,
290 "VO traffic tx aggr threshold (pps)");
291 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
292 "ampdu_mintraffic_vi", CTLFLAG_RW,
293 &vap->iv_ampdu_mintraffic[WME_AC_VI], 0,
294 "VI traffic tx aggr threshold (pps)");
295 }
296
297 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
298 "force_restart", CTLTYPE_INT | CTLFLAG_RW, vap, 0,
299 ieee80211_sysctl_vap_restart, "I",
300 "force a VAP restart");
301
302 if (vap->iv_caps & IEEE80211_C_DFS) {
303 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
304 "radar", CTLTYPE_INT | CTLFLAG_RW, vap->iv_ic, 0,
305 ieee80211_sysctl_radar, "I", "simulate radar event");
306 }
307 vap->iv_sysctl = ctx;
308 vap->iv_oid = oid;
309 #endif
310 }
311
312 void
313 ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
314 {
315 #ifdef notyet
316 if (vap->iv_sysctl != NULL) {
317 sysctl_ctx_free(vap->iv_sysctl);
318 IEEE80211_FREE(vap->iv_sysctl, M_DEVBUF);
319 vap->iv_sysctl = NULL;
320 }
321 #endif
322 }
323
324
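/*
 * Decrement the node reference count and return non-zero if it reached
 * zero.  There is no atomic decrement-and-test primitive available
 * here, so emulate it: after the decrement, a CAS of 0 -> 1 succeeds
 * (atomic_cas_uint returns the old value, 0) only when the count
 * actually hit zero, and in that case it also leaves one reference for
 * the caller that will reclaim the node.
 */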
325 int
326 ieee80211_node_dectestref(struct ieee80211_node *ni)
327 {
328 /* XXX need equivalent of atomic_dec_and_test */
329 atomic_subtract_int(&ni->ni_refcnt, 1);
330 return atomic_cas_uint(&ni->ni_refcnt, 0, 1) == 0;
331 }
332
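/*
 * Drain all frames from the queue, releasing the node reference
 * recorded in each mbuf's receive-interface pointer and freeing the
 * mbuf (chain).
 */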
333 void
334 ieee80211_drain_ifq(struct ifqueue *ifq)
335 {
336 struct ieee80211_node *ni;
337 struct mbuf *m;
338
339 for (;;) {
340 IF_DEQUEUE(ifq, m);
341 if (m == NULL)
342 break;
343
344 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
345 FBSDKASSERT(ni != NULL, ("frame w/o node"));
346 ieee80211_free_node(ni);
347 ieee80211_free_mbuf(m);
348 }
349 }
350
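/*
 * Remove from the queue only those frames that belong to the given
 * vap, reclaiming the node reference and mbuf for each; the queue
 * length and tail pointer are fixed up afterwards.  Other traffic is
 * left untouched.
 */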
351 void
352 ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
353 {
354 struct ieee80211_node *ni;
355 struct mbuf *m, **mprev;
356
357 IFQ_LOCK(ifq);
358 mprev = &ifq->ifq_head;
359 while ((m = *mprev) != NULL) {
360 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
361 if (ni != NULL && ni->ni_vap == vap) {
362 *mprev = m->m_nextpkt; /* remove from list */
363 ifq->ifq_len--;
364
365 ieee80211_free_node(ni); /* reclaim ref */
366 ieee80211_free_mbuf(m);
367 } else
368 mprev = &m->m_nextpkt;
369 }
370 /* recalculate tail ptr */
371 m = ifq->ifq_head;
372 for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
373 ;
374 ifq->ifq_tail = m;
375 IFQ_UNLOCK(ifq);
376 }
377
378 /*
 379 * Align the m_data pointer of a cluster-backed mbuf (e.g. from m_getcl) to
 380 * place an object of the specified size at the end of the cluster, longword aligned.
381 */
382 #define MC_ALIGN(m, len) \
383 do { \
384 (m)->m_data += rounddown2(MCLBYTES - (len), sizeof(long)); \
385 } while (/* CONSTCOND */ 0)
386
387 /*
388 * Allocate and setup a management frame of the specified
389 * size. We return the mbuf and a pointer to the start
390 * of the contiguous data area that's been reserved based
391 * on the packet length. The data area is forced to 32-bit
392 * alignment and the buffer length to a multiple of 4 bytes.
393 * This is done mainly so beacon frames (that require this)
394 * can use this interface too.
395 */
396 struct mbuf *
397 ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
398 {
399 struct mbuf *m;
400 u_int len;
401
402 /*
403 * NB: we know the mbuf routines will align the data area
404 * so we don't need to do anything special.
405 */
406 len = roundup2(headroom + pktlen, 4);
407 FBSDKASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
408 if (len < MINCLSIZE) {
409 m = m_gethdr(M_NOWAIT, MT_DATA);
410 /*
411 * Align the data in case additional headers are added.
412 * This should only happen when a WEP header is added
413 * which only happens for shared key authentication mgt
414 * frames which all fit in MHLEN.
415 */
416 if (m != NULL)
417 M_ALIGN(m, len);
418 } else {
419 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
420 if (m != NULL)
421 MC_ALIGN(m, len);
422 }
423 if (m != NULL) {
424 m->m_data += headroom;
425 *frm = m->m_data;
426 }
427 return m;
428 }
429
430 #ifndef __NO_STRICT_ALIGNMENT
431 /*
432 * Re-align the payload in the mbuf. This is mainly used (right now)
433 * to handle IP header alignment requirements on certain architectures.
434 */
435 struct mbuf *
436 ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
437 {
438 int pktlen, space;
439 struct mbuf *n;
440
441 pktlen = m->m_pkthdr.len;
442 space = pktlen + align;
443 if (space < MINCLSIZE)
444 n = m_gethdr(M_NOWAIT, MT_DATA);
445 else {
446 n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
447 space <= MCLBYTES ? MCLBYTES :
448 #if MJUMPAGESIZE != MCLBYTES
449 space <= MJUMPAGESIZE ? MJUMPAGESIZE :
450 #endif
451 space <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
452 }
453 if (__predict_true(n != NULL)) {
454 m_move_pkthdr(n, m);
455 n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
456 m_copydata(m, 0, pktlen, mtod(n, caddr_t));
457 n->m_len = pktlen;
458 } else {
459 IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
460 mtod(m, const struct ieee80211_frame *), NULL,
461 "%s", "no mbuf to realign");
462 vap->iv_stats.is_rx_badalign++;
463 }
464 m_freem(m);
465 return n;
466 }
467 #endif /* !__NO_STRICT_ALIGNMENT */
468
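/*
 * Attach a tx-completion callback to the mbuf using an m_tag and mark
 * the mbuf M_TXCB; ieee80211_process_callback() below invokes it when
 * the driver reports transmit status.  Returns 1 on success, 0 if the
 * tag could not be allocated.
 */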
469 int
470 ieee80211_add_callback(struct mbuf *m,
471 void (*func)(struct ieee80211_node *, void *, int), void *arg)
472 {
473 struct m_tag *mtag;
474 struct ieee80211_cb *cb;
475
476 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_CALLBACK,
477 sizeof(struct ieee80211_cb), M_NOWAIT);
478 if (mtag == NULL)
479 return 0;
480
481 cb = (struct ieee80211_cb *)(mtag+1);
482 cb->func = func;
483 cb->arg = arg;
484 m_tag_prepend(m, mtag);
485 m->m_flags |= M_TXCB;
486 return 1;
487 }
488
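/*
 * Attach a copy of the raw/BPF transmit parameters to the mbuf via an
 * m_tag so they travel with the frame to the driver;
 * ieee80211_get_xmit_params() retrieves them.  Returns 1 on success,
 * 0 on allocation failure.
 */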
489 int
490 ieee80211_add_xmit_params(struct mbuf *m,
491 const struct ieee80211_bpf_params *params)
492 {
493 struct m_tag *mtag;
494 struct ieee80211_tx_params *tx;
495
496 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_XMIT_PARAMS,
497 sizeof(struct ieee80211_tx_params), M_NOWAIT);
498 if (mtag == NULL)
499 return (0);
500
501 tx = (struct ieee80211_tx_params *)(mtag+1);
502 memcpy(&tx->params, params, sizeof(struct ieee80211_bpf_params));
503 m_tag_prepend(m, mtag);
504 return (1);
505 }
506
507 int
508 ieee80211_get_xmit_params(struct mbuf *m,
509 struct ieee80211_bpf_params *params)
510 {
511 struct m_tag *mtag;
512 struct ieee80211_tx_params *tx;
513
514 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_XMIT_PARAMS,
515 NULL);
516 if (mtag == NULL)
517 return (-1);
518 tx = (struct ieee80211_tx_params *)(mtag + 1);
519 memcpy(params, &tx->params, sizeof(struct ieee80211_bpf_params));
520 return (0);
521 }
522
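/*
 * Invoke the tx-completion callback attached by
 * ieee80211_add_callback(), if any, passing the driver-reported status.
 */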
523 void
524 ieee80211_process_callback(struct ieee80211_node *ni,
525 struct mbuf *m, int status)
526 {
527 struct m_tag *mtag;
528
529 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_CALLBACK, NULL);
530 if (mtag != NULL) {
531 struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1);
532 cb->func(ni, cb->arg, status);
533 }
534 }
535
536 /*
537 * Add RX parameters to the given mbuf.
538 *
539 * Returns 1 if OK, 0 on error.
540 */
541 int
542 ieee80211_add_rx_params(struct mbuf *m, const struct ieee80211_rx_stats *rxs)
543 {
544 struct m_tag *mtag;
545 struct ieee80211_rx_params *rx;
546
547 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
548 sizeof(struct ieee80211_rx_stats), M_NOWAIT);
549 if (mtag == NULL)
550 return (0);
551
552 rx = (struct ieee80211_rx_params *)(mtag + 1);
553 memcpy(&rx->params, rxs, sizeof(*rxs));
554 m_tag_prepend(m, mtag);
555 return (1);
556 }
557
558 int
559 ieee80211_get_rx_params(struct mbuf *m, struct ieee80211_rx_stats *rxs)
560 {
561 struct m_tag *mtag;
562 struct ieee80211_rx_params *rx;
563
564 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
565 NULL);
566 if (mtag == NULL)
567 return (-1);
568 rx = (struct ieee80211_rx_params *)(mtag + 1);
569 memcpy(rxs, &rx->params, sizeof(*rxs));
570 return (0);
571 }
572
573 const struct ieee80211_rx_stats *
574 ieee80211_get_rx_params_ptr(struct mbuf *m)
575 {
576 struct m_tag *mtag;
577 struct ieee80211_rx_params *rx;
578
579 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
580 NULL);
581 if (mtag == NULL)
582 return (NULL);
583 rx = (struct ieee80211_rx_params *)(mtag + 1);
584 return (&rx->params);
585 }
586
587
588 /*
589 * Add TOA parameters to the given mbuf.
590 */
591 int
592 ieee80211_add_toa_params(struct mbuf *m, const struct ieee80211_toa_params *p)
593 {
594 struct m_tag *mtag;
595 struct ieee80211_toa_params *rp;
596
597 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
598 sizeof(struct ieee80211_toa_params), M_NOWAIT);
599 if (mtag == NULL)
600 return (0);
601
602 rp = (struct ieee80211_toa_params *)(mtag + 1);
603 memcpy(rp, p, sizeof(*rp));
604 m_tag_prepend(m, mtag);
605 return (1);
606 }
607
608 int
609 ieee80211_get_toa_params(struct mbuf *m, struct ieee80211_toa_params *p)
610 {
611 struct m_tag *mtag;
612 struct ieee80211_toa_params *rp;
613
614 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
615 NULL);
616 if (mtag == NULL)
617 return (0);
618 rp = (struct ieee80211_toa_params *)(mtag + 1);
619 if (p != NULL)
620 memcpy(p, rp, sizeof(*p));
621 return (1);
622 }
623
624 /*
625 * Transmit a frame to the parent interface.
626 */
627 int
628 ieee80211_parent_xmitpkt(struct ieee80211com *ic, struct mbuf *m)
629 {
630 int error;
631
632 /*
 633 * Assert that the IC TX lock is held - this ensures that the
 634 * processing -> queuing order is maintained.
635 */
636 IEEE80211_TX_LOCK_ASSERT(ic);
637 error = ic->ic_transmit(ic, m);
638 if (error) {
639 struct ieee80211_node *ni;
640
641 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
642
643 /* XXX number of fragments */
644 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
645 ieee80211_free_node(ni);
646 ieee80211_free_mbuf(m);
647 }
648 return (error);
649 }
650
651 /*
652 * Transmit a frame to the VAP interface.
653 */
654 int
655 ieee80211_vap_xmitpkt(struct ieee80211vap *vap, struct mbuf *m)
656 {
657 struct ifnet *ifp = vap->iv_ifp;
658
659 /*
660 * When transmitting via the VAP, we shouldn't hold
661 * any IC TX lock as the VAP TX path will acquire it.
662 */
663 IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic);
664
665 return (ifp->if_transmit(ifp, m));
666
667 }
668
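/*
 * Fill the buffer with pseudo-random bytes, 32 bits at a time, using
 * arc4random().
 */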
669 void
670 get_random_bytes(void *p, size_t n)
671 {
672 uint8_t *dp = p;
673
674 while (n > 0) {
675 uint32_t v = arc4random();
 676 size_t nb = n > sizeof(uint32_t) ? sizeof(uint32_t) : n;
 677 bcopy(&v, dp, nb);
 678 dp += nb, n -= nb;
679 }
680 }
681
682 /*
683 * Helper function for events that pass just a single mac address.
684 */
685 static void
686 notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN])
687 {
688 struct ieee80211_join_event iev;
689
690 CURVNET_SET(ifp->if_vnet);
691 memset(&iev, 0, sizeof(iev));
692 IEEE80211_ADDR_COPY(iev.iev_addr, mac);
693 rt_ieee80211msg(ifp, op, &iev, sizeof(iev));
694 CURVNET_RESTORE();
695 }
696
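/*
 * Post a routing-socket wireless event for a node (re)association.
 * For the bss node this is an (RE)ASSOC event and the interface link
 * state is marked up; for any other node a (RE)JOIN event is posted.
 */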
697 void
698 ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
699 {
700 struct ieee80211vap *vap = ni->ni_vap;
701 struct ifnet *ifp = vap->iv_ifp;
702
703 CURVNET_SET_QUIET(ifp->if_vnet);
704 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
705 (ni == vap->iv_bss) ? "bss " : "");
706
707 if (ni == vap->iv_bss) {
708 notify_macaddr(ifp, newassoc ?
709 RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
710 if_link_state_change(ifp, LINK_STATE_UP);
711 } else {
712 notify_macaddr(ifp, newassoc ?
713 RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
714 }
715 CURVNET_RESTORE();
716 }
717
718 void
719 ieee80211_notify_node_leave(struct ieee80211_node *ni)
720 {
721 struct ieee80211vap *vap = ni->ni_vap;
722 struct ifnet *ifp = vap->iv_ifp;
723
724 CURVNET_SET_QUIET(ifp->if_vnet);
725 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
726 (ni == vap->iv_bss) ? "bss " : "");
727
728 if (ni == vap->iv_bss) {
729 rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
730 if_link_state_change(ifp, LINK_STATE_DOWN);
731 } else {
732 /* fire off wireless event station leaving */
733 notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
734 }
735 CURVNET_RESTORE();
736 }
737
738 void
739 ieee80211_notify_scan_done(struct ieee80211vap *vap)
740 {
741 struct ifnet *ifp = vap->iv_ifp;
742
743 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
744
745 /* dispatch wireless event indicating scan completed */
746 CURVNET_SET(ifp->if_vnet);
747 rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0);
748 CURVNET_RESTORE();
749 }
750
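/*
 * Log a crypto replay failure and, when the vap has an interface
 * attached, post an RTM_IEEE80211_REPLAY event describing the key and
 * sequence counters involved.
 */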
751 void
752 ieee80211_notify_replay_failure(struct ieee80211vap *vap,
753 const struct ieee80211_frame *wh, const struct ieee80211_key *k,
754 u_int64_t rsc, int tid)
755 {
756 struct ifnet *ifp = vap->iv_ifp;
757
758 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
759 "%s replay detected tid %d <rsc %ju, csc %ju, keyix %u rxkeyix %u>",
760 k->wk_cipher->ic_name, tid, (intmax_t) rsc,
761 (intmax_t) k->wk_keyrsc[tid],
762 k->wk_keyix, k->wk_rxkeyix);
763
764 if (ifp != NULL) { /* NB: for cipher test modules */
765 struct ieee80211_replay_event iev;
766
767 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
768 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
769 iev.iev_cipher = k->wk_cipher->ic_cipher;
770 if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE)
771 iev.iev_keyix = k->wk_rxkeyix;
772 else
773 iev.iev_keyix = k->wk_keyix;
774 iev.iev_keyrsc = k->wk_keyrsc[tid];
775 iev.iev_rsc = rsc;
776 CURVNET_SET(ifp->if_vnet);
777 rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev));
778 CURVNET_RESTORE();
779 }
780 }
781
782 void
783 ieee80211_notify_michael_failure(struct ieee80211vap *vap,
784 const struct ieee80211_frame *wh, u_int keyix)
785 {
786 struct ifnet *ifp = vap->iv_ifp;
787
788 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
789 "michael MIC verification failed <keyix %u>", keyix);
790 vap->iv_stats.is_rx_tkipmic++;
791
792 if (ifp != NULL) { /* NB: for cipher test modules */
793 struct ieee80211_michael_event iev;
794
795 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
796 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
797 iev.iev_cipher = IEEE80211_CIPHER_TKIP;
798 iev.iev_keyix = keyix;
799 CURVNET_SET(ifp->if_vnet);
800 rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev));
801 CURVNET_RESTORE();
802 }
803 }
804
805 void
806 ieee80211_notify_wds_discover(struct ieee80211_node *ni)
807 {
808 struct ieee80211vap *vap = ni->ni_vap;
809 struct ifnet *ifp = vap->iv_ifp;
810
811 notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr);
812 }
813
814 void
815 ieee80211_notify_csa(struct ieee80211com *ic,
816 const struct ieee80211_channel *c, int mode, int count)
817 {
818 struct ieee80211_csa_event iev;
819 struct ieee80211vap *vap;
820 struct ifnet *ifp;
821
822 memset(&iev, 0, sizeof(iev));
823 iev.iev_flags = c->ic_flags;
824 iev.iev_freq = c->ic_freq;
825 iev.iev_ieee = c->ic_ieee;
826 iev.iev_mode = mode;
827 iev.iev_count = count;
828 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
829 ifp = vap->iv_ifp;
830 CURVNET_SET(ifp->if_vnet);
831 rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev));
832 CURVNET_RESTORE();
833 }
834 }
835
836 void
837 ieee80211_notify_radar(struct ieee80211com *ic,
838 const struct ieee80211_channel *c)
839 {
840 struct ieee80211_radar_event iev;
841 struct ieee80211vap *vap;
842 struct ifnet *ifp;
843
844 memset(&iev, 0, sizeof(iev));
845 iev.iev_flags = c->ic_flags;
846 iev.iev_freq = c->ic_freq;
847 iev.iev_ieee = c->ic_ieee;
848 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
849 ifp = vap->iv_ifp;
850 CURVNET_SET(ifp->if_vnet);
851 rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev));
852 CURVNET_RESTORE();
853 }
854 }
855
856 void
857 ieee80211_notify_cac(struct ieee80211com *ic,
858 const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type)
859 {
860 struct ieee80211_cac_event iev;
861 struct ieee80211vap *vap;
862 struct ifnet *ifp;
863
864 memset(&iev, 0, sizeof(iev));
865 iev.iev_flags = c->ic_flags;
866 iev.iev_freq = c->ic_freq;
867 iev.iev_ieee = c->ic_ieee;
868 iev.iev_type = type;
869 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
870 ifp = vap->iv_ifp;
871 CURVNET_SET(ifp->if_vnet);
872 rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev));
873 CURVNET_RESTORE();
874 }
875 }
876
877 void
878 ieee80211_notify_node_deauth(struct ieee80211_node *ni)
879 {
880 struct ieee80211vap *vap = ni->ni_vap;
881 struct ifnet *ifp = vap->iv_ifp;
882
883 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth");
884
885 notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr);
886 }
887
888 void
889 ieee80211_notify_node_auth(struct ieee80211_node *ni)
890 {
891 struct ieee80211vap *vap = ni->ni_vap;
892 struct ifnet *ifp = vap->iv_ifp;
893
894 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth");
895
896 notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr);
897 }
898
899 void
900 ieee80211_notify_country(struct ieee80211vap *vap,
901 const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2])
902 {
903 struct ifnet *ifp = vap->iv_ifp;
904 struct ieee80211_country_event iev;
905
906 memset(&iev, 0, sizeof(iev));
907 IEEE80211_ADDR_COPY(iev.iev_addr, bssid);
908 iev.iev_cc[0] = cc[0];
909 iev.iev_cc[1] = cc[1];
910 CURVNET_SET(ifp->if_vnet);
911 rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev));
912 CURVNET_RESTORE();
913 }
914
915 void
916 ieee80211_notify_radio(struct ieee80211com *ic, int state)
917 {
918 struct ieee80211_radio_event iev;
919 struct ieee80211vap *vap;
920 struct ifnet *ifp;
921
922 memset(&iev, 0, sizeof(iev));
923 iev.iev_state = state;
924 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
925 ifp = vap->iv_ifp;
926 CURVNET_SET(ifp->if_vnet);
927 rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev));
928 CURVNET_RESTORE();
929 }
930 }
931
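/*
 * On FreeBSD this kicks the kernel linker to auto-load crypto/auth
 * modules on demand (see the #ifdef notyet branch); that machinery is
 * not hooked up here yet, so just print a hint to load the module by
 * hand.
 */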
932 void
933 ieee80211_load_module(const char *modname)
934 {
935
936 #ifdef notyet
937 struct thread *td = curthread;
938
939 if (suser(td) == 0 && securelevel_gt(td->td_ucred, 0) == 0) {
940 mtx_lock(&Giant);
941 (void) linker_load_module(modname, NULL, NULL, NULL, NULL);
942 mtx_unlock(&Giant);
943 }
944 #else
945 printf("%s: load the %s module by hand for now.\n", __func__, modname);
946 #endif
947 }
948
949 #ifdef notyet
950 static eventhandler_tag wlan_bpfevent;
951 static eventhandler_tag wlan_ifllevent;
952
953 static void
954 bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
955 {
956 /* NB: identify vap's by if_init */
957 if (dlt == DLT_IEEE802_11_RADIO &&
958 ifp->if_init == ieee80211_init) {
959 struct ieee80211vap *vap = ifp->if_softc;
960 /*
961 * Track bpf radiotap listener state. We mark the vap
962 * to indicate if any listener is present and the com
963 * to indicate if any listener exists on any associated
964 * vap. This flag is used by drivers to prepare radiotap
965 * state only when needed.
966 */
967 if (attach) {
968 ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
969 if (vap->iv_opmode == IEEE80211_M_MONITOR)
970 atomic_add_int(&vap->iv_ic->ic_montaps, 1);
971 } else if (!bpf_peers_present(vap->iv_rawbpf)) {
972 ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
973 if (vap->iv_opmode == IEEE80211_M_MONITOR)
974 atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
975 }
976 }
977 }
978
979 /*
 980 * Change the MAC address on the vap (if it has not been started).
981 */
982 static void
983 wlan_iflladdr(void *arg __unused, struct ifnet *ifp)
984 {
985 /* NB: identify vap's by if_init */
986 if (ifp->if_init == ieee80211_init &&
987 (ifp->if_flags & IFF_UP) == 0) {
988 struct ieee80211vap *vap = ifp->if_softc;
989
990 IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
991 }
992 }
993 #endif
994
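/*
 * NetBSD compatibility shim for FreeBSD's if_inc_counter(): bump the
 * matching if_data statistic directly.  IFCOUNTER_OQDROPS has no
 * if_data field here and is silently ignored.
 */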
995 void
996 if_inc_counter(struct ifnet *ifp, ift_counter ifc, int64_t value)
997 {
998 switch (ifc) {
999 case IFCOUNTER_IPACKETS:
1000 ifp->if_data.ifi_ipackets += value;
1001 break;
1002 case IFCOUNTER_IERRORS:
1003 ifp->if_data.ifi_ierrors += value;
1004 break;
1005 case IFCOUNTER_OPACKETS:
1006 ifp->if_data.ifi_opackets += value;
1007 break;
1008 case IFCOUNTER_OERRORS:
1009 ifp->if_data.ifi_oerrors += value;
1010 break;
1011 case IFCOUNTER_COLLISIONS:
1012 ifp->if_data.ifi_collisions += value;
1013 break;
1014 case IFCOUNTER_IBYTES:
1015 ifp->if_data.ifi_ibytes += value;
1016 break;
1017 case IFCOUNTER_OBYTES:
1018 ifp->if_data.ifi_obytes += value;
1019 break;
1020 case IFCOUNTER_IMCASTS:
1021 ifp->if_data.ifi_imcasts += value;
1022 break;
1023 case IFCOUNTER_OMCASTS:
1024 ifp->if_data.ifi_omcasts += value;
1025 break;
1026 case IFCOUNTER_IQDROPS:
1027 ifp->if_data.ifi_iqdrops += value;
1028 break;
1029 case IFCOUNTER_OQDROPS:
 1030 /* ifp->if_data.ifi_oqdrops += value; no such field, just ignore it. */
1031 break;
1032 case IFCOUNTER_NOPROTO:
1033 ifp->if_data.ifi_noproto += value;
1034 break;
1035 default:
 1036 panic("if_inc_counter: non-existent counter");
1037 }
1038 }
1039
1040
1041 #ifdef notyet
1042 /*
1043 * Module glue.
1044 *
1045 * NB: the module name is "wlan" for compatibility with NetBSD.
1046 */
1047 static int
1048 wlan_modevent(module_t mod, int type, void *unused)
1049 {
1050 switch (type) {
1051 case MOD_LOAD:
1052 if (bootverbose)
1053 printf("wlan: <802.11 Link Layer>\n");
1054 wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track,
1055 bpf_track, 0, EVENTHANDLER_PRI_ANY);
1056 wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
1057 wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
1058 wlan_cloner = if_clone_simple(wlanname, wlan_clone_create,
1059 wlan_clone_destroy, 0);
1060 return 0;
1061 case MOD_UNLOAD:
1062 if_clone_detach(wlan_cloner);
1063 EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
1064 EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent);
1065 return 0;
1066 }
1067 return EINVAL;
1068 }
1069
1070 static moduledata_t wlan_mod = {
1071 wlanname,
1072 wlan_modevent,
1073 0
1074 };
1075 DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
1076 MODULE_VERSION(wlan, 1);
1077 MODULE_DEPEND(wlan, ether, 1, 1, 1);
1078 #endif
1079
1080 #ifdef IEEE80211_ALQ
1081 MODULE_DEPEND(wlan, alq, 1, 1, 1);
1082 #endif /* IEEE80211_ALQ */
1083
1084 /* Missing support for if_printf in NetBSD ... */
1085 int
1086 if_printf(struct ifnet *ifp, const char *fmt, ...)
1087 {
1088 char if_fmt[256];
1089 va_list ap;
1090
1091 snprintf(if_fmt, sizeof(if_fmt), "%s: %s", ifp->if_xname, fmt);
1092 va_start(ap, fmt);
1093 vlog(LOG_INFO, if_fmt, ap);
1094 va_end(ap);
1095 return (0);
1096 }
1097
1098 /*
1099 * Set the m_data pointer of a newly-allocated mbuf
1100 * to place an object of the specified size at the
1101 * end of the mbuf, longword aligned.
1102 */
1103 void
1104 m_align(struct mbuf *m, int len)
1105 {
1106 int adjust;
1107
1108 KASSERT(len != M_COPYALL);
1109
1110 if (m->m_flags & M_EXT)
1111 adjust = m->m_ext.ext_size - len;
1112 else if (m->m_flags & M_PKTHDR)
1113 adjust = MHLEN - len;
1114 else
1115 adjust = MLEN - len;
1116 m->m_data += adjust &~ (sizeof(long)-1);
1117 }
1118
1119 /*
 1120 * Append the specified data to the indicated mbuf chain.  Extend
1121 * Extend the mbuf chain if the new data does not fit in
1122 * existing space.
1123 *
1124 * Return 1 if able to complete the job; otherwise 0.
1125 */
1126 int
1127 m_append(struct mbuf *m0, int len, const void *cpv)
1128 {
1129 struct mbuf *m, *n;
1130 int remainder, space;
1131 const char *cp = cpv;
1132
1133 KASSERT(len != M_COPYALL);
1134 for (m = m0; m->m_next != NULL; m = m->m_next)
1135 continue;
1136 remainder = len;
1137 space = M_TRAILINGSPACE(m);
1138 if (space > 0) {
1139 /*
1140 * Copy into available space.
1141 */
1142 if (space > remainder)
1143 space = remainder;
1144 memmove(mtod(m, char *) + m->m_len, cp, space);
1145 m->m_len += space;
1146 cp = cp + space, remainder -= space;
1147 }
1148 while (remainder > 0) {
1149 /*
1150 * Allocate a new mbuf; could check space
1151 * and allocate a cluster instead.
1152 */
1153 n = m_get(M_DONTWAIT, m->m_type);
1154 if (n == NULL)
1155 break;
1156 n->m_len = min(MLEN, remainder);
1157 memmove(mtod(n, void *), cp, n->m_len);
1158 cp += n->m_len, remainder -= n->m_len;
1159 m->m_next = n;
1160 m = n;
1161 }
1162 if (m0->m_flags & M_PKTHDR)
1163 m0->m_pkthdr.len += len - remainder;
1164 return (remainder == 0);
1165 }
1166
1167 /*
1168 * Create a writable copy of the mbuf chain. While doing this
1169 * we compact the chain with a goal of producing a chain with
1170 * at most two mbufs. The second mbuf in this chain is likely
1171 * to be a cluster. The primary purpose of this work is to create
1172 * a writable packet for encryption, compression, etc. The
1173 * secondary goal is to linearize the data so the data can be
1174 * passed to crypto hardware in the most efficient manner possible.
1175 */
1176 struct mbuf *
1177 m_unshare(struct mbuf *m0, int how)
1178 {
1179 struct mbuf *m, *mprev;
1180 struct mbuf *n, *mfirst, *mlast;
1181 int len, off;
1182
1183 mprev = NULL;
1184 for (m = m0; m != NULL; m = mprev->m_next) {
1185 /*
 1186 * Regular mbufs are ignored unless there's a cluster
 1187 * in front of them that we can use to coalesce. We do
1188 * the latter mainly so later clusters can be coalesced
1189 * also w/o having to handle them specially (i.e. convert
1190 * mbuf+cluster -> cluster). This optimization is heavily
1191 * influenced by the assumption that we're running over
1192 * Ethernet where MCLBYTES is large enough that the max
1193 * packet size will permit lots of coalescing into a
1194 * single cluster. This in turn permits efficient
1195 * crypto operations, especially when using hardware.
1196 */
1197 if ((m->m_flags & M_EXT) == 0) {
1198 if (mprev && (mprev->m_flags & M_EXT) &&
1199 m->m_len <= M_TRAILINGSPACE(mprev)) {
1200 /* XXX: this ignores mbuf types */
1201 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1202 mtod(m, caddr_t), m->m_len);
1203 mprev->m_len += m->m_len;
1204 mprev->m_next = m->m_next; /* unlink from chain */
1205 m_free(m); /* reclaim mbuf */
1206 } else {
1207 mprev = m;
1208 }
1209 continue;
1210 }
1211 /*
1212 * Writable mbufs are left alone (for now).
1213 */
1214 if (!M_READONLY(m)) {
1215 mprev = m;
1216 continue;
1217 }
1218
1219 /*
1220 * Not writable, replace with a copy or coalesce with
1221 * the previous mbuf if possible (since we have to copy
1222 * it anyway, we try to reduce the number of mbufs and
1223 * clusters so that future work is easier).
1224 */
1225 FBSDKASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1226 /* NB: we only coalesce into a cluster or larger */
1227 if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1228 m->m_len <= M_TRAILINGSPACE(mprev)) {
1229 /* XXX: this ignores mbuf types */
1230 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1231 mtod(m, caddr_t), m->m_len);
1232 mprev->m_len += m->m_len;
1233 mprev->m_next = m->m_next; /* unlink from chain */
1234 m_free(m); /* reclaim mbuf */
1235 continue;
1236 }
1237
1238 /*
1239 * Allocate new space to hold the copy and copy the data.
1240 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
1241 * splitting them into clusters. We could just malloc a
1242 * buffer and make it external but too many device drivers
1243 * don't know how to break up the non-contiguous memory when
1244 * doing DMA.
1245 */
1246 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1247 if (n == NULL) {
1248 m_freem(m0);
1249 return (NULL);
1250 }
1251 if (m->m_flags & M_PKTHDR) {
1252 FBSDKASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
1253 __func__, m0, m));
1254 m_move_pkthdr(n, m);
1255 }
1256 len = m->m_len;
1257 off = 0;
1258 mfirst = n;
1259 mlast = NULL;
1260 for (;;) {
1261 int cc = min(len, MCLBYTES);
1262 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1263 n->m_len = cc;
1264 if (mlast != NULL)
1265 mlast->m_next = n;
1266 mlast = n;
1267 #if 0
1268 newipsecstat.ips_clcopied++;
1269 #endif
1270
1271 len -= cc;
1272 if (len <= 0)
1273 break;
1274 off += cc;
1275
1276 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1277 if (n == NULL) {
1278 m_freem(mfirst);
1279 m_freem(m0);
1280 return (NULL);
1281 }
1282 }
1283 n->m_next = m->m_next;
1284 if (mprev == NULL)
1285 m0 = mfirst; /* new head of chain */
1286 else
1287 mprev->m_next = mfirst; /* replace old mbuf */
1288 m_free(m); /* release old mbuf */
1289 mprev = mfirst;
1290 }
1291 return (m0);
1292 }
1293