1 /* $NetBSD: ieee80211_netbsd.c,v 1.31.2.8 2019/06/10 22:09:46 christos Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 2003-2009 Sam Leffler, Errno Consulting
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 #ifdef __NetBSD__
32 __KERNEL_RCSID(0, "$NetBSD: ieee80211_netbsd.c,v 1.31.2.8 2019/06/10 22:09:46 christos Exp $");
33 #endif
34
35 /*
36 * IEEE 802.11 support (NetBSD-specific code)
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_wlan.h"
41 #endif
42
43 #include <sys/atomic.h>
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/module.h>
50 #include <sys/proc.h>
51 #include <sys/sysctl.h>
52 #include <sys/syslog.h>
53
54 #include <sys/socket.h>
55
56 #include <net/bpf.h>
57 #include <net/if.h>
58 #include <net/if_dl.h>
59 #include <net/if_ether.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62 #include <net/route.h>
63
64 #include <net80211/ieee80211_var.h>
65 #include <net80211/ieee80211_input.h>
66
67 static const struct sysctlnode *
68 ieee80211_sysctl_treetop(struct sysctllog **log);
69 static void ieee80211_sysctl_setup(void);
70
71 /* NNN in .h file? */
72 #define SYSCTL_HANDLER_ARGS SYSCTLFN_ARGS
73
74 #ifdef IEEE80211_DEBUG
75 static int ieee80211_debug = 0;
76 #endif
77
78 #ifdef notyet
79 static struct if_clone *wlan_cloner;
80 #endif
81 /* notyet */
82
83 static const char wlanname[] = "wlan";
84
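/*
 * net80211 initialization hook: create the sysctl(3) tree.
 */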
85 int
86 ieee80211_init0(void)
87 {
88 ieee80211_sysctl_setup();
89 return 0;
90 }
91
92 /*
93 * "taskqueue" compatibility layer: FreeBSD-style taskqueue operations are
94 * implemented on top of NetBSD's workqueue(9), which performs the actual
95 * function calls for the queued work.  Many features of the FreeBSD
96 * taskqueue are not implemented; only enough is provided for the 802.11
97 * stack to run its immediate and time-delayed tasks.
98 */
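/*
 * Usage sketch (illustrative only; assumes a workqueue created elsewhere
 * with ieee80211_runwork as its worker, and a struct task whose t_func
 * and t_arg members have already been initialized):
 *
 *	taskqueue_enqueue(wq, &task);			run as soon as possible
 *	taskqueue_enqueue_timeout(wq, &ttask, hz);	run after ~1 second
 *	taskqueue_drain(wq, &task);			wait for completion
 */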
99
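/*
 * Workqueue worker: mark the task as no longer queued, then invoke its
 * function.
 */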
100 void
101 ieee80211_runwork(struct work *work2do, void *arg)
102 {
103 struct task *work_task = (struct task *) work2do;
104 #ifdef IEEE80211_DEBUG
105 printf ("runwork: %s (t_arg is 0x%lx)\n",
106 work_task->t_func_name, (long)work_task->t_arg);
107 #endif
108 mutex_enter(&work_task->t_mutex);
109 work_task->t_onqueue = 0;
110 mutex_exit(&work_task->t_mutex);
111
112 work_task->t_func(work_task->t_arg, 0);
113 }
114
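/*
 * Enqueue a task on the workqueue unless it is already queued.
 */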
115 void
116 taskqueue_enqueue(struct workqueue *wq, struct task *task_item)
117 {
118 mutex_enter(&task_item->t_mutex);
119 if (!task_item->t_onqueue) {
120 workqueue_enqueue(wq, &task_item->t_work, NULL);
121 task_item->t_onqueue = 1;
122 }
123 mutex_exit(&task_item->t_mutex);
124 }
125
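/*
 * Wait for any queued instance of the task to finish running.
 */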
126 void
127 taskqueue_drain(struct workqueue *wq, struct task *task_item)
128 {
129 workqueue_wait(wq, &task_item->t_work);
130 }
131
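/*
 * Callout handler: clear the "scheduled" flag and hand the timeout task
 * over to the workqueue.
 */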
132 static void
133 taskqueue_callout_enqueue(void *arg)
134 {
135 struct timeout_task *timeout_task = arg;
136 mutex_enter(&timeout_task->to_task.t_mutex);
137 timeout_task->to_scheduled = 0;
138 mutex_exit(&timeout_task->to_task.t_mutex);
139
140 taskqueue_enqueue(timeout_task->to_wq, (struct task*) timeout_task);
141 }
142
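/*
 * Arrange for a task to be enqueued after "nticks" ticks, unless a
 * timeout is already pending for it.
 */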
143 int
144 taskqueue_enqueue_timeout(struct workqueue *queue,
145 struct timeout_task *timeout_task, int nticks)
146 {
147 mutex_enter(&timeout_task->to_task.t_mutex);
148 if (!timeout_task->to_scheduled) {
149 callout_reset(&timeout_task->to_callout, nticks,
150 taskqueue_callout_enqueue, timeout_task);
151 timeout_task->to_scheduled = 1;
152 }
153 mutex_exit(&timeout_task->to_task.t_mutex);
154
155 return -1;
156 }
157
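/*
 * XXX stub: cancellation is not implemented; any pending callout is left
 * running and -1 is returned.
 */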
158 int
159 taskqueue_cancel_timeout(struct workqueue *queue,
160 struct timeout_task *timeout_task, u_int *pendp)
161 {
162 // printf ("taskqueue_cancel_timeout called\n");
163 return -1;
164 }
165
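/*
 * XXX stub: does not wait for a pending timeout task to complete.
 */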
166 void
167 taskqueue_drain_timeout(struct workqueue *queue,
168 struct timeout_task *timeout_task)
169 {
170 // printf ("taskqueue_drain_timeout called\n");
171 }
172
173
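/*
 * Create a vap on the parent device named in the clone parameters
 * (FreeBSD-style interface cloning).  Marked __unused because the
 * if_clone hookup is still under #ifdef notyet.
 */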
174 static __unused int
175 wlan_clone_create(struct if_clone *ifc, int unit, void * params)
176 {
177 struct ieee80211_clone_params cp;
178 struct ieee80211vap *vap;
179 struct ieee80211com *ic;
180 int error;
181
182 error = copyin(params, &cp, sizeof(cp));
183 if (error)
184 return error;
185 ic = ieee80211_find_com(cp.icp_parent);
186 if (ic == NULL)
187 return ENXIO;
188 if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) {
189 ic_printf(ic, "%s: invalid opmode %d\n", __func__,
190 cp.icp_opmode);
191 return EINVAL;
192 }
193 if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) {
194 ic_printf(ic, "%s mode not supported\n",
195 ieee80211_opmode_name[cp.icp_opmode]);
196 return EOPNOTSUPP;
197 }
198 if ((cp.icp_flags & IEEE80211_CLONE_TDMA) &&
199 #ifdef IEEE80211_SUPPORT_TDMA
200 (ic->ic_caps & IEEE80211_C_TDMA) == 0
201 #else
202 (1)
203 #endif
204 ) {
205 ic_printf(ic, "TDMA not supported\n");
206 return EOPNOTSUPP;
207 }
208 vap = ic->ic_vap_create(ic, wlanname, unit,
209 cp.icp_opmode, cp.icp_flags, cp.icp_bssid,
210 cp.icp_flags & IEEE80211_CLONE_MACADDR ?
211 cp.icp_macaddr : ic->ic_macaddr);
212
213 return (vap == NULL ? EIO : 0);
214 }
215
216 static __unused void
217 wlan_clone_destroy(struct ifnet *ifp)
218 {
219 struct ieee80211vap *vap = ifp->if_softc;
220 struct ieee80211com *ic = vap->iv_ic;
221
222 ic->ic_vap_delete(vap);
223 }
224
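/*
 * Destroy a vap.  The FreeBSD path tears down the cloned interface; the
 * NetBSD equivalent is not implemented yet.
 */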
225 void
226 ieee80211_vap_destroy(struct ieee80211vap *vap)
227 {
228 #ifdef notyet
229 CURVNET_SET(vap->iv_ifp->if_vnet);
230 if_clone_destroyif(wlan_cloner, vap->iv_ifp);
231 CURVNET_RESTORE();
232 #else
233 printf ("vap_destroy called ... what next?\n");
234 #endif
235 }
236
237 #ifdef notyet
238 int
239 ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS)
240 {
241 int msecs = ticks_to_msecs(*(int *)arg1);
242 int error, t;
243
244 error = sysctl_handle_int(oidp, &msecs, 0, req);
245 if (error || !req->newptr)
246 return error;
247 t = msecs_to_ticks(msecs);
248 *(int *)arg1 = (t < 1) ? 1 : t;
249 return 0;
250 }
251
252 static int
253 ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)
254 {
255 int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT;
256 int error;
257
258 error = sysctl_handle_int(oidp, &inact, 0, req);
259 if (error || !req->newptr)
260 return error;
261 *(int *)arg1 = inact / IEEE80211_INACT_WAIT;
262 return 0;
263 }
264 #endif
265
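/*
 * Sysctl handler for the read-only per-vap "parent" node: report the
 * name of the interface recorded in vap->iv_ifp as a string.
 */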
266 static int
267 ieee80211_sysctl_parent(SYSCTLFN_ARGS)
268 {
269 struct ieee80211vap *vap;
270 char pname[IFNAMSIZ];
271 struct sysctlnode node;
272
273 node = *rnode;
274 vap = node.sysctl_data;
275 strlcpy(pname, vap->iv_ifp->if_xname, IFNAMSIZ);
276 node.sysctl_data = pname;
277 return sysctl_lookup(SYSCTLFN_CALL(&node));
278 }
279
280 #ifdef notyet
281 static int
282 ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS)
283 {
284 struct ieee80211com *ic = arg1;
285 int t = 0, error;
286
287 error = sysctl_handle_int(oidp, &t, 0, req);
288 if (error || !req->newptr)
289 return error;
290 IEEE80211_LOCK(ic);
291 ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
292 IEEE80211_UNLOCK(ic);
293 return 0;
294 }
295
296 /*
297 * For now, just restart everything.
298 *
299 * Later on, it'd be nice to have a VAP-only restart separate from a
300 * full-device restart.
301 */
302 static int
303 ieee80211_sysctl_vap_restart(SYSCTL_HANDLER_ARGS)
304 {
305 struct ieee80211vap *vap = arg1;
306 int t = 0, error;
307
308 error = sysctl_handle_int(oidp, &t, 0, req);
309 if (error || !req->newptr)
310 return error;
311
312 ieee80211_restart_all(vap->iv_ic);
313 return 0;
314 }
315 #endif /* notyet */
316
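/*
 * Per-ic sysctl attach/detach are currently no-ops on NetBSD; the
 * per-vap nodes are created in ieee80211_sysctl_vattach() below.
 */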
317 void
318 ieee80211_sysctl_attach(struct ieee80211com *ic)
319 {
320 }
321
322 void
323 ieee80211_sysctl_detach(struct ieee80211com *ic)
324 {
325 }
326
327 /*
328 * Set up the sysctl(3) MIB, net.link.ieee80211.*
329 *
330 * TBD: make CTLFLAG_PERMANENT conditional on whether this is a module.
331 */
332 static struct sysctllog *ieee80211_sysctllog;
333 static void
334 ieee80211_sysctl_setup(void)
335 {
336 #ifdef notyet
337 int rc;
338 #endif
339 const struct sysctlnode *rnode;
340
341 if ((rnode = ieee80211_sysctl_treetop(&ieee80211_sysctllog)) == NULL)
342 return;
343
344 #ifdef notyet
345 if ((rc = sysctl_createv(&ieee80211_sysctllog, 0, &rnode, NULL,
346 CTLFLAG_PERMANENT, CTLTYPE_NODE, "nodes", "client/peer stations",
347 ieee80211_sysctl_node, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
348 goto err;
349 #endif
350
351 #ifdef IEEE80211_DEBUG
352 /* control debugging printfs */
353 if ((rc = sysctl_createv(&ieee80211_sysctllog, 0, &rnode, NULL,
354 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
355 "debug", SYSCTL_DESCR("control debugging printfs"),
356 NULL, 0, &ieee80211_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
357 goto err;
358 #endif
359
360 #ifdef notyet
361 ieee80211_rssadapt_sysctl_setup(&ieee80211_sysctllog);
362 #endif
363
364 return;
365 #if defined(IEEE80211_DEBUG) || defined(notyet)
366 err:
367 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
368 #endif
369 }
370
371 /*
372 * Create or get top of sysctl tree net.link.ieee80211.
373 */
374 static const struct sysctlnode *
375 ieee80211_sysctl_treetop(struct sysctllog **log)
376 {
377 int rc;
378 const struct sysctlnode *rnode;
379
380 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
381 CTLFLAG_PERMANENT, CTLTYPE_NODE, "link",
382 "link-layer statistics and controls",
383 NULL, 0, NULL, 0, CTL_NET, PF_LINK, CTL_EOL)) != 0)
384 goto err;
385
386 if ((rc = sysctl_createv(log, 0, &rnode, &rnode,
387 CTLFLAG_PERMANENT, CTLTYPE_NODE, "ieee80211",
388 "IEEE 802.11 WLAN statistics and controls",
389 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
390 goto err;
391
392 return rnode;
393 err:
394 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
395 return NULL;
396 }
397
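/*
 * Create the per-vap sysctl subtree (net.link.ieee80211.vapN, keyed by
 * interface index) with a read-only "parent" node.  The remaining
 * FreeBSD per-vap knobs are still under #ifdef notyet.
 */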
398 void
399 ieee80211_sysctl_vattach(struct ieee80211vap *vap)
400 {
401 int rc;
402 const struct sysctlnode *cnode, *rnode;
403 char num[sizeof("vap") + 14]; /* sufficient for 32 bits */
404
405 if ((rnode = ieee80211_sysctl_treetop(NULL)) == NULL)
406 return;
407
408 snprintf(num, sizeof(num), "vap%u", vap->iv_ifp->if_index);
409
410 if ((rc = sysctl_createv(&vap->iv_sysctllog, 0, &rnode, &rnode,
411 CTLFLAG_PERMANENT, CTLTYPE_NODE, num, SYSCTL_DESCR("virtual AP"),
412 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
413 goto err;
414
415 /* control debugging printfs */
416 if ((rc = sysctl_createv(&vap->iv_sysctllog, 0, &rnode, &cnode,
417 CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_STRING,
418 "parent", SYSCTL_DESCR("parent device"),
419 ieee80211_sysctl_parent, 0, (void *)vap, IFNAMSIZ,
420 CTL_CREATE, CTL_EOL)) != 0)
421 goto err;
422
423
424 #ifdef notyet
425 struct ifnet *ifp = vap->iv_ifp;
426 struct sysctl_ctx_list *ctx;
427 struct sysctl_oid *oid;
428 char num[14]; /* sufficient for 32 bits */
429
430 ctx = (struct sysctl_ctx_list *) IEEE80211_MALLOC(sizeof(struct sysctl_ctx_list),
431 M_DEVBUF, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
432 if (ctx == NULL) {
433 if_printf(ifp, "%s: cannot allocate sysctl context!\n",
434 __func__);
435 return;
436 }
437 sysctl_ctx_init(ctx);
438 snprintf(num, sizeof(num), "%u", ifp->if_dunit);
439 oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan),
440 OID_AUTO, num, CTLFLAG_RD, NULL, "");
441 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
442 "%parent", CTLTYPE_STRING | CTLFLAG_RD, vap->iv_ic, 0,
443 ieee80211_sysctl_parent, "A", "parent device");
444 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
445 "driver_caps", CTLFLAG_RW, &vap->iv_caps, 0,
446 "driver capabilities");
447 #ifdef IEEE80211_DEBUG
448 vap->iv_debug = ieee80211_debug;
449 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
450 "debug", CTLFLAG_RW, &vap->iv_debug, 0,
451 "control debugging printfs");
452 #endif
453 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
454 "bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0,
455 "consecutive beacon misses before scanning");
456 /* XXX inherit from tunables */
457 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
458 "inact_run", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_run, 0,
459 ieee80211_sysctl_inact, "I",
460 "station inactivity timeout (sec)");
461 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
462 "inact_probe", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_probe, 0,
463 ieee80211_sysctl_inact, "I",
464 "station inactivity probe timeout (sec)");
465 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
466 "inact_auth", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_auth, 0,
467 ieee80211_sysctl_inact, "I",
468 "station authentication timeout (sec)");
469 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
470 "inact_init", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_init, 0,
471 ieee80211_sysctl_inact, "I",
472 "station initial state timeout (sec)");
473 if (vap->iv_htcaps & IEEE80211_HTC_HT) {
474 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
475 "ampdu_mintraffic_bk", CTLFLAG_RW,
476 &vap->iv_ampdu_mintraffic[WME_AC_BK], 0,
477 "BK traffic tx aggr threshold (pps)");
478 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
479 "ampdu_mintraffic_be", CTLFLAG_RW,
480 &vap->iv_ampdu_mintraffic[WME_AC_BE], 0,
481 "BE traffic tx aggr threshold (pps)");
482 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
483 "ampdu_mintraffic_vo", CTLFLAG_RW,
484 &vap->iv_ampdu_mintraffic[WME_AC_VO], 0,
485 "VO traffic tx aggr threshold (pps)");
486 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
487 "ampdu_mintraffic_vi", CTLFLAG_RW,
488 &vap->iv_ampdu_mintraffic[WME_AC_VI], 0,
489 "VI traffic tx aggr threshold (pps)");
490 }
491
492 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
493 "force_restart", CTLTYPE_INT | CTLFLAG_RW, vap, 0,
494 ieee80211_sysctl_vap_restart, "I",
495 "force a VAP restart");
496
497 if (vap->iv_caps & IEEE80211_C_DFS) {
498 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
499 "radar", CTLTYPE_INT | CTLFLAG_RW, vap->iv_ic, 0,
500 ieee80211_sysctl_radar, "I", "simulate radar event");
501 }
502 vap->iv_sysctl = ctx;
503 vap->iv_oid = oid;
504 #endif
505 return;
506 err:
507 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
508 }
509
510 void
511 ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
512 {
513 #ifdef notyet
514 if (vap->iv_sysctl != NULL) {
515 sysctl_ctx_free(vap->iv_sysctl);
516 IEEE80211_FREE(vap->iv_sysctl, M_DEVBUF);
517 vap->iv_sysctl = NULL;
518 }
519 #endif
520 }
521
522
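/*
 * Decrement the node reference count; return non-zero if it reached
 * zero, in which case the count is reset to 1 so the caller holds the
 * final reference while reclaiming the node.
 */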
523 int
524 ieee80211_node_dectestref(struct ieee80211_node *ni)
525 {
526 /* XXX need equivalent of atomic_dec_and_test */
527 atomic_subtract_int(&ni->ni_refcnt, 1);
528 return atomic_cas_uint(&ni->ni_refcnt, 0, 1) == 0;
529 }
530
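/*
 * Drain an ifqueue: free every queued frame and release the node
 * reference stashed in each mbuf's receive-interface pointer.
 */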
531 void
532 ieee80211_drain_ifq(struct ifqueue *ifq)
533 {
534 struct ieee80211_node *ni;
535 struct mbuf *m;
536
537 for (;;) {
538 IF_DEQUEUE(ifq, m);
539 if (m == NULL)
540 break;
541
542 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
543 FBSDKASSERT(ni != NULL, ("frame w/o node"));
544 ieee80211_free_node(ni);
545 ieee80211_free_mbuf(m);
546 }
547 }
548
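/*
 * Remove and free only those queued frames that belong to the given
 * vap, then recompute the queue's tail pointer.
 */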
549 void
550 ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
551 {
552 struct ieee80211_node *ni;
553 struct mbuf *m, **mprev;
554
555 IFQ_LOCK(ifq);
556 mprev = &ifq->ifq_head;
557 while ((m = *mprev) != NULL) {
558 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
559 if (ni != NULL && ni->ni_vap == vap) {
560 *mprev = m->m_nextpkt; /* remove from list */
561 ifq->ifq_len--;
562
563 ieee80211_free_node(ni); /* reclaim ref */
564 ieee80211_free_mbuf(m);
565 } else
566 mprev = &m->m_nextpkt;
567 }
568 /* recalculate tail ptr */
569 m = ifq->ifq_head;
570 for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
571 ;
572 ifq->ifq_tail = m;
573 IFQ_UNLOCK(ifq);
574 }
575
576 /*
577 * MC_ALIGN: align "len" bytes at the end of a cluster-backed mbuf, for
578 * mbufs allocated with m_gethdr/MGETHDR or initialized by M_COPY_PKTHDR.
579 */
580 #define MC_ALIGN(m, len) \
581 do { \
582 (m)->m_data += rounddown2(MCLBYTES - (len), sizeof(long)); \
583 } while (/* CONSTCOND */ 0)
584
585 /*
586 * Allocate and setup a management frame of the specified
587 * size. We return the mbuf and a pointer to the start
588 * of the contiguous data area that's been reserved based
589 * on the packet length. The data area is forced to 32-bit
590 * alignment and the buffer length to a multiple of 4 bytes.
591 * This is done mainly so beacon frames (that require this)
592 * can use this interface too.
593 */
594 struct mbuf *
595 ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
596 {
597 struct mbuf *m;
598 u_int len;
599
600 /*
601 * NB: we know the mbuf routines will align the data area
602 * so we don't need to do anything special.
603 */
604 len = roundup2(headroom + pktlen, 4);
605 FBSDKASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
606 if (len < MINCLSIZE) {
607 m = m_gethdr(M_NOWAIT, MT_DATA);
608 /*
609 * Align the data in case additional headers are added.
610 * This should only happen when a WEP header is added
611 * which only happens for shared key authentication mgt
612 * frames which all fit in MHLEN.
613 */
614 if (m != NULL)
615 m_align(m, len);
616 } else {
617 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
618 if (m != NULL)
619 MC_ALIGN(m, len);
620 }
621 if (m != NULL) {
622 m->m_data += headroom;
623 *frm = m->m_data;
624 }
625 return m;
626 }
627
628 #ifndef __NO_STRICT_ALIGNMENT
629 /*
630 * Re-align the payload in the mbuf. This is mainly used (right now)
631 * to handle IP header alignment requirements on certain architectures.
632 */
633 struct mbuf *
634 ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
635 {
636 int pktlen, space;
637 struct mbuf *n;
638
639 pktlen = m->m_pkthdr.len;
640 space = pktlen + align;
641 if (space < MINCLSIZE)
642 n = m_gethdr(M_NOWAIT, MT_DATA);
643 else {
644 n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
645 space <= MCLBYTES ? MCLBYTES :
646 #if MJUMPAGESIZE != MCLBYTES
647 space <= MJUMPAGESIZE ? MJUMPAGESIZE :
648 #endif
649 space <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
650 }
651 if (__predict_true(n != NULL)) {
652 m_move_pkthdr(n, m);
653 n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
654 m_copydata(m, 0, pktlen, mtod(n, caddr_t));
655 n->m_len = pktlen;
656 } else {
657 IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
658 mtod(m, const struct ieee80211_frame *), NULL,
659 "%s", "no mbuf to realign");
660 vap->iv_stats.is_rx_badalign++;
661 }
662 m_freem(m);
663 return n;
664 }
665 #endif /* !__NO_STRICT_ALIGNMENT */
666
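/*
 * Attach a TX-completion callback to an mbuf via an m_tag.  Returns 1 on
 * success, 0 if the tag could not be allocated.
 */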
667 int
668 ieee80211_add_callback(struct mbuf *m,
669 void (*func)(struct ieee80211_node *, void *, int), void *arg)
670 {
671 struct m_tag *mtag;
672 struct ieee80211_cb *cb;
673
674 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_CALLBACK,
675 sizeof(struct ieee80211_cb), M_NOWAIT);
676 if (mtag == NULL)
677 return 0;
678
679 cb = (struct ieee80211_cb *)(mtag+1);
680 cb->func = func;
681 cb->arg = arg;
682 m_tag_prepend(m, mtag);
683 m->m_flags |= M_TXCB;
684 return 1;
685 }
686
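/*
 * Stash raw-transmit (bpf) parameters in an m_tag on the mbuf;
 * ieee80211_get_xmit_params() retrieves them.  Returns 1 on success,
 * 0 if the tag could not be allocated.
 */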
687 int
688 ieee80211_add_xmit_params(struct mbuf *m,
689 const struct ieee80211_bpf_params *params)
690 {
691 struct m_tag *mtag;
692 struct ieee80211_tx_params *tx;
693
694 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_XMIT_PARAMS,
695 sizeof(struct ieee80211_tx_params), M_NOWAIT);
696 if (mtag == NULL)
697 return (0);
698
699 tx = (struct ieee80211_tx_params *)(mtag+1);
700 memcpy(&tx->params, params, sizeof(struct ieee80211_bpf_params));
701 m_tag_prepend(m, mtag);
702 return (1);
703 }
704
705 int
706 ieee80211_get_xmit_params(struct mbuf *m,
707 struct ieee80211_bpf_params *params)
708 {
709 struct m_tag *mtag;
710 struct ieee80211_tx_params *tx;
711
712 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_XMIT_PARAMS);
713 if (mtag == NULL)
714 return (-1);
715 tx = (struct ieee80211_tx_params *)(mtag + 1);
716 memcpy(params, &tx->params, sizeof(struct ieee80211_bpf_params));
717 return (0);
718 }
719
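/*
 * Invoke the TX-completion callback attached to the mbuf, if any.
 */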
720 void
721 ieee80211_process_callback(struct ieee80211_node *ni,
722 struct mbuf *m, int status)
723 {
724 struct m_tag *mtag;
725
726 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_CALLBACK);
727 if (mtag != NULL) {
728 struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1);
729 cb->func(ni, cb->arg, status);
730 }
731 }
732
733 /*
734 * Add RX parameters to the given mbuf.
735 *
736 * Returns 1 if OK, 0 on error.
737 */
738 int
739 ieee80211_add_rx_params(struct mbuf *m, const struct ieee80211_rx_stats *rxs)
740 {
741 struct m_tag *mtag;
742 struct ieee80211_rx_params *rx;
743
744 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
745 sizeof(struct ieee80211_rx_stats), M_NOWAIT);
746 if (mtag == NULL)
747 return (0);
748
749 rx = (struct ieee80211_rx_params *)(mtag + 1);
750 memcpy(&rx->params, rxs, sizeof(*rxs));
751 m_tag_prepend(m, mtag);
752 return (1);
753 }
754
755 int
756 ieee80211_get_rx_params(struct mbuf *m, struct ieee80211_rx_stats *rxs)
757 {
758 struct m_tag *mtag;
759 struct ieee80211_rx_params *rx;
760
761 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS);
762 if (mtag == NULL)
763 return (-1);
764 rx = (struct ieee80211_rx_params *)(mtag + 1);
765 memcpy(rxs, &rx->params, sizeof(*rxs));
766 return (0);
767 }
768
769 const struct ieee80211_rx_stats *
770 ieee80211_get_rx_params_ptr(struct mbuf *m)
771 {
772 struct m_tag *mtag;
773 struct ieee80211_rx_params *rx;
774
775 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS);
776 if (mtag == NULL)
777 return (NULL);
778 rx = (struct ieee80211_rx_params *)(mtag + 1);
779 return (&rx->params);
780 }
781
782
783 /*
784 * Add TOA parameters to the given mbuf.
785 */
786 int
787 ieee80211_add_toa_params(struct mbuf *m, const struct ieee80211_toa_params *p)
788 {
789 struct m_tag *mtag;
790 struct ieee80211_toa_params *rp;
791
792 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
793 sizeof(struct ieee80211_toa_params), M_NOWAIT);
794 if (mtag == NULL)
795 return (0);
796
797 rp = (struct ieee80211_toa_params *)(mtag + 1);
798 memcpy(rp, p, sizeof(*rp));
799 m_tag_prepend(m, mtag);
800 return (1);
801 }
802
803 int
804 ieee80211_get_toa_params(struct mbuf *m, struct ieee80211_toa_params *p)
805 {
806 struct m_tag *mtag;
807 struct ieee80211_toa_params *rp;
808
809 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS);
810 if (mtag == NULL)
811 return (0);
812 rp = (struct ieee80211_toa_params *)(mtag + 1);
813 if (p != NULL)
814 memcpy(p, rp, sizeof(*p));
815 return (1);
816 }
817
818 /*
819 * Transmit a frame to the parent interface.
820 */
821 int
822 ieee80211_parent_xmitpkt(struct ieee80211com *ic, struct mbuf *m)
823 {
824 int error;
825
826 /*
827 * Assert that the IC TX lock is held; this enforces that the
828 * processing -> queuing order is maintained.
829 */
830 IEEE80211_TX_LOCK_ASSERT(ic);
831 error = ic->ic_transmit(ic, m);
832 if (error) {
833 struct ieee80211_node *ni;
834
835 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
836
837 /* XXX number of fragments */
838 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
839 ieee80211_free_node(ni);
840 ieee80211_free_mbuf(m);
841 }
842 return (error);
843 }
844
845 /*
846 * Transmit a frame to the VAP interface.
847 */
848 int
849 ieee80211_vap_xmitpkt(struct ieee80211vap *vap, struct mbuf *m)
850 {
851 struct ifnet *ifp = vap->iv_ifp;
852
853 /*
854 * When transmitting via the VAP, we shouldn't hold
855 * any IC TX lock as the VAP TX path will acquire it.
856 */
857 IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic);
858
859 return (ifp->if_transmit(ifp, m));
860
861 }
862
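/*
 * Fill a buffer with pseudo-random bytes from arc4random(9), 32 bits at
 * a time.
 */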
863 void
864 get_random_bytes(void *p, size_t n)
865 {
866 uint8_t *dp = p;
867
868 while (n > 0) {
869 uint32_t v = arc4random();
870 size_t nb = n > sizeof(uint32_t) ? sizeof(uint32_t) : n;
871 bcopy(&v, dp, nb);
872 dp += nb, n -= nb;
873 }
874 }
875
876 /*
877 * Helper function for events that pass just a single mac address.
878 */
879 static void
880 notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN])
881 {
882 struct ieee80211_join_event iev;
883 printf ("NNN notify_macaddr called\n");
884 CURVNET_SET(ifp->if_vnet);
885 memset(&iev, 0, sizeof(iev));
886 IEEE80211_ADDR_COPY(iev.iev_addr, mac);
887 rt_ieee80211msg(ifp, op, &iev, sizeof(iev));
888 CURVNET_RESTORE();
889 }
890
891 void
892 ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
893 {
894 struct ieee80211vap *vap = ni->ni_vap;
895 struct ifnet *ifp = vap->iv_ifp;
896
897 CURVNET_SET_QUIET(ifp->if_vnet);
898 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
899 (ni == vap->iv_bss) ? "bss " : "");
900
901 if (ni == vap->iv_bss) {
902 notify_macaddr(ifp, newassoc ?
903 RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
904 if_link_state_change(ifp, LINK_STATE_UP);
905 } else {
906 notify_macaddr(ifp, newassoc ?
907 RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
908 }
909 CURVNET_RESTORE();
910 }
911
912 void
913 ieee80211_notify_node_leave(struct ieee80211_node *ni)
914 {
915 struct ieee80211vap *vap = ni->ni_vap;
916 struct ifnet *ifp = vap->iv_ifp;
917
918 CURVNET_SET_QUIET(ifp->if_vnet);
919 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
920 (ni == vap->iv_bss) ? "bss " : "");
921
922 if (ni == vap->iv_bss) {
923 rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
924 if_link_state_change(ifp, LINK_STATE_DOWN);
925 } else {
926 /* fire off wireless event station leaving */
927 notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
928 }
929 CURVNET_RESTORE();
930 }
931
932 void
933 ieee80211_notify_scan_done(struct ieee80211vap *vap)
934 {
935 struct ifnet *ifp = vap->iv_ifp;
936
937 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
938
939 /* dispatch wireless event indicating scan completed */
940 CURVNET_SET(ifp->if_vnet);
941 rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0);
942 CURVNET_RESTORE();
943 }
944
945 void
946 ieee80211_notify_replay_failure(struct ieee80211vap *vap,
947 const struct ieee80211_frame *wh, const struct ieee80211_key *k,
948 u_int64_t rsc, int tid)
949 {
950 struct ifnet *ifp = vap->iv_ifp;
951
952 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
953 "%s replay detected tid %d <rsc %ju, csc %ju, keyix %u rxkeyix %u>",
954 k->wk_cipher->ic_name, tid, (intmax_t) rsc,
955 (intmax_t) k->wk_keyrsc[tid],
956 k->wk_keyix, k->wk_rxkeyix);
957
958 if (ifp != NULL) { /* NB: for cipher test modules */
959 struct ieee80211_replay_event iev;
960
961 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
962 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
963 iev.iev_cipher = k->wk_cipher->ic_cipher;
964 if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE)
965 iev.iev_keyix = k->wk_rxkeyix;
966 else
967 iev.iev_keyix = k->wk_keyix;
968 iev.iev_keyrsc = k->wk_keyrsc[tid];
969 iev.iev_rsc = rsc;
970 CURVNET_SET(ifp->if_vnet);
971 rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev));
972 CURVNET_RESTORE();
973 }
974 }
975
976 void
977 ieee80211_notify_michael_failure(struct ieee80211vap *vap,
978 const struct ieee80211_frame *wh, u_int keyix)
979 {
980 struct ifnet *ifp = vap->iv_ifp;
981
982 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
983 "michael MIC verification failed <keyix %u>", keyix);
984 vap->iv_stats.is_rx_tkipmic++;
985
986 if (ifp != NULL) { /* NB: for cipher test modules */
987 struct ieee80211_michael_event iev;
988
989 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
990 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
991 iev.iev_cipher = IEEE80211_CIPHER_TKIP;
992 iev.iev_keyix = keyix;
993 CURVNET_SET(ifp->if_vnet);
994 rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev));
995 CURVNET_RESTORE();
996 }
997 }
998
999 void
1000 ieee80211_notify_wds_discover(struct ieee80211_node *ni)
1001 {
1002 struct ieee80211vap *vap = ni->ni_vap;
1003 struct ifnet *ifp = vap->iv_ifp;
1004
1005 notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr);
1006 }
1007
1008 void
1009 ieee80211_notify_csa(struct ieee80211com *ic,
1010 const struct ieee80211_channel *c, int mode, int count)
1011 {
1012 struct ieee80211_csa_event iev;
1013 struct ieee80211vap *vap;
1014 struct ifnet *ifp;
1015
1016 memset(&iev, 0, sizeof(iev));
1017 iev.iev_flags = c->ic_flags;
1018 iev.iev_freq = c->ic_freq;
1019 iev.iev_ieee = c->ic_ieee;
1020 iev.iev_mode = mode;
1021 iev.iev_count = count;
1022 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1023 ifp = vap->iv_ifp;
1024 CURVNET_SET(ifp->if_vnet);
1025 rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev));
1026 CURVNET_RESTORE();
1027 }
1028 }
1029
1030 void
1031 ieee80211_notify_radar(struct ieee80211com *ic,
1032 const struct ieee80211_channel *c)
1033 {
1034 struct ieee80211_radar_event iev;
1035 struct ieee80211vap *vap;
1036 struct ifnet *ifp;
1037
1038 memset(&iev, 0, sizeof(iev));
1039 iev.iev_flags = c->ic_flags;
1040 iev.iev_freq = c->ic_freq;
1041 iev.iev_ieee = c->ic_ieee;
1042 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1043 ifp = vap->iv_ifp;
1044 CURVNET_SET(ifp->if_vnet);
1045 rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev));
1046 CURVNET_RESTORE();
1047 }
1048 }
1049
1050 void
1051 ieee80211_notify_cac(struct ieee80211com *ic,
1052 const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type)
1053 {
1054 struct ieee80211_cac_event iev;
1055 struct ieee80211vap *vap;
1056 struct ifnet *ifp;
1057
1058 memset(&iev, 0, sizeof(iev));
1059 iev.iev_flags = c->ic_flags;
1060 iev.iev_freq = c->ic_freq;
1061 iev.iev_ieee = c->ic_ieee;
1062 iev.iev_type = type;
1063 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1064 ifp = vap->iv_ifp;
1065 CURVNET_SET(ifp->if_vnet);
1066 rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev));
1067 CURVNET_RESTORE();
1068 }
1069 }
1070
1071 void
1072 ieee80211_notify_node_deauth(struct ieee80211_node *ni)
1073 {
1074 struct ieee80211vap *vap = ni->ni_vap;
1075 struct ifnet *ifp = vap->iv_ifp;
1076
1077 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth");
1078
1079 notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr);
1080 }
1081
1082 void
1083 ieee80211_notify_node_auth(struct ieee80211_node *ni)
1084 {
1085 struct ieee80211vap *vap = ni->ni_vap;
1086 struct ifnet *ifp = vap->iv_ifp;
1087
1088 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth");
1089
1090 notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr);
1091 }
1092
1093 void
1094 ieee80211_notify_country(struct ieee80211vap *vap,
1095 const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2])
1096 {
1097 struct ifnet *ifp = vap->iv_ifp;
1098 struct ieee80211_country_event iev;
1099
1100 memset(&iev, 0, sizeof(iev));
1101 IEEE80211_ADDR_COPY(iev.iev_addr, bssid);
1102 iev.iev_cc[0] = cc[0];
1103 iev.iev_cc[1] = cc[1];
1104 CURVNET_SET(ifp->if_vnet);
1105 rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev));
1106 CURVNET_RESTORE();
1107 }
1108
1109 void
1110 ieee80211_notify_radio(struct ieee80211com *ic, int state)
1111 {
1112 struct ieee80211_radio_event iev;
1113 struct ieee80211vap *vap;
1114 struct ifnet *ifp;
1115
1116 memset(&iev, 0, sizeof(iev));
1117 iev.iev_state = state;
1118 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1119 ifp = vap->iv_ifp;
1120 CURVNET_SET(ifp->if_vnet);
1121 rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev));
1122 CURVNET_RESTORE();
1123 }
1124 }
1125
1126 #ifdef notyet
1127 void
1128 ieee80211_load_module(const char *modname)
1129 {
1130 struct thread *td = curthread;
1131
1132 if (suser(td) == 0 && securelevel_gt(td->td_ucred, 0) == 0) {
1133 mtx_lock(&Giant);
1134 (void) linker_load_module(modname, NULL, NULL, NULL, NULL);
1135 mtx_unlock(&Giant);
1136 }
1137 }
1138 #endif
1139
1140 #ifdef notyet
1141 static eventhandler_tag wlan_bpfevent;
1142 static eventhandler_tag wlan_ifllevent;
1143
1144 static void
1145 bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
1146 {
1147 /* NB: identify vap's by if_init */ // NNN won't work with urtwn ...
1148 if (dlt == DLT_IEEE802_11_RADIO &&
1149 ifp->if_init == ieee80211_init) {
1150 struct ieee80211vap *vap = ifp->if_softc;
1151 /*
1152 * Track bpf radiotap listener state. We mark the vap
1153 * to indicate if any listener is present and the com
1154 * to indicate if any listener exists on any associated
1155 * vap. This flag is used by drivers to prepare radiotap
1156 * state only when needed.
1157 */
1158 if (attach) {
1159 ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
1160 if (vap->iv_opmode == IEEE80211_M_MONITOR)
1161 atomic_add_int(&vap->iv_ic->ic_montaps, 1);
1162 } else if (!bpf_peers_present(vap->iv_rawbpf)) {
1163 ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
1164 if (vap->iv_opmode == IEEE80211_M_MONITOR)
1165 atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
1166 }
1167 }
1168 }
1169
1170 /*
1171 * Change the MAC address on the vap (if it has not been started).
1172 */
1173 static void
1174 wlan_iflladdr(void *arg __unused, struct ifnet *ifp)
1175 {
1176 /* NB: identify vap's by if_init */ // NNN wont work on urtwn
1177 if (ifp->if_init == ieee80211_init &&
1178 (ifp->if_flags & IFF_UP) == 0) {
1179 struct ieee80211vap *vap = ifp->if_softc;
1180
1181 IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
1182 }
1183 }
1184 #endif
1185
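/*
 * FreeBSD-style interface statistics: map each ift_counter to the
 * corresponding NetBSD if_data field.  IFCOUNTER_OQDROPS has no NetBSD
 * equivalent and is silently ignored.
 */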
1186 void
1187 if_inc_counter(struct ifnet *ifp, ift_counter ifc, int64_t value)
1188 {
1189 switch (ifc) {
1190 case IFCOUNTER_IPACKETS:
1191 ifp->if_data.ifi_ipackets += value;
1192 break;
1193 case IFCOUNTER_IERRORS:
1194 ifp->if_data.ifi_ierrors += value;
1195 break;
1196 case IFCOUNTER_OPACKETS:
1197 ifp->if_data.ifi_opackets += value;
1198 break;
1199 case IFCOUNTER_OERRORS:
1200 ifp->if_data.ifi_oerrors += value;
1201 break;
1202 case IFCOUNTER_COLLISIONS:
1203 ifp->if_data.ifi_collisions += value;
1204 break;
1205 case IFCOUNTER_IBYTES:
1206 ifp->if_data.ifi_ibytes += value;
1207 break;
1208 case IFCOUNTER_OBYTES:
1209 ifp->if_data.ifi_obytes += value;
1210 break;
1211 case IFCOUNTER_IMCASTS:
1212 ifp->if_data.ifi_imcasts += value;
1213 break;
1214 case IFCOUNTER_OMCASTS:
1215 ifp->if_data.ifi_omcasts += value;
1216 break;
1217 case IFCOUNTER_IQDROPS:
1218 ifp->if_data.ifi_iqdrops += value;
1219 break;
1220 case IFCOUNTER_OQDROPS:
1221 /* ifp->if_data.ifi_oqdrops += value;  no such field in NetBSD, ignore */
1222 break;
1223 case IFCOUNTER_NOPROTO:
1224 ifp->if_data.ifi_noproto += value;
1225 break;
1226 default:
1227 panic("if_inc_counter: non-existent counter");
1228 }
1229 }
1230
1231
1232 #ifdef notyet
1233 /*
1234 * Module glue.
1235 *
1236 * NB: the module name is "wlan" for compatibility with NetBSD.
1237 */
1238 static int
1239 wlan_modevent(module_t mod, int type, void *unused)
1240 {
1241 switch (type) {
1242 case MOD_LOAD:
1243 if (bootverbose)
1244 printf("wlan: <802.11 Link Layer>\n");
1245 wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track,
1246 bpf_track, 0, EVENTHANDLER_PRI_ANY);
1247 wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
1248 wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
1249 wlan_cloner = if_clone_simple(wlanname, wlan_clone_create,
1250 wlan_clone_destroy, 0);
1251 return 0;
1252 case MOD_UNLOAD:
1253 if_clone_detach(wlan_cloner);
1254 EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
1255 EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent);
1256 return 0;
1257 }
1258 return EINVAL;
1259 }
1260
1261 static moduledata_t wlan_mod = {
1262 wlanname,
1263 wlan_modevent,
1264 0
1265 };
1266 DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
1267 MODULE_VERSION(wlan, 1);
1268 MODULE_DEPEND(wlan, ether, 1, 1, 1);
1269 #endif
1270
1271 #ifdef IEEE80211_ALQ
1272 MODULE_DEPEND(wlan, alq, 1, 1, 1);
1273 #endif /* IEEE80211_ALQ */
1274
1275 /* NetBSD lacks if_printf(); provide a minimal implementation that logs via vlog(9). */
1276 int
1277 if_printf(struct ifnet *ifp, const char *fmt, ...)
1278 {
1279 char if_fmt[256];
1280 va_list ap;
1281
1282 snprintf(if_fmt, sizeof(if_fmt), "%s: %s", ifp->if_xname, fmt);
1283 va_start(ap, fmt);
1284 vlog(LOG_INFO, if_fmt, ap);
1285 va_end(ap);
1286 return (0);
1287 }
1288
1289 /*
1290 * Append the specified data to the indicated mbuf chain.
1291 * Extend the mbuf chain if the new data does not fit in
1292 * existing space.
1293 *
1294 * Return 1 if able to complete the job; otherwise 0.
1295 */
1296 int
1297 m_append(struct mbuf *m0, int len, const void *cpv)
1298 {
1299 struct mbuf *m, *n;
1300 int remainder, space;
1301 const char *cp = cpv;
1302
1303 KASSERT(len != M_COPYALL);
1304 for (m = m0; m->m_next != NULL; m = m->m_next)
1305 continue;
1306 remainder = len;
1307 space = M_TRAILINGSPACE(m);
1308 if (space > 0) {
1309 /*
1310 * Copy into available space.
1311 */
1312 if (space > remainder)
1313 space = remainder;
1314 memmove(mtod(m, char *) + m->m_len, cp, space);
1315 m->m_len += space;
1316 cp = cp + space, remainder -= space;
1317 }
1318 while (remainder > 0) {
1319 /*
1320 * Allocate a new mbuf; could check space
1321 * and allocate a cluster instead.
1322 */
1323 n = m_get(M_DONTWAIT, m->m_type);
1324 if (n == NULL)
1325 break;
1326 n->m_len = uimin(MLEN, remainder);
1327 memmove(mtod(n, void *), cp, n->m_len);
1328 cp += n->m_len, remainder -= n->m_len;
1329 m->m_next = n;
1330 m = n;
1331 }
1332 if (m0->m_flags & M_PKTHDR)
1333 m0->m_pkthdr.len += len - remainder;
1334 return (remainder == 0);
1335 }
1336
1337 /*
1338 * Create a writable copy of the mbuf chain. While doing this
1339 * we compact the chain with a goal of producing a chain with
1340 * at most two mbufs. The second mbuf in this chain is likely
1341 * to be a cluster. The primary purpose of this work is to create
1342 * a writable packet for encryption, compression, etc. The
1343 * secondary goal is to linearize the data so the data can be
1344 * passed to crypto hardware in the most efficient manner possible.
1345 */
1346 struct mbuf *
1347 m_unshare(struct mbuf *m0, int how)
1348 {
1349 struct mbuf *m, *mprev;
1350 struct mbuf *n, *mfirst, *mlast;
1351 int len, off;
1352
1353 mprev = NULL;
1354 for (m = m0; m != NULL; m = mprev->m_next) {
1355 /*
1356 * Regular mbufs are ignored unless there's a cluster
1357 * in front of them that we can use to coalesce. We do
1358 * the latter mainly so later clusters can be coalesced
1359 * also w/o having to handle them specially (i.e. convert
1360 * mbuf+cluster -> cluster). This optimization is heavily
1361 * influenced by the assumption that we're running over
1362 * Ethernet where MCLBYTES is large enough that the max
1363 * packet size will permit lots of coalescing into a
1364 * single cluster. This in turn permits efficient
1365 * crypto operations, especially when using hardware.
1366 */
1367 if ((m->m_flags & M_EXT) == 0) {
1368 if (mprev && (mprev->m_flags & M_EXT) &&
1369 m->m_len <= M_TRAILINGSPACE(mprev)) {
1370 /* XXX: this ignores mbuf types */
1371 memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
1372 mtod(m, __uint8_t *), m->m_len);
1373 mprev->m_len += m->m_len;
1374 mprev->m_next = m->m_next; /* unlink from chain */
1375 m_free(m); /* reclaim mbuf */
1376 } else {
1377 mprev = m;
1378 }
1379 continue;
1380 }
1381 /*
1382 * Writable mbufs are left alone (for now).
1383 */
1384 if (!M_READONLY(m)) {
1385 mprev = m;
1386 continue;
1387 }
1388
1389 /*
1390 * Not writable, replace with a copy or coalesce with
1391 * the previous mbuf if possible (since we have to copy
1392 * it anyway, we try to reduce the number of mbufs and
1393 * clusters so that future work is easier).
1394 */
1395 FBSDKASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1396 /* NB: we only coalesce into a cluster or larger */
1397 if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1398 m->m_len <= M_TRAILINGSPACE(mprev)) {
1399 /* XXX: this ignores mbuf types */
1400 memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
1401 mtod(m, __uint8_t *), m->m_len);
1402 mprev->m_len += m->m_len;
1403 mprev->m_next = m->m_next; /* unlink from chain */
1404 m_free(m); /* reclaim mbuf */
1405 continue;
1406 }
1407
1408 /*
1409 * Allocate new space to hold the copy and copy the data.
1410 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
1411 * splitting them into clusters. We could just malloc a
1412 * buffer and make it external but too many device drivers
1413 * don't know how to break up the non-contiguous memory when
1414 * doing DMA.
1415 */
1416 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1417 if (n == NULL) {
1418 m_freem(m0);
1419 return (NULL);
1420 }
1421 if (m->m_flags & M_PKTHDR) {
1422 FBSDKASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
1423 __func__, m0, m));
1424 m_move_pkthdr(n, m);
1425 }
1426 len = m->m_len;
1427 off = 0;
1428 mfirst = n;
1429 mlast = NULL;
1430 for (;;) {
1431 int cc = uimin(len, MCLBYTES);
1432 memcpy(mtod(n, __uint8_t *), mtod(m, __uint8_t *) + off, cc);
1433 n->m_len = cc;
1434 if (mlast != NULL)
1435 mlast->m_next = n;
1436 mlast = n;
1437 #if 0
1438 newipsecstat.ips_clcopied++;
1439 #endif
1440
1441 len -= cc;
1442 if (len <= 0)
1443 break;
1444 off += cc;
1445
1446 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1447 if (n == NULL) {
1448 m_freem(mfirst);
1449 m_freem(m0);
1450 return (NULL);
1451 }
1452 }
1453 n->m_next = m->m_next;
1454 if (mprev == NULL)
1455 m0 = mfirst; /* new head of chain */
1456 else
1457 mprev->m_next = mfirst; /* replace old mbuf */
1458 m_free(m); /* release old mbuf */
1459 mprev = mfirst;
1460 }
1461 return (m0);
1462 }
1463