ieee80211_netbsd.c revision 1.31.2.6 1 /* $NetBSD: ieee80211_netbsd.c,v 1.31.2.6 2018/08/03 19:47:25 phil Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 2003-2009 Sam Leffler, Errno Consulting
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 /* __FBSDID("$FreeBSD$"); */
32 __KERNEL_RCSID(0, "$NetBSD: ieee80211_netbsd.c,v 1.31.2.6 2018/08/03 19:47:25 phil Exp $");
33
34 /*
35 * IEEE 802.11 support (NetBSD-specific code)
36 */
37
38 #include "opt_wlan.h"
39
40 #include <sys/atomic.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/module.h>
47 #include <sys/proc.h>
48 #include <sys/sysctl.h>
49 #include <sys/syslog.h>
50
51 #include <sys/socket.h>
52
53 #include <net/bpf.h>
54 #include <net/if.h>
55 #include <net/if_dl.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <net/route.h>
60
61 #include <net80211/ieee80211_var.h>
62 #include <net80211/ieee80211_input.h>
63
64 static const struct sysctlnode *
65 ieee80211_sysctl_treetop(struct sysctllog **log);
66 static void ieee80211_sysctl_setup(void);
67
68 /* NNN in .h file? */
69 #define SYSCTL_HANDLER_ARGS SYSCTLFN_ARGS
70
71 #ifdef IEEE80211_DEBUG
72 static int ieee80211_debug = 0;
73 #endif
74
75 #ifdef notyet
76 static struct if_clone *wlan_cloner;
77 #endif
78 /* notyet */
79
80 static const char wlanname[] = "wlan";
81
/*
 * One-time net80211 initialization: register the sysctl tree.
 */
int
ieee80211_init0(void)
{

	ieee80211_sysctl_setup();
	return 0;
}
88
89 /*
90 * "taskqueue" support for doing FreeBSD style taskqueue operations using
91 * NetBSD's workqueue to do the actual function calls for the work.
92 * Many features of the FreeBSD taskqueue are not implemented. This should
93 * be enough features for the 802.11 stack to run its tasks and time delayed
94 * tasks.
95 */
96
97 void
98 ieee80211_runwork(struct work *work2do, void *arg)
99 {
100 struct task *work_task = (struct task *) work2do;
101 #ifdef IEEE80211_DEBUG
102 printf ("runwork: %s (t_arg is 0x%lx)\n",
103 work_task->t_func_name, (long)work_task->t_arg);
104 #endif
105 mutex_enter(&work_task->t_mutex);
106 work_task->t_onqueue = 0;
107 mutex_exit(&work_task->t_mutex);
108
109 work_task->t_func(work_task->t_arg, 0);
110 }
111
112 void
113 taskqueue_enqueue(struct workqueue *wq, struct task *task_item)
114 {
115 mutex_enter(&task_item->t_mutex);
116 if (!task_item->t_onqueue) {
117 printf ("taskqueue_enqueue function %s\n", task_item->t_func_name);
118 workqueue_enqueue(wq, &task_item->t_work, NULL);
119 task_item->t_onqueue = 1;
120 }
121 mutex_exit(&task_item->t_mutex);
122 }
123
/*
 * Wait for the task's work item to finish running on the workqueue.
 */
void
taskqueue_drain(struct workqueue *wq, struct task *task_item)
{
	printf ("taskqueue_drain called\n");
	workqueue_wait(wq, &task_item->t_work);
}
130
131 static void
132 taskqueue_callout_enqueue(void *arg)
133 {
134 struct timeout_task *timeout_task = arg;
135 mutex_enter(&timeout_task->to_task.t_mutex);
136 timeout_task->to_scheduled = 0;
137 mutex_exit(&timeout_task->to_task.t_mutex);
138
139 taskqueue_enqueue(timeout_task->to_wq, (struct task*) timeout_task);
140 }
141
142 int
143 taskqueue_enqueue_timeout(struct workqueue *queue,
144 struct timeout_task *timeout_task, int nticks)
145 {
146 mutex_enter(&timeout_task->to_task.t_mutex);
147 if (!timeout_task->to_scheduled) {
148 printf ("taskqueue_enqueue_timeout: Scheduling the function %s.\n",
149 timeout_task->to_task.t_func_name);
150 callout_reset(&timeout_task->to_callout, nticks,
151 taskqueue_callout_enqueue, timeout_task);
152 timeout_task->to_scheduled = 1;
153 }
154 mutex_exit(&timeout_task->to_task.t_mutex);
155
156 return -1;
157 }
158
/*
 * Cancel a scheduled timeout task.
 * XXX stub: the callout is not stopped and *pendp is never written;
 * unconditionally returns -1.
 */
int
taskqueue_cancel_timeout(struct workqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	printf ("taskqueue_cancel_timeout called\n");
	return -1;
}
166
/*
 * Drain a timeout task.
 * XXX stub: neither the callout nor the workqueue is waited on.
 */
void
taskqueue_drain_timeout(struct workqueue *queue,
    struct timeout_task *timeout_task)
{
	printf ("taskqueue_drain_timeout called\n");
}
173
174
/*
 * if_clone create handler: create a new vap on the parent device
 * described by the user-supplied ieee80211_clone_params.
 * Returns 0 on success or an errno.
 */
static __unused int
wlan_clone_create(struct if_clone *ifc, int unit, void * params)
{
	struct ieee80211_clone_params cp;
	struct ieee80211vap *vap;
	struct ieee80211com *ic;
	int error;

	/* Parameters come from userland; copy them in first. */
	error = copyin(params, &cp, sizeof(cp));
	if (error)
		return error;
	ic = ieee80211_find_com(cp.icp_parent);
	if (ic == NULL)
		return ENXIO;
	/* Validate the requested operating mode against device caps. */
	if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) {
		ic_printf(ic, "%s: invalid opmode %d\n", __func__,
		    cp.icp_opmode);
		return EINVAL;
	}
	if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) {
		ic_printf(ic, "%s mode not supported\n",
		    ieee80211_opmode_name[cp.icp_opmode]);
		return EOPNOTSUPP;
	}
	/* TDMA is refused outright when not compiled in. */
	if ((cp.icp_flags & IEEE80211_CLONE_TDMA) &&
#ifdef IEEE80211_SUPPORT_TDMA
	    (ic->ic_caps & IEEE80211_C_TDMA) == 0
#else
	    (1)
#endif
	) {
		ic_printf(ic, "TDMA not supported\n");
		return EOPNOTSUPP;
	}
	/* Let the driver create the vap, optionally with a caller MAC. */
	vap = ic->ic_vap_create(ic, wlanname, unit,
	    cp.icp_opmode, cp.icp_flags, cp.icp_bssid,
	    cp.icp_flags & IEEE80211_CLONE_MACADDR ?
	    cp.icp_macaddr : ic->ic_macaddr);

	return (vap == NULL ? EIO : 0);
}
216
217 static __unused void
218 wlan_clone_destroy(struct ifnet *ifp)
219 {
220 struct ieee80211vap *vap = ifp->if_softc;
221 struct ieee80211com *ic = vap->iv_ic;
222
223 ic->ic_vap_delete(vap);
224 }
225
/*
 * Destroy a vap's ifnet.  The FreeBSD cloner path is compiled out;
 * for now this only logs that it was reached.
 */
void
ieee80211_vap_destroy(struct ieee80211vap *vap)
{
#ifdef notyet
	CURVNET_SET(vap->iv_ifp->if_vnet);
	if_clone_destroyif(wlan_cloner, vap->iv_ifp);
	CURVNET_RESTORE();
#else
	printf ("vap_destroy called ... what next?\n");
#endif
}
237
#ifdef notyet
/*
 * Convert a tick-valued variable to/from milliseconds for sysctl.
 * (FreeBSD sysctl handler; compiled out pending a NetBSD port.)
 */
int
ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS)
{
	int msecs = ticks_to_msecs(*(int *)arg1);
	int error, t;

	error = sysctl_handle_int(oidp, &msecs, 0, req);
	if (error || !req->newptr)
		return error;
	t = msecs_to_ticks(msecs);
	*(int *)arg1 = (t < 1) ? 1 : t;	/* clamp to at least one tick */
	return 0;
}

/*
 * Scale an inactivity variable by IEEE80211_INACT_WAIT for sysctl
 * read/write.  (FreeBSD sysctl handler; compiled out.)
 */
static int
ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)
{
	int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT;
	int error;

	error = sysctl_handle_int(oidp, &inact, 0, req);
	if (error || !req->newptr)
		return error;
	*(int *)arg1 = inact / IEEE80211_INACT_WAIT;
	return 0;
}
#endif
266
267 static int
268 ieee80211_sysctl_parent(SYSCTLFN_ARGS)
269 {
270 struct ieee80211vap *vap;
271 char pname[IFNAMSIZ];
272 struct sysctlnode node;
273
274 node = *rnode;
275 vap = node.sysctl_data;
276 strlcpy(pname, vap->iv_ifp->if_xname, IFNAMSIZ);
277 node.sysctl_data = pname;
278 return sysctl_lookup(SYSCTLFN_CALL(&node));
279 }
280
#ifdef notyet
/*
 * Writing any value simulates a radar event on the current channel.
 * (FreeBSD sysctl handler; compiled out pending a NetBSD port.)
 */
static int
ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS)
{
	struct ieee80211com *ic = arg1;
	int t = 0, error;

	error = sysctl_handle_int(oidp, &t, 0, req);
	if (error || !req->newptr)
		return error;
	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
	return 0;
}

/*
 * For now, just restart everything.
 *
 * Later on, it'd be nice to have a separate VAP restart to
 * full-device restart.
 */
static int
ieee80211_sysctl_vap_restart(SYSCTL_HANDLER_ARGS)
{
	struct ieee80211vap *vap = arg1;
	int t = 0, error;

	error = sysctl_handle_int(oidp, &t, 0, req);
	if (error || !req->newptr)
		return error;

	ieee80211_restart_all(vap->iv_ic);
	return 0;
}
#endif /* notyet */
317
/*
 * Per-device (ieee80211com) sysctl attach hook.
 * Currently a no-op on NetBSD; per-vap nodes are created in
 * ieee80211_sysctl_vattach() instead.
 */
void
ieee80211_sysctl_attach(struct ieee80211com *ic)
{
}
322
/*
 * Per-device (ieee80211com) sysctl detach hook; no-op placeholder.
 */
void
ieee80211_sysctl_detach(struct ieee80211com *ic)
{
}
327
328 /*
329 * Setup sysctl(3) MIB, net.ieee80211.*
330 *
331 * TBD condition CTLFLAG_PERMANENT on being a module or not
332 */
333 static struct sysctllog *ieee80211_sysctllog;
334 static void
335 ieee80211_sysctl_setup(void)
336 {
337 int rc;
338 const struct sysctlnode *rnode;
339
340 if ((rnode = ieee80211_sysctl_treetop(&ieee80211_sysctllog)) == NULL)
341 return;
342
343 #ifdef notyet
344 if ((rc = sysctl_createv(&ieee80211_sysctllog, 0, &rnode, NULL,
345 CTLFLAG_PERMANENT, CTLTYPE_NODE, "nodes", "client/peer stations",
346 ieee80211_sysctl_node, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
347 goto err;
348 #endif
349
350 #ifdef IEEE80211_DEBUG
351 /* control debugging printfs */
352 if ((rc = sysctl_createv(&ieee80211_sysctllog, 0, &rnode, NULL,
353 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
354 "debug", SYSCTL_DESCR("control debugging printfs"),
355 NULL, 0, &ieee80211_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
356 goto err;
357 #endif
358
359 #ifdef notyet
360 ieee80211_rssadapt_sysctl_setup(&ieee80211_sysctllog);
361 #endif
362
363 return;
364 err:
365 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
366 }
367
368 /*
369 * Create or get top of sysctl tree net.link.ieee80211.
370 */
371 static const struct sysctlnode *
372 ieee80211_sysctl_treetop(struct sysctllog **log)
373 {
374 int rc;
375 const struct sysctlnode *rnode;
376
377 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
378 CTLFLAG_PERMANENT, CTLTYPE_NODE, "link",
379 "link-layer statistics and controls",
380 NULL, 0, NULL, 0, CTL_NET, PF_LINK, CTL_EOL)) != 0)
381 goto err;
382
383 if ((rc = sysctl_createv(log, 0, &rnode, &rnode,
384 CTLFLAG_PERMANENT, CTLTYPE_NODE, "ieee80211",
385 "IEEE 802.11 WLAN statistics and controls",
386 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
387 goto err;
388
389 return rnode;
390 err:
391 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
392 return NULL;
393 }
394
/*
 * Create per-vap sysctl nodes: net.link.ieee80211.vapN.parent (and,
 * once ported, the remaining FreeBSD knobs below).
 */
void
ieee80211_sysctl_vattach(struct ieee80211vap *vap)
{
	int rc;
	const struct sysctlnode *cnode, *rnode;
	char num[sizeof("vap") + 14];	/* sufficient for 32 bits */

	/* NOTE(review): NULL log — these top nodes are presumably never
	 * torn down; confirm that is intended. */
	if ((rnode = ieee80211_sysctl_treetop(NULL)) == NULL)
		return;

	snprintf(num, sizeof(num), "vap%u", vap->iv_ifp->if_index);

	if ((rc = sysctl_createv(&vap->iv_sysctllog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, num, SYSCTL_DESCR("virtual AP"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	/* read-only name of the parent device */
	if ((rc = sysctl_createv(&vap->iv_sysctllog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_STRING,
	    "parent", SYSCTL_DESCR("parent device"),
	    ieee80211_sysctl_parent, 0, (void *)vap, IFNAMSIZ,
	    CTL_CREATE, CTL_EOL)) != 0)
		goto err;


#ifdef notyet
	/* FreeBSD per-vap sysctl tree, not yet ported to NetBSD. */
	struct ifnet *ifp = vap->iv_ifp;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	char num[14];	/* sufficient for 32 bits */

	ctx = (struct sysctl_ctx_list *) IEEE80211_MALLOC(sizeof(struct sysctl_ctx_list),
	    M_DEVBUF, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
	if (ctx == NULL) {
		if_printf(ifp, "%s: cannot allocate sysctl context!\n",
		    __func__);
		return;
	}
	sysctl_ctx_init(ctx);
	snprintf(num, sizeof(num), "%u", ifp->if_dunit);
	oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD, vap->iv_ic, 0,
	    ieee80211_sysctl_parent, "A", "parent device");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "driver_caps", CTLFLAG_RW, &vap->iv_caps, 0,
	    "driver capabilities");
#ifdef IEEE80211_DEBUG
	vap->iv_debug = ieee80211_debug;
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "debug", CTLFLAG_RW, &vap->iv_debug, 0,
	    "control debugging printfs");
#endif
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0,
	    "consecutive beacon misses before scanning");
	/* XXX inherit from tunables */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_run", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_run, 0,
	    ieee80211_sysctl_inact, "I",
	    "station inactivity timeout (sec)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_probe", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_probe, 0,
	    ieee80211_sysctl_inact, "I",
	    "station inactivity probe timeout (sec)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_auth", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_auth, 0,
	    ieee80211_sysctl_inact, "I",
	    "station authentication timeout (sec)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_init", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_init, 0,
	    ieee80211_sysctl_inact, "I",
	    "station initial state timeout (sec)");
	if (vap->iv_htcaps & IEEE80211_HTC_HT) {
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_bk", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_BK], 0,
		    "BK traffic tx aggr threshold (pps)");
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_be", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_BE], 0,
		    "BE traffic tx aggr threshold (pps)");
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_vo", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_VO], 0,
		    "VO traffic tx aggr threshold (pps)");
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_vi", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_VI], 0,
		    "VI traffic tx aggr threshold (pps)");
	}

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "force_restart", CTLTYPE_INT | CTLFLAG_RW, vap, 0,
	    ieee80211_sysctl_vap_restart, "I",
	    "force a VAP restart");

	if (vap->iv_caps & IEEE80211_C_DFS) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "radar", CTLTYPE_INT | CTLFLAG_RW, vap->iv_ic, 0,
		    ieee80211_sysctl_radar, "I", "simulate radar event");
	}
	vap->iv_sysctl = ctx;
	vap->iv_oid = oid;
#endif
	return;
err:
	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
}
506
/*
 * Tear down per-vap sysctl state.  Only the FreeBSD context teardown
 * exists and it is compiled out; NOTE(review): nodes logged in
 * iv_sysctllog are not released here — confirm teardown elsewhere.
 */
void
ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
{
#ifdef notyet
	if (vap->iv_sysctl != NULL) {
		sysctl_ctx_free(vap->iv_sysctl);
		IEEE80211_FREE(vap->iv_sysctl, M_DEVBUF);
		vap->iv_sysctl = NULL;
	}
#endif
}
518
519
/*
 * Atomically decrement ni_refcnt and return non-zero iff it reached
 * zero.  atomic_cas_uint() returns the old value, so comparing it
 * to 0 detects the transition; on success the CAS also writes 1
 * back — a stand-in until a true atomic_dec_and_test exists.
 */
int
ieee80211_node_dectestref(struct ieee80211_node *ni)
{
	/* XXX need equivalent of atomic_dec_and_test */
	atomic_subtract_int(&ni->ni_refcnt, 1);
	return atomic_cas_uint(&ni->ni_refcnt, 0, 1) == 0;
}
527
/*
 * Drop every frame on the queue, releasing the node reference
 * carried in each mbuf's rcvif pointer along with the mbuf itself.
 */
void
ieee80211_drain_ifq(struct ifqueue *ifq)
{
	struct ieee80211_node *ni;
	struct mbuf *m;

	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m == NULL)
			break;

		/* The sender stashed its node reference in the rcvif. */
		ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
		FBSDKASSERT(ni != NULL, ("frame w/o node"));
		ieee80211_free_node(ni);
		ieee80211_free_mbuf(m);
	}
}
545
/*
 * Remove only the frames belonging to the given vap from the queue,
 * releasing each frame's node reference; other vaps' traffic stays.
 */
void
ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
{
	struct ieee80211_node *ni;
	struct mbuf *m, **mprev;

	IFQ_LOCK(ifq);
	/* Unlink matching packets in place via pointer-to-pointer walk. */
	mprev = &ifq->ifq_head;
	while ((m = *mprev) != NULL) {
		ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
		if (ni != NULL && ni->ni_vap == vap) {
			*mprev = m->m_nextpkt;	/* remove from list */
			ifq->ifq_len--;

			ieee80211_free_node(ni);	/* reclaim ref */
			ieee80211_free_mbuf(m);
		} else
			mprev = &m->m_nextpkt;
	}
	/* recalculate tail ptr: walk to the last remaining packet */
	m = ifq->ifq_head;
	for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
		;
	ifq->ifq_tail = m;
	IFQ_UNLOCK(ifq);
}
572
/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 * Pushes m_data toward the end of the cluster so a trailing write
 * of 'len' bytes lands long-aligned.
 */
#define MC_ALIGN(m, len) \
do { \
	(m)->m_data += rounddown2(MCLBYTES - (len), sizeof(long)); \
} while (/* CONSTCOND */ 0)
581
582 /*
583 * Allocate and setup a management frame of the specified
584 * size. We return the mbuf and a pointer to the start
585 * of the contiguous data area that's been reserved based
586 * on the packet length. The data area is forced to 32-bit
587 * alignment and the buffer length to a multiple of 4 bytes.
588 * This is done mainly so beacon frames (that require this)
589 * can use this interface too.
590 */
/*
 * Allocate an mbuf for a management frame; *frm is set to the start
 * of the payload (after the caller's headroom).  Returns NULL on
 * allocation failure.
 */
struct mbuf *
ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
{
	struct mbuf *m;
	u_int len;

	/*
	 * NB: we know the mbuf routines will align the data area
	 * so we don't need to do anything special.
	 */
	len = roundup2(headroom + pktlen, 4);	/* multiple of 4 bytes */
	FBSDKASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
	if (len < MINCLSIZE) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		/*
		 * Align the data in case additional headers are added.
		 * This should only happen when a WEP header is added
		 * which only happens for shared key authentication mgt
		 * frames which all fit in MHLEN.
		 */
		if (m != NULL)
			MH_ALIGN(m, len);
	} else {
		/* Needs a cluster; align within the cluster instead. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL)
			MC_ALIGN(m, len);
	}
	if (m != NULL) {
		/* Reserve the caller's headroom before the payload. */
		m->m_data += headroom;
		*frm = m->m_data;
	}
	return m;
}
624
625 #ifndef __NO_STRICT_ALIGNMENT
626 /*
627 * Re-align the payload in the mbuf. This is mainly used (right now)
628 * to handle IP header alignment requirements on certain architectures.
629 */
/*
 * Copy the packet into a freshly-allocated mbuf whose payload start
 * is offset so that data at 'align' bytes in is machine-aligned.
 * The original mbuf is always freed; returns NULL (and counts a
 * badalign discard) if no replacement could be allocated.
 */
struct mbuf *
ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
{
	int pktlen, space;
	struct mbuf *n;

	/* Pick the smallest mbuf/cluster that fits payload + slop. */
	pktlen = m->m_pkthdr.len;
	space = pktlen + align;
	if (space < MINCLSIZE)
		n = m_gethdr(M_NOWAIT, MT_DATA);
	else {
		n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    space <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    space <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    space <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
	}
	if (__predict_true(n != NULL)) {
		/* Move the header, then copy payload to aligned storage. */
		m_move_pkthdr(n, m);
		n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
		m_copydata(m, 0, pktlen, mtod(n, caddr_t));
		n->m_len = pktlen;
	} else {
		IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
		    mtod(m, const struct ieee80211_frame *), NULL,
		    "%s", "no mbuf to realign");
		vap->iv_stats.is_rx_badalign++;
	}
	m_freem(m);	/* original is consumed on both paths */
	return n;
}
662 #endif /* !__NO_STRICT_ALIGNMENT */
663
664 int
665 ieee80211_add_callback(struct mbuf *m,
666 void (*func)(struct ieee80211_node *, void *, int), void *arg)
667 {
668 struct m_tag *mtag;
669 struct ieee80211_cb *cb;
670
671 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_CALLBACK,
672 sizeof(struct ieee80211_cb), M_NOWAIT);
673 if (mtag == NULL)
674 return 0;
675
676 cb = (struct ieee80211_cb *)(mtag+1);
677 cb->func = func;
678 cb->arg = arg;
679 m_tag_prepend(m, mtag);
680 m->m_flags |= M_TXCB;
681 return 1;
682 }
683
684 int
685 ieee80211_add_xmit_params(struct mbuf *m,
686 const struct ieee80211_bpf_params *params)
687 {
688 struct m_tag *mtag;
689 struct ieee80211_tx_params *tx;
690
691 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_XMIT_PARAMS,
692 sizeof(struct ieee80211_tx_params), M_NOWAIT);
693 if (mtag == NULL)
694 return (0);
695
696 tx = (struct ieee80211_tx_params *)(mtag+1);
697 memcpy(&tx->params, params, sizeof(struct ieee80211_bpf_params));
698 m_tag_prepend(m, mtag);
699 return (1);
700 }
701
702 int
703 ieee80211_get_xmit_params(struct mbuf *m,
704 struct ieee80211_bpf_params *params)
705 {
706 struct m_tag *mtag;
707 struct ieee80211_tx_params *tx;
708
709 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_XMIT_PARAMS,
710 NULL);
711 if (mtag == NULL)
712 return (-1);
713 tx = (struct ieee80211_tx_params *)(mtag + 1);
714 memcpy(params, &tx->params, sizeof(struct ieee80211_bpf_params));
715 return (0);
716 }
717
718 void
719 ieee80211_process_callback(struct ieee80211_node *ni,
720 struct mbuf *m, int status)
721 {
722 struct m_tag *mtag;
723
724 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_CALLBACK, NULL);
725 if (mtag != NULL) {
726 struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1);
727 cb->func(ni, cb->arg, status);
728 }
729 }
730
731 /*
732 * Add RX parameters to the given mbuf.
733 *
734 * Returns 1 if OK, 0 on error.
735 */
736 int
737 ieee80211_add_rx_params(struct mbuf *m, const struct ieee80211_rx_stats *rxs)
738 {
739 struct m_tag *mtag;
740 struct ieee80211_rx_params *rx;
741
742 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
743 sizeof(struct ieee80211_rx_stats), M_NOWAIT);
744 if (mtag == NULL)
745 return (0);
746
747 rx = (struct ieee80211_rx_params *)(mtag + 1);
748 memcpy(&rx->params, rxs, sizeof(*rxs));
749 m_tag_prepend(m, mtag);
750 return (1);
751 }
752
753 int
754 ieee80211_get_rx_params(struct mbuf *m, struct ieee80211_rx_stats *rxs)
755 {
756 struct m_tag *mtag;
757 struct ieee80211_rx_params *rx;
758
759 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
760 NULL);
761 if (mtag == NULL)
762 return (-1);
763 rx = (struct ieee80211_rx_params *)(mtag + 1);
764 memcpy(rxs, &rx->params, sizeof(*rxs));
765 return (0);
766 }
767
768 const struct ieee80211_rx_stats *
769 ieee80211_get_rx_params_ptr(struct mbuf *m)
770 {
771 struct m_tag *mtag;
772 struct ieee80211_rx_params *rx;
773
774 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
775 NULL);
776 if (mtag == NULL)
777 return (NULL);
778 rx = (struct ieee80211_rx_params *)(mtag + 1);
779 return (&rx->params);
780 }
781
782
783 /*
784 * Add TOA parameters to the given mbuf.
785 */
786 int
787 ieee80211_add_toa_params(struct mbuf *m, const struct ieee80211_toa_params *p)
788 {
789 struct m_tag *mtag;
790 struct ieee80211_toa_params *rp;
791
792 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
793 sizeof(struct ieee80211_toa_params), M_NOWAIT);
794 if (mtag == NULL)
795 return (0);
796
797 rp = (struct ieee80211_toa_params *)(mtag + 1);
798 memcpy(rp, p, sizeof(*rp));
799 m_tag_prepend(m, mtag);
800 return (1);
801 }
802
803 int
804 ieee80211_get_toa_params(struct mbuf *m, struct ieee80211_toa_params *p)
805 {
806 struct m_tag *mtag;
807 struct ieee80211_toa_params *rp;
808
809 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
810 NULL);
811 if (mtag == NULL)
812 return (0);
813 rp = (struct ieee80211_toa_params *)(mtag + 1);
814 if (p != NULL)
815 memcpy(p, rp, sizeof(*p));
816 return (1);
817 }
818
819 /*
820 * Transmit a frame to the parent interface.
821 */
/*
 * Hand the frame to the parent device.  On error the node reference
 * and mbuf are reclaimed here (and OERRORS bumped); callers must not
 * free them again.
 */
int
ieee80211_parent_xmitpkt(struct ieee80211com *ic, struct mbuf *m)
{
	int error;
	printf ("ieee80211_parent_xmitpkt called\n");	/* XXX debug leftover? */
	/*
	 * Assert the IC TX lock is held - this enforces the
	 * processing -> queuing order is maintained
	 */
	IEEE80211_TX_LOCK_ASSERT(ic);
	error = ic->ic_transmit(ic, m);
	if (error) {
		struct ieee80211_node *ni;

		/* The node reference rides in the mbuf's rcvif. */
		ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);

		/* XXX number of fragments */
		if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
		ieee80211_free_node(ni);
		ieee80211_free_mbuf(m);
	}
	return (error);
}
845
846 /*
847 * Transmit a frame to the VAP interface.
848 */
849 int
850 ieee80211_vap_xmitpkt(struct ieee80211vap *vap, struct mbuf *m)
851 {
852 struct ifnet *ifp = vap->iv_ifp;
853
854 /*
855 * When transmitting via the VAP, we shouldn't hold
856 * any IC TX lock as the VAP TX path will acquire it.
857 */
858 IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic);
859
860 return (ifp->if_transmit(ifp, m));
861
862 }
863
/*
 * Fill the buffer at p with n bytes from arc4random().
 * Draws 32 bits at a time; the final copy is truncated to the bytes
 * remaining so the destination is never overrun.
 */
void
get_random_bytes(void *p, size_t n)
{
	uint8_t *dp = p;

	while (n > 0) {
		uint32_t v = arc4random();
		size_t nb = n > sizeof(v) ? sizeof(v) : n;

		memcpy(dp, &v, nb);	/* memcpy, not deprecated bcopy */
		dp += nb;
		n -= nb;
	}
}
876
/*
 * Helper function for events that pass just a single mac address.
 * Builds a join-event record and posts it on the routing socket.
 */
static void
notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct ieee80211_join_event iev;
	printf ("NNN notify_macaddr called\n");	/* XXX debug leftover */
	CURVNET_SET(ifp->if_vnet);
	memset(&iev, 0, sizeof(iev));	/* zero so no stack garbage leaks */
	IEEE80211_ADDR_COPY(iev.iev_addr, mac);
	rt_ieee80211msg(ifp, op, &iev, sizeof(iev));
	CURVNET_RESTORE();
}
891
892 void
893 ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
894 {
895 struct ieee80211vap *vap = ni->ni_vap;
896 struct ifnet *ifp = vap->iv_ifp;
897
898 CURVNET_SET_QUIET(ifp->if_vnet);
899 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
900 (ni == vap->iv_bss) ? "bss " : "");
901
902 if (ni == vap->iv_bss) {
903 notify_macaddr(ifp, newassoc ?
904 RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
905 if_link_state_change(ifp, LINK_STATE_UP);
906 } else {
907 notify_macaddr(ifp, newassoc ?
908 RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
909 }
910 CURVNET_RESTORE();
911 }
912
913 void
914 ieee80211_notify_node_leave(struct ieee80211_node *ni)
915 {
916 struct ieee80211vap *vap = ni->ni_vap;
917 struct ifnet *ifp = vap->iv_ifp;
918
919 CURVNET_SET_QUIET(ifp->if_vnet);
920 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
921 (ni == vap->iv_bss) ? "bss " : "");
922
923 if (ni == vap->iv_bss) {
924 rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
925 if_link_state_change(ifp, LINK_STATE_DOWN);
926 } else {
927 /* fire off wireless event station leaving */
928 notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
929 }
930 CURVNET_RESTORE();
931 }
932
933 void
934 ieee80211_notify_scan_done(struct ieee80211vap *vap)
935 {
936 struct ifnet *ifp = vap->iv_ifp;
937
938 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
939
940 /* dispatch wireless event indicating scan completed */
941 CURVNET_SET(ifp->if_vnet);
942 rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0);
943 CURVNET_RESTORE();
944 }
945
946 void
947 ieee80211_notify_replay_failure(struct ieee80211vap *vap,
948 const struct ieee80211_frame *wh, const struct ieee80211_key *k,
949 u_int64_t rsc, int tid)
950 {
951 struct ifnet *ifp = vap->iv_ifp;
952
953 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
954 "%s replay detected tid %d <rsc %ju, csc %ju, keyix %u rxkeyix %u>",
955 k->wk_cipher->ic_name, tid, (intmax_t) rsc,
956 (intmax_t) k->wk_keyrsc[tid],
957 k->wk_keyix, k->wk_rxkeyix);
958
959 if (ifp != NULL) { /* NB: for cipher test modules */
960 struct ieee80211_replay_event iev;
961
962 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
963 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
964 iev.iev_cipher = k->wk_cipher->ic_cipher;
965 if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE)
966 iev.iev_keyix = k->wk_rxkeyix;
967 else
968 iev.iev_keyix = k->wk_keyix;
969 iev.iev_keyrsc = k->wk_keyrsc[tid];
970 iev.iev_rsc = rsc;
971 CURVNET_SET(ifp->if_vnet);
972 rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev));
973 CURVNET_RESTORE();
974 }
975 }
976
977 void
978 ieee80211_notify_michael_failure(struct ieee80211vap *vap,
979 const struct ieee80211_frame *wh, u_int keyix)
980 {
981 struct ifnet *ifp = vap->iv_ifp;
982
983 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
984 "michael MIC verification failed <keyix %u>", keyix);
985 vap->iv_stats.is_rx_tkipmic++;
986
987 if (ifp != NULL) { /* NB: for cipher test modules */
988 struct ieee80211_michael_event iev;
989
990 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
991 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
992 iev.iev_cipher = IEEE80211_CIPHER_TKIP;
993 iev.iev_keyix = keyix;
994 CURVNET_SET(ifp->if_vnet);
995 rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev));
996 CURVNET_RESTORE();
997 }
998 }
999
1000 void
1001 ieee80211_notify_wds_discover(struct ieee80211_node *ni)
1002 {
1003 struct ieee80211vap *vap = ni->ni_vap;
1004 struct ifnet *ifp = vap->iv_ifp;
1005
1006 notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr);
1007 }
1008
1009 void
1010 ieee80211_notify_csa(struct ieee80211com *ic,
1011 const struct ieee80211_channel *c, int mode, int count)
1012 {
1013 struct ieee80211_csa_event iev;
1014 struct ieee80211vap *vap;
1015 struct ifnet *ifp;
1016
1017 memset(&iev, 0, sizeof(iev));
1018 iev.iev_flags = c->ic_flags;
1019 iev.iev_freq = c->ic_freq;
1020 iev.iev_ieee = c->ic_ieee;
1021 iev.iev_mode = mode;
1022 iev.iev_count = count;
1023 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1024 ifp = vap->iv_ifp;
1025 CURVNET_SET(ifp->if_vnet);
1026 rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev));
1027 CURVNET_RESTORE();
1028 }
1029 }
1030
1031 void
1032 ieee80211_notify_radar(struct ieee80211com *ic,
1033 const struct ieee80211_channel *c)
1034 {
1035 struct ieee80211_radar_event iev;
1036 struct ieee80211vap *vap;
1037 struct ifnet *ifp;
1038
1039 memset(&iev, 0, sizeof(iev));
1040 iev.iev_flags = c->ic_flags;
1041 iev.iev_freq = c->ic_freq;
1042 iev.iev_ieee = c->ic_ieee;
1043 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1044 ifp = vap->iv_ifp;
1045 CURVNET_SET(ifp->if_vnet);
1046 rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev));
1047 CURVNET_RESTORE();
1048 }
1049 }
1050
1051 void
1052 ieee80211_notify_cac(struct ieee80211com *ic,
1053 const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type)
1054 {
1055 struct ieee80211_cac_event iev;
1056 struct ieee80211vap *vap;
1057 struct ifnet *ifp;
1058
1059 memset(&iev, 0, sizeof(iev));
1060 iev.iev_flags = c->ic_flags;
1061 iev.iev_freq = c->ic_freq;
1062 iev.iev_ieee = c->ic_ieee;
1063 iev.iev_type = type;
1064 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1065 ifp = vap->iv_ifp;
1066 CURVNET_SET(ifp->if_vnet);
1067 rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev));
1068 CURVNET_RESTORE();
1069 }
1070 }
1071
1072 void
1073 ieee80211_notify_node_deauth(struct ieee80211_node *ni)
1074 {
1075 struct ieee80211vap *vap = ni->ni_vap;
1076 struct ifnet *ifp = vap->iv_ifp;
1077
1078 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth");
1079
1080 notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr);
1081 }
1082
1083 void
1084 ieee80211_notify_node_auth(struct ieee80211_node *ni)
1085 {
1086 struct ieee80211vap *vap = ni->ni_vap;
1087 struct ifnet *ifp = vap->iv_ifp;
1088
1089 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth");
1090
1091 notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr);
1092 }
1093
1094 void
1095 ieee80211_notify_country(struct ieee80211vap *vap,
1096 const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2])
1097 {
1098 struct ifnet *ifp = vap->iv_ifp;
1099 struct ieee80211_country_event iev;
1100
1101 memset(&iev, 0, sizeof(iev));
1102 IEEE80211_ADDR_COPY(iev.iev_addr, bssid);
1103 iev.iev_cc[0] = cc[0];
1104 iev.iev_cc[1] = cc[1];
1105 CURVNET_SET(ifp->if_vnet);
1106 rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev));
1107 CURVNET_RESTORE();
1108 }
1109
1110 void
1111 ieee80211_notify_radio(struct ieee80211com *ic, int state)
1112 {
1113 struct ieee80211_radio_event iev;
1114 struct ieee80211vap *vap;
1115 struct ifnet *ifp;
1116
1117 memset(&iev, 0, sizeof(iev));
1118 iev.iev_state = state;
1119 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1120 ifp = vap->iv_ifp;
1121 CURVNET_SET(ifp->if_vnet);
1122 rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev));
1123 CURVNET_RESTORE();
1124 }
1125 }
1126
#ifdef notyet
/*
 * Demand-load a kernel module by name on behalf of the 802.11 layer.
 * FreeBSD-only: relies on suser()/securelevel_gt() and the kernel
 * linker (with Giant held), so it is compiled out until a NetBSD
 * module(9) equivalent is wired up.
 */
void
ieee80211_load_module(const char *modname)
{
	struct thread *td = curthread;

	/* Only privileged callers at securelevel <= 0 may trigger a load. */
	if (suser(td) == 0 && securelevel_gt(td->td_ucred, 0) == 0) {
		mtx_lock(&Giant);
		(void) linker_load_module(modname, NULL, NULL, NULL, NULL);
		mtx_unlock(&Giant);
	}
}
#endif
1140
#ifdef notyet
/* Eventhandler tags, kept so the handlers can be deregistered on unload. */
static eventhandler_tag wlan_bpfevent;
static eventhandler_tag wlan_ifllevent;

/*
 * bpf attach/detach callback (FreeBSD eventhandler, compiled out):
 * keeps per-vap and per-com radiotap-listener state in sync so drivers
 * only build radiotap headers when someone is actually listening.
 */
static void
bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
{
	/* NB: identify vap's by if_init */ // NNN won't work with urtwn ...
	if (dlt == DLT_IEEE802_11_RADIO &&
	    ifp->if_init == ieee80211_init) {
		struct ieee80211vap *vap = ifp->if_softc;
		/*
		 * Track bpf radiotap listener state. We mark the vap
		 * to indicate if any listener is present and the com
		 * to indicate if any listener exists on any associated
		 * vap. This flag is used by drivers to prepare radiotap
		 * state only when needed.
		 */
		if (attach) {
			ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
			if (vap->iv_opmode == IEEE80211_M_MONITOR)
				atomic_add_int(&vap->iv_ic->ic_montaps, 1);
		} else if (!bpf_peers_present(vap->iv_rawbpf)) {
			/* Last listener detached: clear flag, drop tap count. */
			ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
			if (vap->iv_opmode == IEEE80211_M_MONITOR)
				atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
		}
	}
}
1170
/*
 * Change MAC address on the vap (if was not started).
 * FreeBSD iflladdr_event callback (compiled out): mirrors a new
 * interface link-layer address into the vap's iv_myaddr, but only
 * while the interface is down.
 */
static void
wlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	/* NB: identify vap's by if_init */ // NNN wont work on urtwn
	if (ifp->if_init == ieee80211_init &&
	    (ifp->if_flags & IFF_UP) == 0) {
		struct ieee80211vap *vap = ifp->if_softc;

		/* Copy the updated lladdr into the vap's notion of its MAC. */
		IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
	}
}
#endif
1186
1187 void
1188 if_inc_counter(struct ifnet *ifp, ift_counter ifc, int64_t value)
1189 {
1190 switch (ifc) {
1191 case IFCOUNTER_IPACKETS:
1192 ifp->if_data.ifi_ipackets += value;
1193 break;
1194 case IFCOUNTER_IERRORS:
1195 ifp->if_data.ifi_ierrors += value;
1196 break;
1197 case IFCOUNTER_OPACKETS:
1198 ifp->if_data.ifi_opackets += value;
1199 break;
1200 case IFCOUNTER_OERRORS:
1201 ifp->if_data.ifi_oerrors += value;
1202 break;
1203 case IFCOUNTER_COLLISIONS:
1204 ifp->if_data.ifi_collisions += value;
1205 break;
1206 case IFCOUNTER_IBYTES:
1207 ifp->if_data.ifi_ibytes += value;
1208 break;
1209 case IFCOUNTER_OBYTES:
1210 ifp->if_data.ifi_obytes += value;
1211 break;
1212 case IFCOUNTER_IMCASTS:
1213 ifp->if_data.ifi_imcasts += value;
1214 break;
1215 case IFCOUNTER_OMCASTS:
1216 ifp->if_data.ifi_omcasts += value;
1217 break;
1218 case IFCOUNTER_IQDROPS:
1219 ifp->if_data.ifi_iqdrops += value;
1220 break;
1221 case IFCOUNTER_OQDROPS:
1222 /* ifp->if_data.ifi_oqdrops += value; No such field, just ignore it q*/
1223 break;
1224 case IFCOUNTER_NOPROTO:
1225 ifp->if_data.ifi_noproto += value;
1226 break;
1227 default:
1228 panic("if_inc_counter: non-existant counter");
1229 }
1230 }
1231
1232
#ifdef notyet
/*
 * Module glue.
 *
 * NB: the module name is "wlan" for compatibility with NetBSD.
 */
/*
 * FreeBSD-style module event handler (compiled out): on load it
 * registers the bpf and link-layer-address event handlers and the
 * "wlan" interface cloner; on unload it tears them down again.
 */
static int
wlan_modevent(module_t mod, int type, void *unused)
{
	switch (type) {
	case MOD_LOAD:
		if (bootverbose)
			printf("wlan: <802.11 Link Layer>\n");
		wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track,
		    bpf_track, 0, EVENTHANDLER_PRI_ANY);
		wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
		    wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
		wlan_cloner = if_clone_simple(wlanname, wlan_clone_create,
		    wlan_clone_destroy, 0);
		return 0;
	case MOD_UNLOAD:
		/* Undo the MOD_LOAD registrations in reverse order. */
		if_clone_detach(wlan_cloner);
		EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
		EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent);
		return 0;
	}
	return EINVAL;
}

/* Module descriptor tying the "wlan" name to the event handler above. */
static moduledata_t wlan_mod = {
	wlanname,
	wlan_modevent,
	0
};
DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(wlan, 1);
MODULE_DEPEND(wlan, ether, 1, 1, 1);
#endif
1271
#ifdef IEEE80211_ALQ
/*
 * NOTE(review): this dependency sits OUTSIDE the "#ifdef notyet" module
 * glue above — confirm MODULE_DEPEND expands to something valid (or a
 * no-op) in the NetBSD compatibility headers when IEEE80211_ALQ is set.
 */
MODULE_DEPEND(wlan, alq, 1, 1, 1);
#endif /* IEEE80211_ALQ */
1275
1276 /* Missing support for if_printf in NetBSD ... */
1277 int
1278 if_printf(struct ifnet *ifp, const char *fmt, ...)
1279 {
1280 char if_fmt[256];
1281 va_list ap;
1282
1283 snprintf(if_fmt, sizeof(if_fmt), "%s: %s", ifp->if_xname, fmt);
1284 va_start(ap, fmt);
1285 vlog(LOG_INFO, if_fmt, ap);
1286 va_end(ap);
1287 return (0);
1288 }
1289
1290 /*
1291 * Set the m_data pointer of a newly-allocated mbuf
1292 * to place an object of the specified size at the
1293 * end of the mbuf, longword aligned.
1294 */
1295 void
1296 m_align(struct mbuf *m, int len)
1297 {
1298 int adjust;
1299
1300 KASSERT(len != M_COPYALL);
1301
1302 if (m->m_flags & M_EXT)
1303 adjust = m->m_ext.ext_size - len;
1304 else if (m->m_flags & M_PKTHDR)
1305 adjust = MHLEN - len;
1306 else
1307 adjust = MLEN - len;
1308 m->m_data += adjust &~ (sizeof(long)-1);
1309 }
1310
1311 /*
1312 * Append the specified data to the indicated mbuf chain,
1313 * Extend the mbuf chain if the new data does not fit in
1314 * existing space.
1315 *
1316 * Return 1 if able to complete the job; otherwise 0.
1317 */
1318 int
1319 m_append(struct mbuf *m0, int len, const void *cpv)
1320 {
1321 struct mbuf *m, *n;
1322 int remainder, space;
1323 const char *cp = cpv;
1324
1325 KASSERT(len != M_COPYALL);
1326 for (m = m0; m->m_next != NULL; m = m->m_next)
1327 continue;
1328 remainder = len;
1329 space = M_TRAILINGSPACE(m);
1330 if (space > 0) {
1331 /*
1332 * Copy into available space.
1333 */
1334 if (space > remainder)
1335 space = remainder;
1336 memmove(mtod(m, char *) + m->m_len, cp, space);
1337 m->m_len += space;
1338 cp = cp + space, remainder -= space;
1339 }
1340 while (remainder > 0) {
1341 /*
1342 * Allocate a new mbuf; could check space
1343 * and allocate a cluster instead.
1344 */
1345 n = m_get(M_DONTWAIT, m->m_type);
1346 if (n == NULL)
1347 break;
1348 n->m_len = min(MLEN, remainder);
1349 memmove(mtod(n, void *), cp, n->m_len);
1350 cp += n->m_len, remainder -= n->m_len;
1351 m->m_next = n;
1352 m = n;
1353 }
1354 if (m0->m_flags & M_PKTHDR)
1355 m0->m_pkthdr.len += len - remainder;
1356 return (remainder == 0);
1357 }
1358
/*
 * Create a writable copy of the mbuf chain. While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs. The second mbuf in this chain is likely
 * to be a cluster. The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc. The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 *
 * Returns the (possibly new) head of the chain, or NULL on
 * allocation failure — in which case the whole original chain has
 * been freed.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	/*
	 * mprev trails m through the chain; after m is replaced or
	 * coalesced away, the loop resumes from mprev->m_next so the
	 * substituted mbuf(s) are not re-examined.
	 */
	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce. We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster). This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster. This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
				    mtod(m, __uint8_t *), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (!M_READONLY(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		FBSDKASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
			    mtod(m, __uint8_t *), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters. We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			/* On failure the entire original chain is freed. */
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			/* A pkthdr mbuf can only be the head of the chain. */
			FBSDKASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		/*
		 * Copy m's data into one or more fresh clusters,
		 * MCLBYTES at a time; mfirst..mlast is the replacement
		 * sub-chain being built.
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, __uint8_t *), mtod(m, __uint8_t *) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				/* Free the partial copy, then the original. */
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		/* Splice the replacement sub-chain in place of m. */
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
1485