ieee80211_netbsd.c revision 1.31.2.7 1 /* $NetBSD: ieee80211_netbsd.c,v 1.31.2.7 2018/08/15 17:07:03 phil Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 2003-2009 Sam Leffler, Errno Consulting
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 /* __FBSDID("$FreeBSD$"); */
32 __KERNEL_RCSID(0, "$NetBSD: ieee80211_netbsd.c,v 1.31.2.7 2018/08/15 17:07:03 phil Exp $");
33
34 /*
35 * IEEE 802.11 support (NetBSD-specific code)
36 */
37
38 #include "opt_wlan.h"
39
40 #include <sys/atomic.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/module.h>
47 #include <sys/proc.h>
48 #include <sys/sysctl.h>
49 #include <sys/syslog.h>
50
51 #include <sys/socket.h>
52
53 #include <net/bpf.h>
54 #include <net/if.h>
55 #include <net/if_dl.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <net/route.h>
60
61 #include <net80211/ieee80211_var.h>
62 #include <net80211/ieee80211_input.h>
63
64 static const struct sysctlnode *
65 ieee80211_sysctl_treetop(struct sysctllog **log);
66 static void ieee80211_sysctl_setup(void);
67
68 /* NNN in .h file? */
69 #define SYSCTL_HANDLER_ARGS SYSCTLFN_ARGS
70
71 #ifdef IEEE80211_DEBUG
72 static int ieee80211_debug = 0;
73 #endif
74
75 #ifdef notyet
76 static struct if_clone *wlan_cloner;
77 #endif
78 /* notyet */
79
80 static const char wlanname[] = "wlan";
81
/*
 * One-time net80211 initialization: create the net.link.ieee80211
 * sysctl subtree.  Always succeeds.
 */
int
ieee80211_init0(void)
{

	ieee80211_sysctl_setup();
	return 0;
}
88
89 /*
90 * "taskqueue" support for doing FreeBSD style taskqueue operations using
91 * NetBSD's workqueue to do the actual function calls for the work.
92 * Many features of the FreeBSD taskqueue are not implemented. This should
93 * be enough features for the 802.11 stack to run its tasks and time delayed
94 * tasks.
95 */
96
97 void
98 ieee80211_runwork(struct work *work2do, void *arg)
99 {
100 struct task *work_task = (struct task *) work2do;
101 #ifdef IEEE80211_DEBUG
102 printf ("runwork: %s (t_arg is 0x%lx)\n",
103 work_task->t_func_name, (long)work_task->t_arg);
104 #endif
105 mutex_enter(&work_task->t_mutex);
106 work_task->t_onqueue = 0;
107 mutex_exit(&work_task->t_mutex);
108
109 work_task->t_func(work_task->t_arg, 0);
110 }
111
112 void
113 taskqueue_enqueue(struct workqueue *wq, struct task *task_item)
114 {
115 mutex_enter(&task_item->t_mutex);
116 if (!task_item->t_onqueue) {
117 workqueue_enqueue(wq, &task_item->t_work, NULL);
118 task_item->t_onqueue = 1;
119 }
120 mutex_exit(&task_item->t_mutex);
121 }
122
123 void
124 taskqueue_drain(struct workqueue *wq, struct task *task_item)
125 {
126 workqueue_wait(wq, &task_item->t_work);
127 }
128
129 static void
130 taskqueue_callout_enqueue(void *arg)
131 {
132 struct timeout_task *timeout_task = arg;
133 mutex_enter(&timeout_task->to_task.t_mutex);
134 timeout_task->to_scheduled = 0;
135 mutex_exit(&timeout_task->to_task.t_mutex);
136
137 taskqueue_enqueue(timeout_task->to_wq, (struct task*) timeout_task);
138 }
139
140 int
141 taskqueue_enqueue_timeout(struct workqueue *queue,
142 struct timeout_task *timeout_task, int nticks)
143 {
144 mutex_enter(&timeout_task->to_task.t_mutex);
145 if (!timeout_task->to_scheduled) {
146 callout_reset(&timeout_task->to_callout, nticks,
147 taskqueue_callout_enqueue, timeout_task);
148 timeout_task->to_scheduled = 1;
149 }
150 mutex_exit(&timeout_task->to_task.t_mutex);
151
152 return -1;
153 }
154
/*
 * Cancel a pending timed task.  NOT IMPLEMENTED: this stub does not
 * stop the callout and always returns -1; *pendp is never updated.
 * NOTE(review): callers expecting FreeBSD taskqueue_cancel_timeout()
 * semantics (0 on successful cancel) will always observe failure.
 */
int
taskqueue_cancel_timeout(struct workqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	// printf ("taskqueue_cancel_timeout called\n");
	return -1;
}
162
/*
 * Wait for a timed task to finish.  NOT IMPLEMENTED: this stub
 * returns immediately without draining the callout or workqueue.
 */
void
taskqueue_drain_timeout(struct workqueue *queue,
    struct timeout_task *timeout_task)
{
	// printf ("taskqueue_drain_timeout called\n");
}
169
170
/*
 * Handler for "ifconfig wlanN create": clone a vap on top of an
 * existing 802.11 device.  `params' is a user-space pointer to a
 * struct ieee80211_clone_params naming the parent, opmode, flags
 * and (optionally) a MAC address.
 *
 * Returns 0 on success, or: error from copyin, ENXIO (unknown
 * parent), EINVAL (bad opmode), EOPNOTSUPP (mode/TDMA unsupported),
 * EIO (driver vap creation failed).
 */
static __unused int
wlan_clone_create(struct if_clone *ifc, int unit, void * params)
{
	struct ieee80211_clone_params cp;
	struct ieee80211vap *vap;
	struct ieee80211com *ic;
	int error;

	/* fetch the clone parameters from user space */
	error = copyin(params, &cp, sizeof(cp));
	if (error)
		return error;
	ic = ieee80211_find_com(cp.icp_parent);
	if (ic == NULL)
		return ENXIO;
	if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) {
		ic_printf(ic, "%s: invalid opmode %d\n", __func__,
		    cp.icp_opmode);
		return EINVAL;
	}
	/* the parent device must advertise the requested operating mode */
	if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) {
		ic_printf(ic, "%s mode not supported\n",
		    ieee80211_opmode_name[cp.icp_opmode]);
		return EOPNOTSUPP;
	}
	/*
	 * TDMA additionally requires compiled-in support and the
	 * IEEE80211_C_TDMA device capability.
	 */
	if ((cp.icp_flags & IEEE80211_CLONE_TDMA) &&
#ifdef IEEE80211_SUPPORT_TDMA
	    (ic->ic_caps & IEEE80211_C_TDMA) == 0
#else
	    (1)
#endif
	) {
		ic_printf(ic, "TDMA not supported\n");
		return EOPNOTSUPP;
	}
	/* hand off to the driver; use the device MAC unless one was given */
	vap = ic->ic_vap_create(ic, wlanname, unit,
	    cp.icp_opmode, cp.icp_flags, cp.icp_bssid,
	    cp.icp_flags & IEEE80211_CLONE_MACADDR ?
	    cp.icp_macaddr : ic->ic_macaddr);

	return (vap == NULL ? EIO : 0);
}
212
213 static __unused void
214 wlan_clone_destroy(struct ifnet *ifp)
215 {
216 struct ieee80211vap *vap = ifp->if_softc;
217 struct ieee80211com *ic = vap->iv_ic;
218
219 ic->ic_vap_delete(vap);
220 }
221
/*
 * Destroy a vap.  The FreeBSD path (if_clone_destroyif) is not yet
 * wired up on NetBSD, so for now this only logs that it was called.
 */
void
ieee80211_vap_destroy(struct ieee80211vap *vap)
{
#ifdef notyet
	CURVNET_SET(vap->iv_ifp->if_vnet);
	if_clone_destroyif(wlan_cloner, vap->iv_ifp);
	CURVNET_RESTORE();
#else
	printf ("vap_destroy called ... what next?\n");
#endif
}
233
#ifdef notyet
/*
 * FreeBSD-style sysctl handler: expose a tick count in milliseconds,
 * converting a written value back to ticks and clamping to >= 1.
 */
int
ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS)
{
	int msecs = ticks_to_msecs(*(int *)arg1);
	int error, t;

	error = sysctl_handle_int(oidp, &msecs, 0, req);
	if (error || !req->newptr)
		return error;
	t = msecs_to_ticks(msecs);
	*(int *)arg1 = (t < 1) ? 1 : t;
	return 0;
}

/*
 * FreeBSD-style sysctl handler: expose an inactivity timeout in
 * seconds; the stored value is in IEEE80211_INACT_WAIT units.
 */
static int
ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)
{
	int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT;
	int error;

	error = sysctl_handle_int(oidp, &inact, 0, req);
	if (error || !req->newptr)
		return error;
	*(int *)arg1 = inact / IEEE80211_INACT_WAIT;
	return 0;
}
#endif
262
263 static int
264 ieee80211_sysctl_parent(SYSCTLFN_ARGS)
265 {
266 struct ieee80211vap *vap;
267 char pname[IFNAMSIZ];
268 struct sysctlnode node;
269
270 node = *rnode;
271 vap = node.sysctl_data;
272 strlcpy(pname, vap->iv_ifp->if_xname, IFNAMSIZ);
273 node.sysctl_data = pname;
274 return sysctl_lookup(SYSCTLFN_CALL(&node));
275 }
276
#ifdef notyet
/*
 * FreeBSD-style sysctl handler: writing any value simulates a radar
 * event on the current channel (for DFS testing).
 */
static int
ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS)
{
	struct ieee80211com *ic = arg1;
	int t = 0, error;

	error = sysctl_handle_int(oidp, &t, 0, req);
	if (error || !req->newptr)
		return error;
	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
	return 0;
}

/*
 * FreeBSD-style sysctl handler: writing any value forces a restart.
 * For now, just restart everything.
 *
 * Later on, it'd be nice to have a separate VAP restart to
 * full-device restart.
 */
static int
ieee80211_sysctl_vap_restart(SYSCTL_HANDLER_ARGS)
{
	struct ieee80211vap *vap = arg1;
	int t = 0, error;

	error = sysctl_handle_int(oidp, &t, 0, req);
	if (error || !req->newptr)
		return error;

	ieee80211_restart_all(vap->iv_ic);
	return 0;
}
#endif /* notyet */
313
/*
 * Per-device (ieee80211com) sysctl attach hook.  Nothing to do on
 * NetBSD yet: the shared tree is created once in ieee80211_sysctl_setup().
 */
void
ieee80211_sysctl_attach(struct ieee80211com *ic)
{
}
318
/*
 * Per-device (ieee80211com) sysctl detach hook; counterpart of
 * ieee80211_sysctl_attach(), currently also a no-op.
 */
void
ieee80211_sysctl_detach(struct ieee80211com *ic)
{
}
323
324 /*
325 * Setup sysctl(3) MIB, net.ieee80211.*
326 *
327 * TBD condition CTLFLAG_PERMANENT on being a module or not
328 */
/* record of created nodes, for teardown when built as a module */
static struct sysctllog *ieee80211_sysctllog;

/*
 * Create the net.link.ieee80211 subtree and (when IEEE80211_DEBUG)
 * the "debug" knob underneath it.  Called once from ieee80211_init0().
 *
 * NOTE(review): with neither "notyet" nor IEEE80211_DEBUG defined,
 * `rc' is never read and the err: label is unreachable, which may
 * draw compiler warnings — confirm build options before tidying.
 */
static void
ieee80211_sysctl_setup(void)
{
	int rc;
	const struct sysctlnode *rnode;

	if ((rnode = ieee80211_sysctl_treetop(&ieee80211_sysctllog)) == NULL)
		return;

#ifdef notyet
	if ((rc = sysctl_createv(&ieee80211_sysctllog, 0, &rnode, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "nodes", "client/peer stations",
	    ieee80211_sysctl_node, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif

#ifdef IEEE80211_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(&ieee80211_sysctllog, 0, &rnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("control debugging printfs"),
	    NULL, 0, &ieee80211_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif

#ifdef notyet
	ieee80211_rssadapt_sysctl_setup(&ieee80211_sysctllog);
#endif

	return;
err:
	printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
363
364 /*
365 * Create or get top of sysctl tree net.link.ieee80211.
366 */
367 static const struct sysctlnode *
368 ieee80211_sysctl_treetop(struct sysctllog **log)
369 {
370 int rc;
371 const struct sysctlnode *rnode;
372
373 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
374 CTLFLAG_PERMANENT, CTLTYPE_NODE, "link",
375 "link-layer statistics and controls",
376 NULL, 0, NULL, 0, CTL_NET, PF_LINK, CTL_EOL)) != 0)
377 goto err;
378
379 if ((rc = sysctl_createv(log, 0, &rnode, &rnode,
380 CTLFLAG_PERMANENT, CTLTYPE_NODE, "ieee80211",
381 "IEEE 802.11 WLAN statistics and controls",
382 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
383 goto err;
384
385 return rnode;
386 err:
387 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
388 return NULL;
389 }
390
/*
 * Create the per-vap sysctl nodes (net.link.ieee80211.vapN.*).
 * Currently only the parent device name is exposed; the large
 * "notyet" section below is the FreeBSD sysctl set still to be
 * converted to sysctl_createv().
 */
void
ieee80211_sysctl_vattach(struct ieee80211vap *vap)
{
	int rc;
	const struct sysctlnode *cnode, *rnode;
	char num[sizeof("vap") + 14];		/* sufficient for 32 bits */

	if ((rnode = ieee80211_sysctl_treetop(NULL)) == NULL)
		return;

	/* node is named after the interface index, e.g. "vap3" */
	snprintf(num, sizeof(num), "vap%u", vap->iv_ifp->if_index);

	if ((rc = sysctl_createv(&vap->iv_sysctllog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, num, SYSCTL_DESCR("virtual AP"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	/* read-only name of the underlying device */
	if ((rc = sysctl_createv(&vap->iv_sysctllog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_STRING,
	    "parent", SYSCTL_DESCR("parent device"),
	    ieee80211_sysctl_parent, 0, (void *)vap, IFNAMSIZ,
	    CTL_CREATE, CTL_EOL)) != 0)
		goto err;


#ifdef notyet
	/*
	 * NOTE(review): this FreeBSD block redeclares `num', shadowing
	 * the buffer above — resolve before enabling it.
	 */
	struct ifnet *ifp = vap->iv_ifp;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	char num[14];			/* sufficient for 32 bits */

	ctx = (struct sysctl_ctx_list *) IEEE80211_MALLOC(sizeof(struct sysctl_ctx_list),
	    M_DEVBUF, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
	if (ctx == NULL) {
		if_printf(ifp, "%s: cannot allocate sysctl context!\n",
		    __func__);
		return;
	}
	sysctl_ctx_init(ctx);
	snprintf(num, sizeof(num), "%u", ifp->if_dunit);
	oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD, vap->iv_ic, 0,
	    ieee80211_sysctl_parent, "A", "parent device");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "driver_caps", CTLFLAG_RW, &vap->iv_caps, 0,
	    "driver capabilities");
#ifdef IEEE80211_DEBUG
	vap->iv_debug = ieee80211_debug;
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "debug", CTLFLAG_RW, &vap->iv_debug, 0,
	    "control debugging printfs");
#endif
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0,
	    "consecutive beacon misses before scanning");
	/* XXX inherit from tunables */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_run", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_run, 0,
	    ieee80211_sysctl_inact, "I",
	    "station inactivity timeout (sec)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_probe", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_probe, 0,
	    ieee80211_sysctl_inact, "I",
	    "station inactivity probe timeout (sec)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_auth", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_auth, 0,
	    ieee80211_sysctl_inact, "I",
	    "station authentication timeout (sec)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "inact_init", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_init, 0,
	    ieee80211_sysctl_inact, "I",
	    "station initial state timeout (sec)");
	if (vap->iv_htcaps & IEEE80211_HTC_HT) {
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_bk", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_BK], 0,
		    "BK traffic tx aggr threshold (pps)");
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_be", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_BE], 0,
		    "BE traffic tx aggr threshold (pps)");
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_vo", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_VO], 0,
		    "VO traffic tx aggr threshold (pps)");
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "ampdu_mintraffic_vi", CTLFLAG_RW,
		    &vap->iv_ampdu_mintraffic[WME_AC_VI], 0,
		    "VI traffic tx aggr threshold (pps)");
	}

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "force_restart", CTLTYPE_INT | CTLFLAG_RW, vap, 0,
	    ieee80211_sysctl_vap_restart, "I",
	    "force a VAP restart");

	if (vap->iv_caps & IEEE80211_C_DFS) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    "radar", CTLTYPE_INT | CTLFLAG_RW, vap->iv_ic, 0,
		    ieee80211_sysctl_radar, "I", "simulate radar event");
	}
	vap->iv_sysctl = ctx;
	vap->iv_oid = oid;
#endif
	return;
err:
	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
}
502
503 void
504 ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
505 {
506 #ifdef notyet
507 if (vap->iv_sysctl != NULL) {
508 sysctl_ctx_free(vap->iv_sysctl);
509 IEEE80211_FREE(vap->iv_sysctl, M_DEVBUF);
510 vap->iv_sysctl = NULL;
511 }
512 #endif
513 }
514
515
/*
 * Drop one reference to a node and test whether it was the last.
 * Returns non-zero iff the count reached zero; in that case the
 * count is atomically set back to 1 so the caller holds the final
 * reference while freeing the node.  atomic_cas_uint() returns the
 * old value, so "== 0" means we performed the 0 -> 1 swap and no
 * other thread raced us.
 */
int
ieee80211_node_dectestref(struct ieee80211_node *ni)
{
	/* XXX need equivalent of atomic_dec_and_test */
	atomic_subtract_int(&ni->ni_refcnt, 1);
	return atomic_cas_uint(&ni->ni_refcnt, 0, 1) == 0;
}
523
524 void
525 ieee80211_drain_ifq(struct ifqueue *ifq)
526 {
527 struct ieee80211_node *ni;
528 struct mbuf *m;
529
530 for (;;) {
531 IF_DEQUEUE(ifq, m);
532 if (m == NULL)
533 break;
534
535 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
536 FBSDKASSERT(ni != NULL, ("frame w/o node"));
537 ieee80211_free_node(ni);
538 ieee80211_free_mbuf(m);
539 }
540 }
541
542 void
543 ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
544 {
545 struct ieee80211_node *ni;
546 struct mbuf *m, **mprev;
547
548 IFQ_LOCK(ifq);
549 mprev = &ifq->ifq_head;
550 while ((m = *mprev) != NULL) {
551 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
552 if (ni != NULL && ni->ni_vap == vap) {
553 *mprev = m->m_nextpkt; /* remove from list */
554 ifq->ifq_len--;
555
556 ieee80211_free_node(ni); /* reclaim ref */
557 ieee80211_free_mbuf(m);
558 } else
559 mprev = &m->m_nextpkt;
560 }
561 /* recalculate tail ptr */
562 m = ifq->ifq_head;
563 for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
564 ;
565 ifq->ifq_tail = m;
566 IFQ_UNLOCK(ifq);
567 }
568
/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 *
 * Pushes m_data toward the end of the cluster so a `len'-byte
 * payload ends at MCLBYTES while its start stays long-aligned.
 */
#define MC_ALIGN(m, len) \
	do { \
		(m)->m_data += rounddown2(MCLBYTES - (len), sizeof(long)); \
	} while (/* CONSTCOND */ 0)
577
578 /*
579 * Allocate and setup a management frame of the specified
580 * size. We return the mbuf and a pointer to the start
581 * of the contiguous data area that's been reserved based
582 * on the packet length. The data area is forced to 32-bit
583 * alignment and the buffer length to a multiple of 4 bytes.
584 * This is done mainly so beacon frames (that require this)
585 * can use this interface too.
586 */
587 struct mbuf *
588 ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
589 {
590 struct mbuf *m;
591 u_int len;
592
593 /*
594 * NB: we know the mbuf routines will align the data area
595 * so we don't need to do anything special.
596 */
597 len = roundup2(headroom + pktlen, 4);
598 FBSDKASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
599 if (len < MINCLSIZE) {
600 m = m_gethdr(M_NOWAIT, MT_DATA);
601 /*
602 * Align the data in case additional headers are added.
603 * This should only happen when a WEP header is added
604 * which only happens for shared key authentication mgt
605 * frames which all fit in MHLEN.
606 */
607 if (m != NULL)
608 MH_ALIGN(m, len);
609 } else {
610 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
611 if (m != NULL)
612 MC_ALIGN(m, len);
613 }
614 if (m != NULL) {
615 m->m_data += headroom;
616 *frm = m->m_data;
617 }
618 return m;
619 }
620
621 #ifndef __NO_STRICT_ALIGNMENT
/*
 * Re-align the payload in the mbuf. This is mainly used (right now)
 * to handle IP header alignment requirements on certain architectures.
 *
 * The packet is copied into a freshly allocated mbuf (or jumbo
 * cluster, picking the smallest size class that fits pktlen+align),
 * positioned so that (data + align) is ALIGN()ed, and the original
 * chain is freed.  Returns the new chain, or NULL on allocation
 * failure (the input is freed in either case).
 */
struct mbuf *
ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
{
	int pktlen, space;
	struct mbuf *n;

	pktlen = m->m_pkthdr.len;
	space = pktlen + align;
	if (space < MINCLSIZE)
		n = m_gethdr(M_NOWAIT, MT_DATA);
	else {
		/* smallest cluster size class that can hold the packet */
		n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    space <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    space <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    space <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
	}
	if (__predict_true(n != NULL)) {
		m_move_pkthdr(n, m);
		/* place the payload so the post-`align' data is aligned */
		n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
		m_copydata(m, 0, pktlen, mtod(n, caddr_t));
		n->m_len = pktlen;
	} else {
		IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
		    mtod(m, const struct ieee80211_frame *), NULL,
		    "%s", "no mbuf to realign");
		vap->iv_stats.is_rx_badalign++;
	}
	m_freem(m);
	return n;
}
658 #endif /* !__NO_STRICT_ALIGNMENT */
659
660 int
661 ieee80211_add_callback(struct mbuf *m,
662 void (*func)(struct ieee80211_node *, void *, int), void *arg)
663 {
664 struct m_tag *mtag;
665 struct ieee80211_cb *cb;
666
667 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_CALLBACK,
668 sizeof(struct ieee80211_cb), M_NOWAIT);
669 if (mtag == NULL)
670 return 0;
671
672 cb = (struct ieee80211_cb *)(mtag+1);
673 cb->func = func;
674 cb->arg = arg;
675 m_tag_prepend(m, mtag);
676 m->m_flags |= M_TXCB;
677 return 1;
678 }
679
680 int
681 ieee80211_add_xmit_params(struct mbuf *m,
682 const struct ieee80211_bpf_params *params)
683 {
684 struct m_tag *mtag;
685 struct ieee80211_tx_params *tx;
686
687 mtag = m_tag_get(/*MTAG_ABI_NET80211*/ NET80211_TAG_XMIT_PARAMS,
688 sizeof(struct ieee80211_tx_params), M_NOWAIT);
689 if (mtag == NULL)
690 return (0);
691
692 tx = (struct ieee80211_tx_params *)(mtag+1);
693 memcpy(&tx->params, params, sizeof(struct ieee80211_bpf_params));
694 m_tag_prepend(m, mtag);
695 return (1);
696 }
697
698 int
699 ieee80211_get_xmit_params(struct mbuf *m,
700 struct ieee80211_bpf_params *params)
701 {
702 struct m_tag *mtag;
703 struct ieee80211_tx_params *tx;
704
705 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_XMIT_PARAMS,
706 NULL);
707 if (mtag == NULL)
708 return (-1);
709 tx = (struct ieee80211_tx_params *)(mtag + 1);
710 memcpy(params, &tx->params, sizeof(struct ieee80211_bpf_params));
711 return (0);
712 }
713
714 void
715 ieee80211_process_callback(struct ieee80211_node *ni,
716 struct mbuf *m, int status)
717 {
718 struct m_tag *mtag;
719
720 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_CALLBACK, NULL);
721 if (mtag != NULL) {
722 struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1);
723 cb->func(ni, cb->arg, status);
724 }
725 }
726
727 /*
728 * Add RX parameters to the given mbuf.
729 *
730 * Returns 1 if OK, 0 on error.
731 */
732 int
733 ieee80211_add_rx_params(struct mbuf *m, const struct ieee80211_rx_stats *rxs)
734 {
735 struct m_tag *mtag;
736 struct ieee80211_rx_params *rx;
737
738 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
739 sizeof(struct ieee80211_rx_stats), M_NOWAIT);
740 if (mtag == NULL)
741 return (0);
742
743 rx = (struct ieee80211_rx_params *)(mtag + 1);
744 memcpy(&rx->params, rxs, sizeof(*rxs));
745 m_tag_prepend(m, mtag);
746 return (1);
747 }
748
749 int
750 ieee80211_get_rx_params(struct mbuf *m, struct ieee80211_rx_stats *rxs)
751 {
752 struct m_tag *mtag;
753 struct ieee80211_rx_params *rx;
754
755 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
756 NULL);
757 if (mtag == NULL)
758 return (-1);
759 rx = (struct ieee80211_rx_params *)(mtag + 1);
760 memcpy(rxs, &rx->params, sizeof(*rxs));
761 return (0);
762 }
763
764 const struct ieee80211_rx_stats *
765 ieee80211_get_rx_params_ptr(struct mbuf *m)
766 {
767 struct m_tag *mtag;
768 struct ieee80211_rx_params *rx;
769
770 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_RECV_PARAMS,
771 NULL);
772 if (mtag == NULL)
773 return (NULL);
774 rx = (struct ieee80211_rx_params *)(mtag + 1);
775 return (&rx->params);
776 }
777
778
779 /*
780 * Add TOA parameters to the given mbuf.
781 */
782 int
783 ieee80211_add_toa_params(struct mbuf *m, const struct ieee80211_toa_params *p)
784 {
785 struct m_tag *mtag;
786 struct ieee80211_toa_params *rp;
787
788 mtag = m_tag_get(/*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
789 sizeof(struct ieee80211_toa_params), M_NOWAIT);
790 if (mtag == NULL)
791 return (0);
792
793 rp = (struct ieee80211_toa_params *)(mtag + 1);
794 memcpy(rp, p, sizeof(*rp));
795 m_tag_prepend(m, mtag);
796 return (1);
797 }
798
799 int
800 ieee80211_get_toa_params(struct mbuf *m, struct ieee80211_toa_params *p)
801 {
802 struct m_tag *mtag;
803 struct ieee80211_toa_params *rp;
804
805 mtag = m_tag_find(m, /*MTAG_ABI_NET80211,*/ NET80211_TAG_TOA_PARAMS,
806 NULL);
807 if (mtag == NULL)
808 return (0);
809 rp = (struct ieee80211_toa_params *)(mtag + 1);
810 if (p != NULL)
811 memcpy(p, rp, sizeof(*p));
812 return (1);
813 }
814
815 /*
816 * Transmit a frame to the parent interface.
817 */
818 int
819 ieee80211_parent_xmitpkt(struct ieee80211com *ic, struct mbuf *m)
820 {
821 int error;
822
823 /*
824 * Assert the IC TX lock is held - this enforces the
825 * processing -> queuing order is maintained
826 */
827 IEEE80211_TX_LOCK_ASSERT(ic);
828 error = ic->ic_transmit(ic, m);
829 if (error) {
830 struct ieee80211_node *ni;
831
832 ni = (struct ieee80211_node *)m_get_rcvif_NOMPSAFE(m);
833
834 /* XXX number of fragments */
835 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
836 ieee80211_free_node(ni);
837 ieee80211_free_mbuf(m);
838 }
839 return (error);
840 }
841
842 /*
843 * Transmit a frame to the VAP interface.
844 */
845 int
846 ieee80211_vap_xmitpkt(struct ieee80211vap *vap, struct mbuf *m)
847 {
848 struct ifnet *ifp = vap->iv_ifp;
849
850 /*
851 * When transmitting via the VAP, we shouldn't hold
852 * any IC TX lock as the VAP TX path will acquire it.
853 */
854 IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic);
855
856 return (ifp->if_transmit(ifp, m));
857
858 }
859
/*
 * Fill the buffer at p with n random bytes from arc4random().
 *
 * Fix: advance the output pointer by the number of bytes actually
 * copied (nb) instead of always sizeof(uint32_t), and compute that
 * count once; the old code only avoided overrunning dp because the
 * loop happened to terminate on the short final chunk.
 */
void
get_random_bytes(void *p, size_t n)
{
	uint8_t *dp = p;

	while (n > 0) {
		uint32_t v = arc4random();
		size_t nb = n > sizeof(v) ? sizeof(v) : n;

		memcpy(dp, &v, nb);
		dp += nb;
		n -= nb;
	}
}
872
873 /*
874 * Helper function for events that pass just a single mac address.
875 */
876 static void
877 notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN])
878 {
879 struct ieee80211_join_event iev;
880 printf ("NNN notify_macaddr called\n");
881 CURVNET_SET(ifp->if_vnet);
882 memset(&iev, 0, sizeof(iev));
883 IEEE80211_ADDR_COPY(iev.iev_addr, mac);
884 rt_ieee80211msg(ifp, op, &iev, sizeof(iev));
885 CURVNET_RESTORE();
886 }
887
888 void
889 ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
890 {
891 struct ieee80211vap *vap = ni->ni_vap;
892 struct ifnet *ifp = vap->iv_ifp;
893
894 CURVNET_SET_QUIET(ifp->if_vnet);
895 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
896 (ni == vap->iv_bss) ? "bss " : "");
897
898 if (ni == vap->iv_bss) {
899 notify_macaddr(ifp, newassoc ?
900 RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
901 if_link_state_change(ifp, LINK_STATE_UP);
902 } else {
903 notify_macaddr(ifp, newassoc ?
904 RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
905 }
906 CURVNET_RESTORE();
907 }
908
909 void
910 ieee80211_notify_node_leave(struct ieee80211_node *ni)
911 {
912 struct ieee80211vap *vap = ni->ni_vap;
913 struct ifnet *ifp = vap->iv_ifp;
914
915 CURVNET_SET_QUIET(ifp->if_vnet);
916 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
917 (ni == vap->iv_bss) ? "bss " : "");
918
919 if (ni == vap->iv_bss) {
920 rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
921 if_link_state_change(ifp, LINK_STATE_DOWN);
922 } else {
923 /* fire off wireless event station leaving */
924 notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
925 }
926 CURVNET_RESTORE();
927 }
928
929 void
930 ieee80211_notify_scan_done(struct ieee80211vap *vap)
931 {
932 struct ifnet *ifp = vap->iv_ifp;
933
934 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
935
936 /* dispatch wireless event indicating scan completed */
937 CURVNET_SET(ifp->if_vnet);
938 rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0);
939 CURVNET_RESTORE();
940 }
941
942 void
943 ieee80211_notify_replay_failure(struct ieee80211vap *vap,
944 const struct ieee80211_frame *wh, const struct ieee80211_key *k,
945 u_int64_t rsc, int tid)
946 {
947 struct ifnet *ifp = vap->iv_ifp;
948
949 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
950 "%s replay detected tid %d <rsc %ju, csc %ju, keyix %u rxkeyix %u>",
951 k->wk_cipher->ic_name, tid, (intmax_t) rsc,
952 (intmax_t) k->wk_keyrsc[tid],
953 k->wk_keyix, k->wk_rxkeyix);
954
955 if (ifp != NULL) { /* NB: for cipher test modules */
956 struct ieee80211_replay_event iev;
957
958 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
959 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
960 iev.iev_cipher = k->wk_cipher->ic_cipher;
961 if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE)
962 iev.iev_keyix = k->wk_rxkeyix;
963 else
964 iev.iev_keyix = k->wk_keyix;
965 iev.iev_keyrsc = k->wk_keyrsc[tid];
966 iev.iev_rsc = rsc;
967 CURVNET_SET(ifp->if_vnet);
968 rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev));
969 CURVNET_RESTORE();
970 }
971 }
972
973 void
974 ieee80211_notify_michael_failure(struct ieee80211vap *vap,
975 const struct ieee80211_frame *wh, u_int keyix)
976 {
977 struct ifnet *ifp = vap->iv_ifp;
978
979 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
980 "michael MIC verification failed <keyix %u>", keyix);
981 vap->iv_stats.is_rx_tkipmic++;
982
983 if (ifp != NULL) { /* NB: for cipher test modules */
984 struct ieee80211_michael_event iev;
985
986 IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
987 IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
988 iev.iev_cipher = IEEE80211_CIPHER_TKIP;
989 iev.iev_keyix = keyix;
990 CURVNET_SET(ifp->if_vnet);
991 rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev));
992 CURVNET_RESTORE();
993 }
994 }
995
996 void
997 ieee80211_notify_wds_discover(struct ieee80211_node *ni)
998 {
999 struct ieee80211vap *vap = ni->ni_vap;
1000 struct ifnet *ifp = vap->iv_ifp;
1001
1002 notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr);
1003 }
1004
1005 void
1006 ieee80211_notify_csa(struct ieee80211com *ic,
1007 const struct ieee80211_channel *c, int mode, int count)
1008 {
1009 struct ieee80211_csa_event iev;
1010 struct ieee80211vap *vap;
1011 struct ifnet *ifp;
1012
1013 memset(&iev, 0, sizeof(iev));
1014 iev.iev_flags = c->ic_flags;
1015 iev.iev_freq = c->ic_freq;
1016 iev.iev_ieee = c->ic_ieee;
1017 iev.iev_mode = mode;
1018 iev.iev_count = count;
1019 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1020 ifp = vap->iv_ifp;
1021 CURVNET_SET(ifp->if_vnet);
1022 rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev));
1023 CURVNET_RESTORE();
1024 }
1025 }
1026
1027 void
1028 ieee80211_notify_radar(struct ieee80211com *ic,
1029 const struct ieee80211_channel *c)
1030 {
1031 struct ieee80211_radar_event iev;
1032 struct ieee80211vap *vap;
1033 struct ifnet *ifp;
1034
1035 memset(&iev, 0, sizeof(iev));
1036 iev.iev_flags = c->ic_flags;
1037 iev.iev_freq = c->ic_freq;
1038 iev.iev_ieee = c->ic_ieee;
1039 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1040 ifp = vap->iv_ifp;
1041 CURVNET_SET(ifp->if_vnet);
1042 rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev));
1043 CURVNET_RESTORE();
1044 }
1045 }
1046
1047 void
1048 ieee80211_notify_cac(struct ieee80211com *ic,
1049 const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type)
1050 {
1051 struct ieee80211_cac_event iev;
1052 struct ieee80211vap *vap;
1053 struct ifnet *ifp;
1054
1055 memset(&iev, 0, sizeof(iev));
1056 iev.iev_flags = c->ic_flags;
1057 iev.iev_freq = c->ic_freq;
1058 iev.iev_ieee = c->ic_ieee;
1059 iev.iev_type = type;
1060 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1061 ifp = vap->iv_ifp;
1062 CURVNET_SET(ifp->if_vnet);
1063 rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev));
1064 CURVNET_RESTORE();
1065 }
1066 }
1067
1068 void
1069 ieee80211_notify_node_deauth(struct ieee80211_node *ni)
1070 {
1071 struct ieee80211vap *vap = ni->ni_vap;
1072 struct ifnet *ifp = vap->iv_ifp;
1073
1074 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth");
1075
1076 notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr);
1077 }
1078
1079 void
1080 ieee80211_notify_node_auth(struct ieee80211_node *ni)
1081 {
1082 struct ieee80211vap *vap = ni->ni_vap;
1083 struct ifnet *ifp = vap->iv_ifp;
1084
1085 IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth");
1086
1087 notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr);
1088 }
1089
1090 void
1091 ieee80211_notify_country(struct ieee80211vap *vap,
1092 const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2])
1093 {
1094 struct ifnet *ifp = vap->iv_ifp;
1095 struct ieee80211_country_event iev;
1096
1097 memset(&iev, 0, sizeof(iev));
1098 IEEE80211_ADDR_COPY(iev.iev_addr, bssid);
1099 iev.iev_cc[0] = cc[0];
1100 iev.iev_cc[1] = cc[1];
1101 CURVNET_SET(ifp->if_vnet);
1102 rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev));
1103 CURVNET_RESTORE();
1104 }
1105
1106 void
1107 ieee80211_notify_radio(struct ieee80211com *ic, int state)
1108 {
1109 struct ieee80211_radio_event iev;
1110 struct ieee80211vap *vap;
1111 struct ifnet *ifp;
1112
1113 memset(&iev, 0, sizeof(iev));
1114 iev.iev_state = state;
1115 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
1116 ifp = vap->iv_ifp;
1117 CURVNET_SET(ifp->if_vnet);
1118 rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev));
1119 CURVNET_RESTORE();
1120 }
1121 }
1122
1123 #ifdef notyet
/*
 * Demand-load a kernel module by name on behalf of net80211.
 * FreeBSD-style implementation, currently compiled out (#ifdef notyet);
 * not wired up for NetBSD's module(9) interface.
 */
void
ieee80211_load_module(const char *modname)
{
	struct thread *td = curthread;

	/* only for privileged callers and when securelevel is not raised */
	if (suser(td) == 0 && securelevel_gt(td->td_ucred, 0) == 0) {
		/* NB: linker_load_module requires Giant (FreeBSD convention) */
		mtx_lock(&Giant);
		(void) linker_load_module(modname, NULL, NULL, NULL, NULL);
		mtx_unlock(&Giant);
	}
}
1135 #endif
1136
1137 #ifdef notyet
1138 static eventhandler_tag wlan_bpfevent;
1139 static eventhandler_tag wlan_ifllevent;
1140
/*
 * BPF attach/detach event handler: maintain radiotap-listener state
 * for wlan vaps so drivers only prepare radiotap headers when a
 * listener actually exists.  Compiled out (#ifdef notyet).
 */
static void
bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
{
	/* NB: identify vap's by if_init */ // NNN won't work with urtwn ...
	if (dlt == DLT_IEEE802_11_RADIO &&
	    ifp->if_init == ieee80211_init) {
		struct ieee80211vap *vap = ifp->if_softc;
		/*
		 * Track bpf radiotap listener state.  We mark the vap
		 * to indicate if any listener is present and the com
		 * to indicate if any listener exists on any associated
		 * vap.  This flag is used by drivers to prepare radiotap
		 * state only when needed.
		 */
		if (attach) {
			ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
			if (vap->iv_opmode == IEEE80211_M_MONITOR)
				atomic_add_int(&vap->iv_ic->ic_montaps, 1);
		} else if (!bpf_peers_present(vap->iv_rawbpf)) {
			/* last listener detached: clear flag, drop monitor tap count */
			ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
			if (vap->iv_opmode == IEEE80211_M_MONITOR)
				atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
		}
	}
}
1166
/*
 * iflladdr_event handler: change MAC address on the vap, but only
 * while the interface is down (IFF_UP clear).  Compiled out
 * (#ifdef notyet).
 */
static void
wlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	/* NB: identify vap's by if_init */ // NNN wont work on urtwn
	if (ifp->if_init == ieee80211_init &&
	    (ifp->if_flags & IFF_UP) == 0) {
		struct ieee80211vap *vap = ifp->if_softc;

		/* mirror the new link-level address into the vap's MAC */
		IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
	}
}
1181 #endif
1182
1183 void
1184 if_inc_counter(struct ifnet *ifp, ift_counter ifc, int64_t value)
1185 {
1186 switch (ifc) {
1187 case IFCOUNTER_IPACKETS:
1188 ifp->if_data.ifi_ipackets += value;
1189 break;
1190 case IFCOUNTER_IERRORS:
1191 ifp->if_data.ifi_ierrors += value;
1192 break;
1193 case IFCOUNTER_OPACKETS:
1194 ifp->if_data.ifi_opackets += value;
1195 break;
1196 case IFCOUNTER_OERRORS:
1197 ifp->if_data.ifi_oerrors += value;
1198 break;
1199 case IFCOUNTER_COLLISIONS:
1200 ifp->if_data.ifi_collisions += value;
1201 break;
1202 case IFCOUNTER_IBYTES:
1203 ifp->if_data.ifi_ibytes += value;
1204 break;
1205 case IFCOUNTER_OBYTES:
1206 ifp->if_data.ifi_obytes += value;
1207 break;
1208 case IFCOUNTER_IMCASTS:
1209 ifp->if_data.ifi_imcasts += value;
1210 break;
1211 case IFCOUNTER_OMCASTS:
1212 ifp->if_data.ifi_omcasts += value;
1213 break;
1214 case IFCOUNTER_IQDROPS:
1215 ifp->if_data.ifi_iqdrops += value;
1216 break;
1217 case IFCOUNTER_OQDROPS:
1218 /* ifp->if_data.ifi_oqdrops += value; No such field, just ignore it q*/
1219 break;
1220 case IFCOUNTER_NOPROTO:
1221 ifp->if_data.ifi_noproto += value;
1222 break;
1223 default:
1224 panic("if_inc_counter: non-existant counter");
1225 }
1226 }
1227
1228
1229 #ifdef notyet
/*
 * Module glue.
 *
 * NB: the module name is "wlan" for compatibility with NetBSD.
 */
static int
wlan_modevent(module_t mod, int type, void *unused)
{
	switch (type) {
	case MOD_LOAD:
		if (bootverbose)
			printf("wlan: <802.11 Link Layer>\n");
		/* watch BPF attach/detach to track radiotap listeners */
		wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track,
		    bpf_track, 0, EVENTHANDLER_PRI_ANY);
		/* watch link-level address changes to update vap MACs */
		wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
		    wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
		wlan_cloner = if_clone_simple(wlanname, wlan_clone_create,
		    wlan_clone_destroy, 0);
		return 0;
	case MOD_UNLOAD:
		/* tear down in reverse order of MOD_LOAD */
		if_clone_detach(wlan_cloner);
		EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
		EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent);
		return 0;
	}
	return EINVAL;	/* unsupported module event */
}
1257
/* FreeBSD module registration: name, event handler, no private data. */
static moduledata_t wlan_mod = {
	wlanname,
	wlan_modevent,
	0
};
DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(wlan, 1);
MODULE_DEPEND(wlan, ether, 1, 1, 1);
1266 #endif
1267
1268 #ifdef IEEE80211_ALQ
1269 MODULE_DEPEND(wlan, alq, 1, 1, 1);
1270 #endif /* IEEE80211_ALQ */
1271
1272 /* Missing support for if_printf in NetBSD ... */
1273 int
1274 if_printf(struct ifnet *ifp, const char *fmt, ...)
1275 {
1276 char if_fmt[256];
1277 va_list ap;
1278
1279 snprintf(if_fmt, sizeof(if_fmt), "%s: %s", ifp->if_xname, fmt);
1280 va_start(ap, fmt);
1281 vlog(LOG_INFO, if_fmt, ap);
1282 va_end(ap);
1283 return (0);
1284 }
1285
1286 /*
1287 * Set the m_data pointer of a newly-allocated mbuf
1288 * to place an object of the specified size at the
1289 * end of the mbuf, longword aligned.
1290 */
1291 void
1292 m_align(struct mbuf *m, int len)
1293 {
1294 int adjust;
1295
1296 KASSERT(len != M_COPYALL);
1297
1298 if (m->m_flags & M_EXT)
1299 adjust = m->m_ext.ext_size - len;
1300 else if (m->m_flags & M_PKTHDR)
1301 adjust = MHLEN - len;
1302 else
1303 adjust = MLEN - len;
1304 m->m_data += adjust &~ (sizeof(long)-1);
1305 }
1306
/*
 * Append the specified data to the indicated mbuf chain,
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0
 * (allocation failed; the chain holds a partial append and
 * the pkthdr length reflects only what was actually copied).
 */
int
m_append(struct mbuf *m0, int len, const void *cpv)
{
	struct mbuf *m, *n;
	int remainder, space;
	const char *cp = cpv;

	KASSERT(len != M_COPYALL);
	/* walk to the last mbuf of the chain */
	for (m = m0; m->m_next != NULL; m = m->m_next)
		continue;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		memmove(mtod(m, char *) + m->m_len, cp, space);
		m->m_len += space;
		cp = cp + space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_DONTWAIT, m->m_type);
		if (n == NULL)
			break;	/* out of mbufs: report partial append below */
		n->m_len = min(MLEN, remainder);
		memmove(mtod(n, void *), cp, n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	/* keep the packet-header length in sync with what was copied */
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}
1354
1355 /*
1356 * Create a writable copy of the mbuf chain. While doing this
1357 * we compact the chain with a goal of producing a chain with
1358 * at most two mbufs. The second mbuf in this chain is likely
1359 * to be a cluster. The primary purpose of this work is to create
1360 * a writable packet for encryption, compression, etc. The
1361 * secondary goal is to linearize the data so the data can be
1362 * passed to crypto hardware in the most efficient manner possible.
1363 */
/*
 * See the block comment above: produce a writable, compacted copy
 * of chain m0.  Returns the (possibly new) chain head, or NULL on
 * allocation failure, in which case m0 has been freed.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	/* NB: advance via mprev->m_next since m may be freed or replaced */
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
				    mtod(m, __uint8_t *), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (!M_READONLY(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		FBSDKASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, __uint8_t *) + mprev->m_len,
			    mtod(m, __uint8_t *), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			/* a pkthdr mbuf can only be the head of the chain */
			FBSDKASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		/* copy m's data into one or more fresh clusters */
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, __uint8_t *), mtod(m, __uint8_t *) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				/* free the partial copy, then the whole chain */
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		/* splice the copied run in place of m */
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
1481