/*	$NetBSD: bpf_stub.c,v 1.4.4.2 2010/03/11 15:04:26 yamt Exp $	*/

/*
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.4.4.2 2010/03/11 15:04:26 yamt Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <net/bpf.h>
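
/*
 * A "lag" record remembers one bpf_attach() request made while only
 * this stub was installed, so it can be replayed against the real bpf
 * driver at handover time (presumably "lag" because the attach lags
 * behind the driver load; the name is interpretation, not documented).
 */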
struct laglist {
        struct ifnet *lag_ifp;
        u_int lag_dlt;
        u_int lag_hlen;
        struct bpf_if **lag_drvp;

        TAILQ_ENTRY(laglist) lag_entries;
};

static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);

static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
static void bpf_stub_detach(struct ifnet *);

static void bpf_stub_null(void);
static void bpf_stub_warn(void);

static kmutex_t handovermtx;
static kcondvar_t handovercv;
static bool handover;

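/*
 * Stub ops table.  The tap methods all share a single no-argument
 * warning stub, so the pointers are cast into the slots; since the
 * stubs never touch their arguments, this presumably relies on the
 * supported ABIs tolerating a call through a mismatched function
 * pointer type.
 */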
struct bpf_ops bpf_ops_stub = {
        .bpf_attach = bpf_stub_attach,
        .bpf_detach = bpf_stub_detach,
        .bpf_change_type = (void *)bpf_stub_null,

        .bpf_tap = (void *)bpf_stub_warn,
        .bpf_mtap = (void *)bpf_stub_warn,
        .bpf_mtap2 = (void *)bpf_stub_warn,
        .bpf_mtap_af = (void *)bpf_stub_warn,
        .bpf_mtap_et = (void *)bpf_stub_warn,
        .bpf_mtap_sl_in = (void *)bpf_stub_warn,
        .bpf_mtap_sl_out = (void *)bpf_stub_warn,
};
struct bpf_ops *bpf_ops;

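/*
 * Queue an attach request for replay at handover time.  If a handover
 * is in progress when we get here, wait for it to complete and then
 * attach directly through the (by then real) ops instead.
 */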
static void
bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
{
        struct laglist *lag;
        bool storeattach = true;

        lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
        lag->lag_ifp = ifp;
        lag->lag_dlt = dlt;
        lag->lag_hlen = hlen;
        lag->lag_drvp = drvp;

        mutex_enter(&handovermtx);
        /*
         * If handover is in progress, wait for it to finish and complete
         * attach after that.  Otherwise record ourselves.
         */
        while (handover) {
                storeattach = false;
                cv_wait(&handovercv, &handovermtx);
        }

        if (storeattach == false) {
                mutex_exit(&handovermtx);
                kmem_free(lag, sizeof(*lag));
                KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
                bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
        } else {
                *drvp = NULL;
                TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
                mutex_exit(&handovermtx);
        }
}

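/*
 * Drop any queued attach records for this interface.  If a handover
 * raced past us, the interface is already attached to the real driver,
 * so detach through the real ops instead.  The records are unlinked
 * under the lock but freed after dropping it.
 */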
static void
bpf_stub_detach(struct ifnet *ifp)
{
        TAILQ_HEAD(, laglist) rmlist;
        struct laglist *lag, *lag_next;
        bool didhand;

        TAILQ_INIT(&rmlist);

        didhand = false;
        mutex_enter(&handovermtx);
        while (handover) {
                didhand = true;
                cv_wait(&handovercv, &handovermtx);
        }

        if (didhand == false) {
                /* atomically remove all */
                for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
                        lag_next = TAILQ_NEXT(lag, lag_entries);
                        if (lag->lag_ifp == ifp) {
                                TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
                                TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
                        }
                }
                mutex_exit(&handovermtx);
                while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
                        TAILQ_REMOVE(&rmlist, lag, lag_entries);
                        kmem_free(lag, sizeof(*lag));
                }
        } else {
                mutex_exit(&handovermtx);
                KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
                bpf_ops->bpf_detach(ifp);
        }
}

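/*
 * bpf_stub_null backs methods that are harmless to call with no real
 * driver present (bpf_change_type); bpf_stub_warn backs the tap
 * methods, which should never fire before an interface has a bpf_if,
 * so a call into it indicates a driver bug.
 */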
static void
bpf_stub_null(void)
{

}

static void
bpf_stub_warn(void)
{

#ifdef DEBUG
        panic("bpf method called without attached bpf_if");
#endif
#ifdef DIAGNOSTIC
        printf("bpf method called without attached bpf_if\n");
#endif
}

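/*
 * Install the stub ops and the handover synchronization state.
 * Expected to run exactly once, early in bootstrap, before any
 * interface tries to attach to bpf.
 */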
void
bpf_setops(void)
{

        mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&handovercv, "bpfops");
        bpf_ops = &bpf_ops_stub;
}

/*
 * Party's over, prepare for handover.
 * It needs to happen *before* bpf_ops is set to make it atomic
 * to callers (see also stub implementations, which wait if
 * called during handover).  The likelihood of seeing a full
 * attach-detach *during* handover comes close to astronomical,
 * but handle it anyway since it's relatively easy.
 */
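/*
 * A minimal sketch of the expected caller sequence, following the
 * comment above (illustrative only; the authoritative code lives in
 * the real bpf driver, and "bpf_ops_kernel" is an assumed name for
 * its full ops table):
 *
 *	bpf_ops_handover_enter(&bpf_ops_kernel);
 *	bpf_ops = &bpf_ops_kernel;
 *	bpf_ops_handover_exit();
 */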
void
bpf_ops_handover_enter(struct bpf_ops *newops)
{
        struct laglist *lag;

        mutex_enter(&handovermtx);
        handover = true;

        while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
                TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
                mutex_exit(&handovermtx);
                newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
                    lag->lag_hlen, lag->lag_drvp);
                kmem_free(lag, sizeof(*lag));
                mutex_enter(&handovermtx);
        }
        mutex_exit(&handovermtx);
}

/* handover done, wake up anyone who waited it out */
void
bpf_ops_handover_exit(void)
{

        mutex_enter(&handovermtx);
        handover = false;
        cv_broadcast(&handovercv);
        mutex_exit(&handovermtx);
}