/*	$NetBSD: npf_os.c,v 1.8.2.2 2017/12/03 11:39:03 jdolecek Exp $	*/

/*-
 * Copyright (c) 2009-2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF main: dynamic load/initialisation and unload routines.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_os.c,v 1.8.2.2 2017/12/03 11:39:03 jdolecek Exp $");

#ifdef _KERNEL_OPT
#include "pf.h"
#if NPF > 0
#error "NPF and PF are mutually exclusive; please select one"
#endif
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/module.h>
#include <sys/socketvar.h>
#include <sys/uio.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#endif

#include "npf_impl.h"
#include "npfkern.h"

#ifdef _KERNEL
#ifndef _MODULE
#include "opt_modular.h"
#include "opt_net_mpsafe.h"
#endif
#include "ioconf.h"
#endif

/*
 * Module and device structures.
 */
#ifndef _MODULE
/*
 * Modular kernels load drivers too early, and we need percpu to be
 * initialised.  So we make this a misc module; a better way would be to
 * have early-boot and late-boot drivers.
 */
MODULE(MODULE_CLASS_MISC, npf, "bpf");
#else
/* This module autoloads via /dev/npf so it needs to be a driver */
MODULE(MODULE_CLASS_DRIVER, npf, "bpf");
#endif

static int npf_dev_open(dev_t, int, int, lwp_t *);
static int npf_dev_close(dev_t, int, int, lwp_t *);
static int npf_dev_ioctl(dev_t, u_long, void *, int, lwp_t *);
static int npf_dev_poll(dev_t, int, lwp_t *);
static int npf_dev_read(dev_t, struct uio *, int);

const struct cdevsw npf_cdevsw = {
        .d_open = npf_dev_open,
        .d_close = npf_dev_close,
        .d_read = npf_dev_read,
        .d_write = nowrite,
        .d_ioctl = npf_dev_ioctl,
        .d_stop = nostop,
        .d_tty = notty,
        .d_poll = npf_dev_poll,
        .d_mmap = nommap,
        .d_kqfilter = nokqfilter,
        .d_discard = nodiscard,
        .d_flag = D_OTHER | D_MPSAFE
};

static const char *	npf_ifop_getname(ifnet_t *);
static ifnet_t *	npf_ifop_lookup(const char *);
static void		npf_ifop_flush(void *);
static void *		npf_ifop_getmeta(const ifnet_t *);
static void		npf_ifop_setmeta(ifnet_t *, void *);

static const unsigned	nworkers = 1;

static bool		pfil_registered = false;
static pfil_head_t *	npf_ph_if = NULL;
static pfil_head_t *	npf_ph_inet = NULL;
static pfil_head_t *	npf_ph_inet6 = NULL;

static const npf_ifops_t kern_ifops = {
        .getname = npf_ifop_getname,
        .lookup = npf_ifop_lookup,
        .flush = npf_ifop_flush,
        .getmeta = npf_ifop_getmeta,
        .setmeta = npf_ifop_setmeta,
};

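/*
 * npf_fini: detach the control device (when built as a module), remove
 * the pfil(9) hooks and destroy the kernel NPF context.
 */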
static int
npf_fini(void)
{
        npf_t *npf = npf_getkernctx();

        /* First, detach the device and remove the pfil hooks. */
#ifdef _MODULE
        devsw_detach(NULL, &npf_cdevsw);
#endif
        npf_pfil_unregister(true);
        npf_destroy(npf);
        npf_sysfini();
        return 0;
}

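/*
 * npf_init: initialise the NPF subsystem, create the kernel NPF context,
 * register the pfil(9) hooks and, when built as a module, attach the
 * /dev/npf control device.
 */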
static int
npf_init(void)
{
        npf_t *npf;
        int error = 0;

        error = npf_sysinit(nworkers);
        if (error)
                return error;
        npf = npf_create(0, NULL, &kern_ifops);
        npf_setkernctx(npf);
        npf_pfil_register(true);

#ifdef _MODULE
        devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;

        /* Attach /dev/npf device. */
        error = devsw_attach("npf", NULL, &bmajor, &npf_cdevsw, &cmajor);
        if (error) {
                /* npf_fini() will call devsw_detach(), which is safe. */
                (void)npf_fini();
        }
#endif
        return error;
}


/*
 * Module interface.
 */
static int
npf_modcmd(modcmd_t cmd, void *arg)
{
        switch (cmd) {
        case MODULE_CMD_INIT:
                return npf_init();
        case MODULE_CMD_FINI:
                return npf_fini();
        case MODULE_CMD_AUTOUNLOAD:
                if (npf_autounload_p()) {
                        return EBUSY;
                }
                break;
        default:
                return ENOTTY;
        }
        return 0;
}

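/*
 * npfattach: pseudo-device attach routine; initialisation is performed
 * via the module interface, so there is nothing to do here.
 */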
void
npfattach(int nunits)
{
        /* Nothing */
}

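/*
 * npf_dev_open: open the /dev/npf control device; the caller must hold
 * the firewall privilege, checked via kauth(9).
 */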
static int
npf_dev_open(dev_t dev, int flag, int mode, lwp_t *l)
{
        /* Available only for super-user. */
        if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
            KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
                return EPERM;
        }
        return 0;
}

static int
npf_dev_close(dev_t dev, int flag, int mode, lwp_t *l)
{
        return 0;
}

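/*
 * npf_stats_export: copy the statistics counters into a temporary
 * buffer and copy them out to the user-supplied address.
 */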
static int
npf_stats_export(npf_t *npf, void *data)
{
        uint64_t *fullst, *uptr = *(uint64_t **)data;
        int error;

        fullst = kmem_alloc(NPF_STATS_SIZE, KM_SLEEP);
        npf_stats(npf, fullst); /* will zero the buffer */
        error = copyout(fullst, uptr, NPF_STATS_SIZE);
        kmem_free(fullst, NPF_STATS_SIZE);
        return error;
}

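/*
 * npf_dev_ioctl: main control interface; dispatch the IOC_NPF_*
 * requests to the corresponding handlers.
 */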
static int
npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
        npf_t *npf = npf_getkernctx();
        int error;

        /* Available only for super-user. */
        if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
            KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
                return EPERM;
        }

        switch (cmd) {
        case IOC_NPF_TABLE:
                error = npfctl_table(npf, data);
                break;
        case IOC_NPF_RULE:
                error = npfctl_rule(npf, cmd, data);
                break;
        case IOC_NPF_STATS:
                error = npf_stats_export(npf, data);
                break;
        case IOC_NPF_SAVE:
                error = npfctl_save(npf, cmd, data);
                break;
        case IOC_NPF_SWITCH:
                error = npfctl_switch(data);
                break;
        case IOC_NPF_LOAD:
                error = npfctl_load(npf, cmd, data);
                break;
        case IOC_NPF_CONN_LOOKUP:
                error = npfctl_conn_lookup(npf, cmd, data);
                break;
        case IOC_NPF_VERSION:
                *(int *)data = NPF_VERSION;
                error = 0;
                break;
        default:
                error = ENOTTY;
                break;
        }
        return error;
}

static int
npf_dev_poll(dev_t dev, int events, lwp_t *l)
{
        return ENOTSUP;
}

static int
npf_dev_read(dev_t dev, struct uio *uio, int flag)
{
        return ENOTSUP;
}

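/*
 * npf_autounload_p: return true if the pfil(9) hooks are not registered
 * and the default ruleset decision is to pass the packets.
 */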
bool
npf_autounload_p(void)
{
        npf_t *npf = npf_getkernctx();
        return !npf_pfil_registered_p() && npf_default_pass(npf);
}

/*
 * Interface operations.
 */

static const char *
npf_ifop_getname(ifnet_t *ifp)
{
        return ifp->if_xname;
}

static ifnet_t *
npf_ifop_lookup(const char *name)
{
        return ifunit(name);
}

static void
npf_ifop_flush(void *arg)
{
        ifnet_t *ifp;

        KERNEL_LOCK(1, NULL);
        IFNET_LOCK();
        IFNET_WRITER_FOREACH(ifp) {
                ifp->if_pf_kif = arg;
        }
        IFNET_UNLOCK();
        KERNEL_UNLOCK_ONE(NULL);
}

static void *
npf_ifop_getmeta(const ifnet_t *ifp)
{
        return ifp->if_pf_kif;
}

static void
npf_ifop_setmeta(ifnet_t *ifp, void *arg)
{
        ifp->if_pf_kif = arg;
}

#ifdef _KERNEL

/*
 * Wrapper of the main packet handler to pass the kernel NPF context.
 */
static int
npfkern_packet_handler(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
{
        npf_t *npf = npf_getkernctx();
        return npf_packet_handler(npf, mp, ifp, di);
}

/*
 * npf_ifhook: hook handling interface changes.
 */
static void
npf_ifhook(void *arg, unsigned long cmd, void *arg2)
{
        npf_t *npf = npf_getkernctx();
        ifnet_t *ifp = arg2;

        switch (cmd) {
        case PFIL_IFNET_ATTACH:
                npf_ifmap_attach(npf, ifp);
                npf_ifaddr_sync(npf, ifp);
                break;
        case PFIL_IFNET_DETACH:
                npf_ifmap_detach(npf, ifp);
                npf_ifaddr_flush(npf, ifp);
                break;
        }
}

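/*
 * npf_ifaddrhook: hook handling interface address changes; re-sync the
 * NPF interface address tables on the relevant address ioctl commands.
 */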
static void
npf_ifaddrhook(void *arg, u_long cmd, void *arg2)
{
        npf_t *npf = npf_getkernctx();
        struct ifaddr *ifa = arg2;

        switch (cmd) {
        case SIOCSIFADDR:
        case SIOCAIFADDR:
        case SIOCDIFADDR:
#ifdef INET6
        case SIOCSIFADDR_IN6:
        case SIOCAIFADDR_IN6:
        case SIOCDIFADDR_IN6:
#endif
                break;
        default:
                return;
        }
        npf_ifaddr_sync(npf, ifa->ifa_ifp);
}

/*
 * npf_pfil_register: register pfil(9) hooks.
 */
int
npf_pfil_register(bool init)
{
        npf_t *npf = npf_getkernctx();
        int error = 0;

        SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

        /* Init: interface re-config and attach/detach hook. */
        if (!npf_ph_if) {
                npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
                if (!npf_ph_if) {
                        error = ENOENT;
                        goto out;
                }

                error = pfil_add_ihook(npf_ifhook, NULL,
                    PFIL_IFNET, npf_ph_if);
                KASSERT(error == 0);

                error = pfil_add_ihook(npf_ifaddrhook, NULL,
                    PFIL_IFADDR, npf_ph_if);
                KASSERT(error == 0);
        }
        if (init) {
                goto out;
        }

        /* Bail out if the pfil hooks are already registered. */
        if (pfil_registered) {
                error = EEXIST;
                goto out;
        }

        /* Capture points of the activity in the IP layer. */
        npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
        npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
        if (!npf_ph_inet && !npf_ph_inet6) {
                error = ENOENT;
                goto out;
        }

        /* Packet IN/OUT handlers for IP layer. */
        if (npf_ph_inet) {
                error = pfil_add_hook(npfkern_packet_handler, npf,
                    PFIL_ALL, npf_ph_inet);
                KASSERT(error == 0);
        }
        if (npf_ph_inet6) {
                error = pfil_add_hook(npfkern_packet_handler, npf,
                    PFIL_ALL, npf_ph_inet6);
                KASSERT(error == 0);
        }

        /*
         * It is necessary to re-sync all/any interface address tables,
         * since we were not listening for any changes.
         */
        npf_ifaddr_syncall(npf);
        pfil_registered = true;
out:
        SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

        return error;
}

/*
 * npf_pfil_unregister: unregister pfil(9) hooks.
 */
void
npf_pfil_unregister(bool fini)
{
        npf_t *npf = npf_getkernctx();

        SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

        if (fini && npf_ph_if) {
                (void)pfil_remove_ihook(npf_ifhook, NULL,
                    PFIL_IFNET, npf_ph_if);
                (void)pfil_remove_ihook(npf_ifaddrhook, NULL,
                    PFIL_IFADDR, npf_ph_if);
        }
        if (npf_ph_inet) {
                (void)pfil_remove_hook(npfkern_packet_handler, npf,
                    PFIL_ALL, npf_ph_inet);
        }
        if (npf_ph_inet6) {
                (void)pfil_remove_hook(npfkern_packet_handler, npf,
                    PFIL_ALL, npf_ph_inet6);
        }
        pfil_registered = false;

        SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

bool
npf_pfil_registered_p(void)
{
        return pfil_registered;
}
#endif