1 /* $NetBSD: ip_fil_netbsd.c,v 1.16.2.3 2016/07/26 03:24:22 pgoyette Exp $ */
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.16.2.3 2016/07/26 03:24:22 pgoyette Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60 #if (__NetBSD_Version__ >= 799003300)
61 #include <sys/localcount.h>
62 #endif
63
64 #include <net/if.h>
65 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/tcp.h>
72 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
73 # include <netinet/tcp_timer.h>
74 # include <netinet/tcp_var.h>
75 #endif
76 #include <netinet/udp.h>
77 #include <netinet/tcpip.h>
78 #include <netinet/ip_icmp.h>
79 #include "netinet/ip_compat.h"
80 #ifdef USE_INET6
81 # include <netinet/icmp6.h>
82 # if (__NetBSD_Version__ >= 106000000)
83 # include <netinet6/nd6.h>
84 # endif
85 #endif
86 #include "netinet/ip_fil.h"
87 #include "netinet/ip_nat.h"
88 #include "netinet/ip_frag.h"
89 #include "netinet/ip_state.h"
90 #include "netinet/ip_proxy.h"
91 #include "netinet/ip_auth.h"
92 #include "netinet/ip_sync.h"
93 #include "netinet/ip_lookup.h"
94 #include "netinet/ip_dstlist.h"
95 #ifdef IPFILTER_SCAN
96 #include "netinet/ip_scan.h"
97 #endif
98 #include <sys/md5.h>
99 #include <sys/kernel.h>
100 #include <sys/conf.h>
101 #ifdef INET
102 extern int ip_optcopy (struct ip *, struct ip *);
103 #endif
104
105 #ifdef IPFILTER_M_IPFILTER
106 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
107 #endif
108
109 #if __NetBSD_Version__ >= 105009999
110 # define csuminfo csum_flags
111 #endif
112
113 #if __NetBSD_Version__ < 200000000
114 extern struct protosw inetsw[];
115 #endif
116
117 #if (__NetBSD_Version__ >= 599002000)
118 static kauth_listener_t ipf_listener;
119 #endif
120
121 #if (__NetBSD_Version__ < 399001400)
122 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
123 struct ifnet *, struct in6_addr *, u_long *,
124 int *);
125 #endif
126 #if (NetBSD >= 199511)
127 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
128 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
129 #else
130 # if (__NetBSD_Version__ >= 399001400)
131 static int ipfopen(dev_t dev, int flags, struct lwp *);
132 static int ipfclose(dev_t dev, int flags, struct lwp *);
133 # else
134 static int ipfopen(dev_t dev, int flags);
135 static int ipfclose(dev_t dev, int flags);
136 # endif /* __NetBSD_Version__ >= 399001400 */
137 #endif
138 static int ipfread(dev_t, struct uio *, int ioflag);
139 static int ipfwrite(dev_t, struct uio *, int ioflag);
140 static int ipfpoll(dev_t, int events, PROC_T *);
141 static void ipf_timer_func(void *ptr);
142
143 const struct cdevsw ipl_cdevsw = {
144 #if (__NetBSD_Version__ >= 799003300)
145 LOCALCOUNT_INITIALIZER
146 #endif
147 .d_open = ipfopen,
148 .d_close = ipfclose,
149 .d_read = ipfread,
150 .d_write = ipfwrite,
151 .d_ioctl = ipfioctl,
152 .d_stop = nostop,
153 .d_tty = notty,
154 .d_poll = ipfpoll,
155 .d_mmap = nommap,
156 #if (__NetBSD_Version__ >= 200000000)
157 .d_kqfilter = nokqfilter,
158 #endif
159 .d_discard = nodiscard,
160 #ifdef D_OTHER
161 .d_flag = D_OTHER
162 #else
163 .d_flag = 0
164 #endif
165 };
166 #if (__NetBSD_Version__ >= 799003000)
167 kmutex_t ipf_ref_mutex;
168 int ipf_active;
169 #endif
170
171 ipf_main_softc_t ipfmain;
172
173 static u_short ipid = 0;
174 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
175 static int ipf_send_ip(fr_info_t *, mb_t *);
176 #ifdef USE_INET6
177 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
178 fr_info_t *, frdest_t *);
179 #endif
180
181 #if defined(NETBSD_PF)
182 # include <net/pfil.h>
183 /*
184 * We provide the ipf_checkp name just to minimize changes later.
185 */
186 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
187 #endif /* NETBSD_PF */
188
189 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
190 # include <net/pfil.h>
191
192 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
193
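/*
 * pfil(9) hook for IPv4 packets: make the mbuf chain writable (the ipf
 * code modifies packets in place), finalize any delayed TCP/UDP checksums
 * on outbound packets, and hand the result to ipf_check().
 */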
194 static int
195 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
196 {
197 struct ip *ip;
198 int rv, hlen;
199
200 #if __NetBSD_Version__ >= 200080000
201 /*
202 * ensure that the mbufs are writable beforehand,
203 * as the ipf code assumes they are.
204 * XXX inefficient
205 */
206 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
207
208 if (error) {
209 m_freem(*mp);
210 *mp = NULL;
211 return error;
212 }
213 #endif
214 ip = mtod(*mp, struct ip *);
215 hlen = ip->ip_hl << 2;
216
217 #ifdef INET
218 #if defined(M_CSUM_TCPv4)
219 /*
220 * If the packet is out-bound, we can't delay checksums
221 * here. For in-bound, the checksum has already been
222 * validated.
223 */
224 if (dir == PFIL_OUT) {
225 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
226 in_delayed_cksum(*mp);
227 (*mp)->m_pkthdr.csum_flags &=
228 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
229 }
230 }
231 #endif /* M_CSUM_TCPv4 */
232 #endif /* INET */
233
234 /*
235 * Note, we don't need to update the checksum, because
236 * it has already been verified.
237 */
238 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
239
240 return (rv);
241 }
242
243 # ifdef USE_INET6
244 # include <netinet/ip6.h>
245
246 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
247
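/*
 * pfil(9) hook for IPv6 packets: finalize any delayed TCP/UDP checksums
 * on outbound packets and pass the packet to ipf_check() with the length
 * of the basic IPv6 header.
 */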
248 static int
249 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
250 {
251 #if defined(INET6)
252 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
253 /*
254 * If the packet is out-bound, we can't delay checksums
255 * here. For in-bound, the checksum has already been
256 * validated.
257 */
258 if (dir == PFIL_OUT) {
259 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
260 # if (__NetBSD_Version__ > 399000600)
261 in6_delayed_cksum(*mp);
262 # endif
263 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
264 M_CSUM_UDPv6);
265 }
266 }
267 # endif
268 #endif /* INET6 */
269
270 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
271 ifp, (dir == PFIL_OUT), mp));
272 }
273 # endif
274
275
276 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
277 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
278
279 static int
280 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
281 {
282 /*
283 * The interface pointer is useless for create (we have nothing to
284 * compare it to) and at detach, the interface name is still in the
285 * list of active NICs (albeit, down, but that's not any real
286 * indicator) and doing ifunit() on the name will still return the
287 * pointer, so it's not much use then, either.
288 */
289 ipf_sync(&ipfmain, NULL);
290 return 0;
291 }
292 # endif
293
294 #endif /* __NetBSD_Version__ >= 105110000 */
295
296
297 #if defined(IPFILTER_LKM)
298 int
299 ipf_identify(char *s)
300 {
302 if (strcmp(s, "ipl") == 0)
303 return 1;
304 return 0;
305 }
306 #endif /* IPFILTER_LKM */
307
308 #if (__NetBSD_Version__ >= 599002000)
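/*
 * kauth(9) network scope listener: allow firewall and NAT control
 * requests (KAUTH_REQ_NETWORK_FIREWALL_FW/_NAT) and defer everything
 * else to other listeners.
 */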
309 static int
310 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
311 void *arg0, void *arg1, void *arg2, void *arg3)
312 {
313 int result;
314 enum kauth_network_req req;
315
316 result = KAUTH_RESULT_DEFER;
317 req = (enum kauth_network_req)arg0;
318
319 if (action != KAUTH_NETWORK_FIREWALL)
320 return result;
321
322 /* These must have come from device context. */
323 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
324 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
325 result = KAUTH_RESULT_ALLOW;
326
327 return result;
328 }
329 #endif
330
331 /*
332 * Try to detect the case when compiling for NetBSD with pseudo-device support
333 */
334 void
335 ipfilterattach(int count)
336 {
337
338 #if (__NetBSD_Version__ >= 799003000)
339 return;
340 #else
341 #if (__NetBSD_Version__ >= 599002000)
342 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
343 ipf_listener_cb, NULL);
344 #endif
345
346 if (ipf_load_all() == 0)
347 (void) ipf_create_all(&ipfmain);
348 #endif
349 }
350
351
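/*
 * ipfattach() enables the filter: initialize the ipf subsystems, register
 * the pfil(9) hooks for AF_INET, AF_INET6 and interface events, and start
 * the slow timer that drives ipf_slowtimer().
 */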
352 int
353 ipfattach(ipf_main_softc_t *softc)
354 {
355 SPL_INT(s);
356 #if (__NetBSD_Version__ >= 499005500)
357 int i;
358 #endif
359 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
360 int error = 0;
361 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
362 pfil_head_t *ph_inet;
363 # ifdef USE_INET6
364 pfil_head_t *ph_inet6;
365 # endif
366 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
367 pfil_head_t *ph_ifsync;
368 # endif
369 # endif
370 #endif
371
372 SPL_NET(s);
373 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
374 printf("IP Filter: already initialized\n");
375 SPL_X(s);
376 IPFERROR(130017);
377 return EBUSY;
378 }
379
380 if (ipf_init_all(softc) < 0) {
381 SPL_X(s);
382 IPFERROR(130015);
383 return EIO;
384 }
385
386 #ifdef NETBSD_PF
387 # if (__NetBSD_Version__ >= 104200000)
388 # if __NetBSD_Version__ >= 105110000
389 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
390 # ifdef USE_INET6
391 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
392 # endif
393 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
394 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
395 # endif
396
397 if (ph_inet == NULL
398 # ifdef USE_INET6
399 && ph_inet6 == NULL
400 # endif
401 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
402 && ph_ifsync == NULL
403 # endif
404 ) {
405 SPL_X(s);
406 IPFERROR(130016);
407 return ENODEV;
408 }
409
410 if (ph_inet != NULL)
411 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
412 PFIL_IN|PFIL_OUT, ph_inet);
413 else
414 error = 0;
415 # else
416 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
417 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
418 # endif
419 if (error) {
420 IPFERROR(130013);
421 goto pfil_error;
422 }
423 # else
424 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
425 # endif
426
427 # ifdef USE_INET6
428 # if __NetBSD_Version__ >= 105110000
429 if (ph_inet6 != NULL)
430 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
431 PFIL_IN|PFIL_OUT, ph_inet6);
432 else
433 error = 0;
434 if (error) {
435 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
436 PFIL_IN|PFIL_OUT, ph_inet6);
437 ipfmain.ipf_interror = 130014;
438 goto pfil_error;
439 }
440 # else
441 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
442 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
443 if (error) {
444 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
445 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
446 IPFERROR(130014);
447 goto pfil_error;
448 }
449 # endif
450 # endif
451
452 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
453 if (ph_ifsync != NULL)
454 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
455 PFIL_IFNET, ph_ifsync);
456 # endif
457 #endif
458
459 #if (__NetBSD_Version__ >= 499005500)
460 for (i = 0; i < IPL_LOGSIZE; i++)
461 selinit(&ipfmain.ipf_selwait[i]);
462 #else
463 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
464 #endif
465 ipf_savep = ipf_checkp;
466 ipf_checkp = ipf_check;
467
468 #ifdef INET
469 if (softc->ipf_control_forwarding & 1)
470 ipforwarding = 1;
471 #endif
472
473 ipid = 0;
474
475 SPL_X(s);
476
477 #if (__NetBSD_Version__ >= 104010000)
478 # if (__NetBSD_Version__ >= 499002000)
479 callout_init(&softc->ipf_slow_ch, 0);
480 # else
481 callout_init(&softc->ipf_slow_ch);
482 # endif
483 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
484 ipf_timer_func, softc);
485 #else
486 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
487 #endif
488
489 return 0;
490
491 #if __NetBSD_Version__ >= 105110000
492 pfil_error:
493 SPL_X(s);
494 ipf_fini_all(softc);
495 return error;
496 #endif
497 }
498
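/*
 * Periodic callout: run ipf_slowtimer() to expire state, NAT and fragment
 * table entries, then re-arm itself while the filter remains loaded
 * (ipf_running is 1 or -1).
 */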
499 static void
500 ipf_timer_func(void *ptr)
501 {
502 ipf_main_softc_t *softc = ptr;
503 SPL_INT(s);
504
505 SPL_NET(s);
506 READ_ENTER(&softc->ipf_global);
507
508 if (softc->ipf_running > 0)
509 ipf_slowtimer(softc);
510
511 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
512 #if NETBSD_GE_REV(104240000)
513 callout_reset(&softc->ipf_slow_ch, hz / 2,
514 ipf_timer_func, softc);
515 #else
516 timeout(ipf_timer_func, softc,
517 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
518 #endif
519 }
520 RWLOCK_EXIT(&softc->ipf_global);
521 SPL_X(s);
522 }
523
524
525 /*
526 * Disable the filter by removing the hooks from the IP input/output
527 * stream.
528 */
529 int
530 ipfdetach(ipf_main_softc_t *softc)
531 {
532 SPL_INT(s);
533 #if (__NetBSD_Version__ >= 499005500)
534 int i;
535 #endif
536 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
537 int error = 0;
538 # if __NetBSD_Version__ >= 105150000
539 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
540 # ifdef USE_INET6
541 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
542 # endif
543 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
544 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
545 # endif
546 # endif
547 #endif
548
549 SPL_NET(s);
550
551 #if (__NetBSD_Version__ >= 104010000)
552 if (softc->ipf_running > 0)
553 callout_stop(&softc->ipf_slow_ch);
554 #else
555 untimeout(ipf_slowtimer, NULL);
556 #endif /* NetBSD */
557
558 ipf_checkp = ipf_savep;
559 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
560 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
561
562 #ifdef INET
563 if (softc->ipf_control_forwarding & 2)
564 ipforwarding = 0;
565 #endif
566
567 #ifdef NETBSD_PF
568 # if (__NetBSD_Version__ >= 104200000)
569 # if __NetBSD_Version__ >= 105110000
570 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
571 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
572 PFIL_IFNET, ph_ifsync);
573 # endif
574
575 if (ph_inet != NULL)
576 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
577 PFIL_IN|PFIL_OUT, ph_inet);
578 else
579 error = 0;
580 # else
581 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
582 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
583 # endif
584 if (error) {
585 SPL_X(s);
586 IPFERROR(130011);
587 return error;
588 }
589 # else
590 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
591 # endif
592 # ifdef USE_INET6
593 # if __NetBSD_Version__ >= 105110000
594 if (ph_inet6 != NULL)
595 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
596 PFIL_IN|PFIL_OUT, ph_inet6);
597 else
598 error = 0;
599 # else
600 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
601 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
602 # endif
603 if (error) {
604 SPL_X(s);
605 IPFERROR(130012);
606 return error;
607 }
608 # endif
609 #endif
610 SPL_X(s);
611
612 #if (__NetBSD_Version__ >= 499005500)
613 for (i = 0; i < IPL_LOGSIZE; i++)
614 seldestroy(&ipfmain.ipf_selwait[i]);
615 #endif
616
617 ipf_fini_all(softc);
618
619 return 0;
620 }
621
622
623 /*
624 * Filter ioctl interface.
625 */
626 int
627 ipfioctl(dev_t dev, u_long cmd,
628 #if (__NetBSD_Version__ >= 499001000)
629 void *data,
630 #else
631 caddr_t data,
632 #endif
633 int mode
634 #if (NetBSD >= 199511)
635 # if (__NetBSD_Version__ >= 399001400)
636 , struct lwp *p
637 # if (__NetBSD_Version__ >= 399002000)
638 # define UID(l) kauth_cred_getuid((l)->l_cred)
639 # else
640 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
641 # endif
642 # else
643 , struct proc *p
644 # define UID(p) ((p)->p_cred->p_ruid)
645 # endif
646 #endif
647 )
648 {
649 int error = 0, unit = 0;
650 SPL_INT(s);
651
652 #if (__NetBSD_Version__ >= 399002000)
653 if ((mode & FWRITE) &&
654 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
655 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
656 NULL, NULL)) {
657 ipfmain.ipf_interror = 130005;
658 return EPERM;
659 }
660 #else
661 if ((securelevel >= 2) && (mode & FWRITE)) {
662 ipfmain.ipf_interror = 130001;
663 return EPERM;
664 }
665 #endif
666
667 unit = GET_MINOR(dev);
668 if ((IPL_LOGMAX < unit) || (unit < 0)) {
669 ipfmain.ipf_interror = 130002;
670 return ENXIO;
671 }
672
673 if (ipfmain.ipf_running <= 0) {
674 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
675 ipfmain.ipf_interror = 130003;
676 return EIO;
677 }
678 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
679 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
680 cmd != SIOCGETFS && cmd != SIOCGETFF &&
681 cmd != SIOCIPFINTERROR) {
682 ipfmain.ipf_interror = 130004;
683 return EIO;
684 }
685 }
686
687 SPL_NET(s);
688
689 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
690 if (error != -1) {
691 SPL_X(s);
692 return error;
693 }
694
695 SPL_X(s);
696 return error;
697 }
698
699
700 /*
701 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
702 * requires a large amount of setting up and isn't any more efficient.
703 */
704 int
705 ipf_send_reset(fr_info_t *fin)
706 {
707 struct tcphdr *tcp, *tcp2;
708 int tlen = 0, hlen;
709 struct mbuf *m;
710 #ifdef USE_INET6
711 ip6_t *ip6;
712 #endif
713 ip_t *ip;
714
715 tcp = fin->fin_dp;
716 if (tcp->th_flags & TH_RST)
717 return -1; /* feedback loop */
718
719 if (ipf_checkl4sum(fin) == -1)
720 return -1;
721
722 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
723 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
724 ((tcp->th_flags & TH_FIN) ? 1 : 0);
725
726 #ifdef USE_INET6
727 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
728 #else
729 hlen = sizeof(ip_t);
730 #endif
731 #ifdef MGETHDR
732 MGETHDR(m, M_DONTWAIT, MT_HEADER);
733 #else
734 MGET(m, M_DONTWAIT, MT_HEADER);
735 #endif
736 if (m == NULL)
737 return -1;
738 if (sizeof(*tcp2) + hlen > MHLEN) {
739 MCLGET(m, M_DONTWAIT);
740 if (m == NULL)
741 return -1;
742 if ((m->m_flags & M_EXT) == 0) {
743 FREE_MB_T(m);
744 return -1;
745 }
746 }
747
748 m->m_len = sizeof(*tcp2) + hlen;
749 m->m_data += max_linkhdr;
750 m->m_pkthdr.len = m->m_len;
751 m_reset_rcvif(m);
752 ip = mtod(m, struct ip *);
753 bzero((char *)ip, hlen);
754 #ifdef USE_INET6
755 ip6 = (ip6_t *)ip;
756 #endif
757 bzero((char *)ip, sizeof(*tcp2) + hlen);
758 tcp2 = (struct tcphdr *)((char *)ip + hlen);
759 tcp2->th_sport = tcp->th_dport;
760 tcp2->th_dport = tcp->th_sport;
761
762 if (tcp->th_flags & TH_ACK) {
763 tcp2->th_seq = tcp->th_ack;
764 tcp2->th_flags = TH_RST;
765 tcp2->th_ack = 0;
766 } else {
767 tcp2->th_seq = 0;
768 tcp2->th_ack = ntohl(tcp->th_seq);
769 tcp2->th_ack += tlen;
770 tcp2->th_ack = htonl(tcp2->th_ack);
771 tcp2->th_flags = TH_RST|TH_ACK;
772 }
773 tcp2->th_x2 = 0;
774 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
775 tcp2->th_win = tcp->th_win;
776 tcp2->th_sum = 0;
777 tcp2->th_urp = 0;
778
779 #ifdef USE_INET6
780 if (fin->fin_v == 6) {
781 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
782 ip6->ip6_plen = htons(sizeof(struct tcphdr));
783 ip6->ip6_nxt = IPPROTO_TCP;
784 ip6->ip6_hlim = 0;
785 ip6->ip6_src = fin->fin_dst6.in6;
786 ip6->ip6_dst = fin->fin_src6.in6;
787 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
788 sizeof(*ip6), sizeof(*tcp2));
789 return ipf_send_ip(fin, m);
790 }
791 #endif
792 #ifdef INET
793 ip->ip_p = IPPROTO_TCP;
794 ip->ip_len = htons(sizeof(struct tcphdr));
795 ip->ip_src.s_addr = fin->fin_daddr;
796 ip->ip_dst.s_addr = fin->fin_saddr;
797 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
798 ip->ip_len = hlen + sizeof(*tcp2);
799 return ipf_send_ip(fin, m);
800 #else
801 return 0;
802 #endif
803 }
804
805
806 /*
807 * Expects ip_len to be in host byte order when called.
808 */
809 static int
810 ipf_send_ip(fr_info_t *fin, mb_t *m)
811 {
812 fr_info_t fnew;
813 #ifdef INET
814 ip_t *oip;
815 #endif
816 ip_t *ip;
817 int hlen;
818
819 ip = mtod(m, ip_t *);
820 bzero((char *)&fnew, sizeof(fnew));
821 fnew.fin_main_soft = fin->fin_main_soft;
822
823 IP_V_A(ip, fin->fin_v);
824 switch (fin->fin_v)
825 {
826 #ifdef INET
827 case 4 :
828 oip = fin->fin_ip;
829 hlen = sizeof(*oip);
830 fnew.fin_v = 4;
831 fnew.fin_p = ip->ip_p;
832 fnew.fin_plen = ntohs(ip->ip_len);
833 HTONS(ip->ip_len);
834 IP_HL_A(ip, sizeof(*oip) >> 2);
835 ip->ip_tos = oip->ip_tos;
836 ip->ip_id = ipf_nextipid(fin);
837 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
838 ip->ip_ttl = ip_defttl;
839 ip->ip_sum = 0;
840 break;
841 #endif
842 #ifdef USE_INET6
843 case 6 :
844 {
845 ip6_t *ip6 = (ip6_t *)ip;
846
847 ip6->ip6_vfc = 0x60;
848 ip6->ip6_hlim = IPDEFTTL;
849
850 hlen = sizeof(*ip6);
851 fnew.fin_p = ip6->ip6_nxt;
852 fnew.fin_v = 6;
853 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
854 break;
855 }
856 #endif
857 default :
858 return EINVAL;
859 }
860 #ifdef KAME_IPSEC
861 m_reset_rcvif(m);
862 #endif
863
864 fnew.fin_ifp = fin->fin_ifp;
865 fnew.fin_flx = FI_NOCKSUM;
866 fnew.fin_m = m;
867 fnew.fin_ip = ip;
868 fnew.fin_mp = &m;
869 fnew.fin_hlen = hlen;
870 fnew.fin_dp = (char *)ip + hlen;
871 (void) ipf_makefrip(hlen, ip, &fnew);
872
873 return ipf_fastroute(m, &m, &fnew, NULL);
874 }
875
876
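/*
 * Send an ICMP (or ICMPv6) error of the given type back towards the source
 * of the packet described by fin, quoting the offending IP header and the
 * first few bytes of its payload.
 */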
877 int
878 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
879 {
880 int err, hlen, xtra, iclen, ohlen, avail;
881 struct in_addr dst4;
882 struct icmp *icmp;
883 struct mbuf *m;
884 i6addr_t dst6;
885 void *ifp;
886 #ifdef USE_INET6
887 int code;
888 ip6_t *ip6;
889 #endif
890 ip_t *ip, *ip2;
891
892 if ((type < 0) || (type > ICMP_MAXTYPE))
893 return -1;
894
895 #ifdef USE_INET6
896 code = fin->fin_icode;
897 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
898 return -1;
899 #endif
900
901 if (ipf_checkl4sum(fin) == -1)
902 return -1;
903 #ifdef MGETHDR
904 MGETHDR(m, M_DONTWAIT, MT_HEADER);
905 #else
906 MGET(m, M_DONTWAIT, MT_HEADER);
907 #endif
908 if (m == NULL)
909 return -1;
910 avail = MHLEN;
911
912 xtra = 0;
913 hlen = 0;
914 ohlen = 0;
915 dst4.s_addr = 0;
916 ifp = fin->fin_ifp;
917 if (fin->fin_v == 4) {
918 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
919 switch (ntohs(fin->fin_data[0]) >> 8)
920 {
921 case ICMP_ECHO :
922 case ICMP_TSTAMP :
923 case ICMP_IREQ :
924 case ICMP_MASKREQ :
925 break;
926 default :
927 FREE_MB_T(m);
928 return 0;
929 }
930
931 if (dst == 0) {
932 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
933 &dst6, NULL) == -1) {
934 FREE_MB_T(m);
935 return -1;
936 }
937 dst4 = dst6.in4;
938 } else
939 dst4.s_addr = fin->fin_daddr;
940
941 hlen = sizeof(ip_t);
942 ohlen = fin->fin_hlen;
943 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
944 if (fin->fin_hlen < fin->fin_plen)
945 xtra = MIN(fin->fin_dlen, 8);
946 else
947 xtra = 0;
948 }
949
950 #ifdef USE_INET6
951 else if (fin->fin_v == 6) {
952 hlen = sizeof(ip6_t);
953 ohlen = sizeof(ip6_t);
954 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
955 type = icmptoicmp6types[type];
956 if (type == ICMP6_DST_UNREACH)
957 code = icmptoicmp6unreach[code];
958
959 if (iclen + max_linkhdr + fin->fin_plen > avail) {
960 MCLGET(m, M_DONTWAIT);
961 if (m == NULL)
962 return -1;
963 if ((m->m_flags & M_EXT) == 0) {
964 FREE_MB_T(m);
965 return -1;
966 }
967 avail = MCLBYTES;
968 }
969 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
970 xtra = MIN(xtra, IPV6_MMTU - iclen);
971 if (dst == 0) {
972 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
973 &dst6, NULL) == -1) {
974 FREE_MB_T(m);
975 return -1;
976 }
977 } else
978 dst6 = fin->fin_dst6;
979 }
980 #endif
981 else {
982 FREE_MB_T(m);
983 return -1;
984 }
985
986 avail -= (max_linkhdr + iclen);
987 if (avail < 0) {
988 FREE_MB_T(m);
989 return -1;
990 }
991 if (xtra > avail)
992 xtra = avail;
993 iclen += xtra;
994 m->m_data += max_linkhdr;
995 m_reset_rcvif(m);
996 m->m_pkthdr.len = iclen;
997 m->m_len = iclen;
998 ip = mtod(m, ip_t *);
999 icmp = (struct icmp *)((char *)ip + hlen);
1000 ip2 = (ip_t *)&icmp->icmp_ip;
1001
1002 icmp->icmp_type = type;
1003 icmp->icmp_code = fin->fin_icode;
1004 icmp->icmp_cksum = 0;
1005 #ifdef icmp_nextmtu
1006 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1007 if (fin->fin_mtu != 0) {
1008 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1009
1010 } else if (ifp != NULL) {
1011 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1012
1013 } else { /* make up a number... */
1014 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1015 }
1016 }
1017 #endif
1018
1019 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1020
1021 #if defined(M_CSUM_IPv4)
1022 /*
1023 * Clear any in-bound checksum flags for this packet.
1024 */
1025 m->m_pkthdr.csuminfo = 0;
1026 #endif /* __NetBSD__ && M_CSUM_IPv4 */
1027
1028 #ifdef USE_INET6
1029 ip6 = (ip6_t *)ip;
1030 if (fin->fin_v == 6) {
1031 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1032 ip6->ip6_plen = htons(iclen - hlen);
1033 ip6->ip6_nxt = IPPROTO_ICMPV6;
1034 ip6->ip6_hlim = 0;
1035 ip6->ip6_src = dst6.in6;
1036 ip6->ip6_dst = fin->fin_src6.in6;
1037 if (xtra > 0)
1038 bcopy((char *)fin->fin_ip + ohlen,
1039 (char *)&icmp->icmp_ip + ohlen, xtra);
1040 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1041 sizeof(*ip6), iclen - hlen);
1042 } else
1043 #endif
1044 {
1045 ip->ip_p = IPPROTO_ICMP;
1046 ip->ip_src.s_addr = dst4.s_addr;
1047 ip->ip_dst.s_addr = fin->fin_saddr;
1048
1049 if (xtra > 0)
1050 bcopy((char *)fin->fin_ip + ohlen,
1051 (char *)&icmp->icmp_ip + ohlen, xtra);
1052 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1053 sizeof(*icmp) + 8);
1054 ip->ip_len = iclen;
1055 ip->ip_p = IPPROTO_ICMP;
1056 }
1057 err = ipf_send_ip(fin, m);
1058 return err;
1059 }
1060
1061
1062 /*
1063 * m0 - pointer to mbuf where the IP packet starts
1064 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1065 */
1066 int
1067 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1068 {
1069 register struct ip *ip, *mhip;
1070 register struct mbuf *m = *mpp;
1071 register struct route *ro;
1072 int len, off, error = 0, hlen, code;
1073 struct ifnet *ifp, *sifp;
1074 ipf_main_softc_t *softc;
1075 #if __NetBSD_Version__ >= 499001100
1076 union {
1077 struct sockaddr dst;
1078 struct sockaddr_in dst4;
1079 } u;
1080 #else
1081 struct sockaddr_in *dst4;
1082 #endif
1083 struct sockaddr *dst;
1084 u_short ip_off, ip_len;
1085 struct route iproute;
1086 struct rtentry *rt;
1087 frdest_t node;
1088 frentry_t *fr;
1089
1090 if (fin->fin_v == 6) {
1091 #ifdef USE_INET6
1092 error = ipf_fastroute6(m0, mpp, fin, fdp);
1093 #else
1094 error = EPROTONOSUPPORT;
1095 #endif
1096 if ((error != 0) && (*mpp != NULL))
1097 FREE_MB_T(*mpp);
1098 return error;
1099 }
1100 #ifndef INET
1101 FREE_MB_T(*mpp);
1102 return EPROTONOSUPPORT;
1103 #else
1104
1105 hlen = fin->fin_hlen;
1106 ip = mtod(m0, struct ip *);
1107 softc = fin->fin_main_soft;
1108 rt = NULL;
1109 ifp = NULL;
1110
1111 # if defined(M_CSUM_IPv4)
1112 /*
1113 * Clear any in-bound checksum flags for this packet.
1114 */
1115 m0->m_pkthdr.csuminfo = 0;
1116 # endif /* __NetBSD__ && M_CSUM_IPv4 */
1117
1118 /*
1119 * Route packet.
1120 */
1121 ro = &iproute;
1122 memset(ro, 0, sizeof(*ro));
1123 fr = fin->fin_fr;
1124
1125 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1126 (fdp->fd_type == FRD_DSTLIST)) {
1127 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1128 fdp = &node;
1129 }
1130 if (fdp != NULL)
1131 ifp = fdp->fd_ptr;
1132 else
1133 ifp = fin->fin_ifp;
1134
1135 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1136 error = -2;
1137 goto bad;
1138 }
1139
1140 # if __NetBSD_Version__ >= 499001100
1141 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1142 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1143 else
1144 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1145 dst = &u.dst;
1146 rtcache_setdst(ro, dst);
1147 rt = rtcache_init(ro);
1148 # else
1149 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1150 dst = (struct sockaddr *)dst4;
1151 dst4->sin_family = AF_INET;
1152 dst4->sin_addr = ip->ip_dst;
1153
1154 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1155 dst4->sin_addr = fdp->fd_ip;
1156
1157 dst4->sin_len = sizeof(*dst);
1158 rtalloc(ro);
1159 rt = ro->ro_rt;
1160 # endif
1161 if ((ifp == NULL) && (rt != NULL))
1162 ifp = rt->rt_ifp;
1163 if ((rt == NULL) || (ifp == NULL)) {
1164 #ifdef INET
1165 if (in_localaddr(ip->ip_dst))
1166 error = EHOSTUNREACH;
1167 else
1168 #endif
1169 error = ENETUNREACH;
1170 goto bad;
1171 }
1172
1173
1174 if (rt->rt_flags & RTF_GATEWAY)
1175 dst = rt->rt_gateway;
1176
1177 rt->rt_use++;
1178
1179 /*
1180 * For input packets which are being "fastrouted", they won't
1181 * go back through output filtering and miss their chance to get
1182 * NAT'd and counted. Duplicated packets aren't considered to be
1183 * part of the normal packet stream, so do not NAT them or pass
1184 * them through stateful checking, etc.
1185 */
1186 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1187 sifp = fin->fin_ifp;
1188 fin->fin_ifp = ifp;
1189 fin->fin_out = 1;
1190 (void) ipf_acctpkt(fin, NULL);
1191 fin->fin_fr = NULL;
1192 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1193 u_32_t pass;
1194
1195 (void) ipf_state_check(fin, &pass);
1196 }
1197
1198 switch (ipf_nat_checkout(fin, NULL))
1199 {
1200 case 0 :
1201 break;
1202 case 1 :
1203 ip->ip_sum = 0;
1204 break;
1205 case -1 :
1206 error = -1;
1207 goto bad;
1208 break;
1209 }
1210
1211 fin->fin_ifp = sifp;
1212 fin->fin_out = 0;
1213 } else
1214 ip->ip_sum = 0;
1215 /*
1216 * If small enough for interface, can just send directly.
1217 */
1218 m_set_rcvif(m, ifp);
1219
1220 ip_len = ntohs(ip->ip_len);
1221 if (ip_len <= ifp->if_mtu) {
1222 # if defined(M_CSUM_IPv4)
1223 # if (__NetBSD_Version__ >= 105009999)
1224 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1225 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1226 # else
1227 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1228 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1229 # endif /* (__NetBSD_Version__ >= 105009999) */
1230 else if (ip->ip_sum == 0)
1231 ip->ip_sum = in_cksum(m, hlen);
1232 # else
1233 if (!ip->ip_sum)
1234 ip->ip_sum = in_cksum(m, hlen);
1235 # endif /* M_CSUM_IPv4 */
1236
1237 error = if_output_lock(ifp, ifp, m, dst, rt);
1238 goto done;
1239 }
1240
1241 /*
1242 * Too large for interface; fragment if possible.
1243 * Must be able to put at least 8 bytes per fragment.
1244 */
1245 ip_off = ntohs(ip->ip_off);
1246 if (ip_off & IP_DF) {
1247 error = EMSGSIZE;
1248 goto bad;
1249 }
1250 len = (ifp->if_mtu - hlen) &~ 7;
1251 if (len < 8) {
1252 error = EMSGSIZE;
1253 goto bad;
1254 }
1255
1256 {
1257 int mhlen, firstlen = len;
1258 struct mbuf **mnext = &m->m_act;
1259
1260 /*
1261 * Loop through length of segment after first fragment,
1262 * make new header and copy data of each part and link onto chain.
1263 */
1264 m0 = m;
1265 mhlen = sizeof (struct ip);
1266 for (off = hlen + len; off < ip_len; off += len) {
1267 # ifdef MGETHDR
1268 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1269 # else
1270 MGET(m, M_DONTWAIT, MT_HEADER);
1271 # endif
1272 if (m == 0) {
1273 m = m0;
1274 error = ENOBUFS;
1275 goto bad;
1276 }
1277 m->m_data += max_linkhdr;
1278 mhip = mtod(m, struct ip *);
1279 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1280 #ifdef INET
1281 if (hlen > sizeof (struct ip)) {
1282 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1283 IP_HL_A(mhip, mhlen >> 2);
1284 }
1285 #endif
1286 m->m_len = mhlen;
1287 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1288 if (off + len >= ip_len)
1289 len = ip_len - off;
1290 else
1291 mhip->ip_off |= IP_MF;
1292 mhip->ip_len = htons((u_short)(len + mhlen));
1293 m->m_next = m_copy(m0, off, len);
1294 if (m->m_next == 0) {
1295 error = ENOBUFS; /* ??? */
1296 goto sendorfree;
1297 }
1298 m->m_pkthdr.len = mhlen + len;
1299 m_reset_rcvif(m);
1300 mhip->ip_off = htons((u_short)mhip->ip_off);
1301 mhip->ip_sum = 0;
1302 #ifdef INET
1303 mhip->ip_sum = in_cksum(m, mhlen);
1304 #endif
1305 *mnext = m;
1306 mnext = &m->m_act;
1307 }
1308 /*
1309 * Update first fragment by trimming what's been copied out
1310 * and updating header, then send each fragment (in order).
1311 */
1312 m_adj(m0, hlen + firstlen - ip_len);
1313 ip->ip_len = htons((u_short)(hlen + firstlen));
1314 ip->ip_off = htons((u_short)IP_MF);
1315 ip->ip_sum = 0;
1316 #ifdef INET
1317 ip->ip_sum = in_cksum(m0, hlen);
1318 #endif
1319 sendorfree:
1320 for (m = m0; m; m = m0) {
1321 m0 = m->m_act;
1322 m->m_act = 0;
1323 if (error == 0) {
1324 KERNEL_LOCK(1, NULL);
1325 error = (*ifp->if_output)(ifp, m, dst, rt);
1326 KERNEL_UNLOCK_ONE(NULL);
1327 } else {
1328 FREE_MB_T(m);
1329 }
1330 }
1331 }
1332 done:
1333 if (!error)
1334 softc->ipf_frouteok[0]++;
1335 else
1336 softc->ipf_frouteok[1]++;
1337
1338 # if __NetBSD_Version__ >= 499001100
1339 rtcache_free(ro);
1340 # else
1341 if (rt) {
1342 RTFREE(rt);
1343 }
1344 # endif
1345 return error;
1346 bad:
1347 if (error == EMSGSIZE) {
1348 sifp = fin->fin_ifp;
1349 code = fin->fin_icode;
1350 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1351 fin->fin_ifp = ifp;
1352 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1353 fin->fin_ifp = sifp;
1354 fin->fin_icode = code;
1355 }
1356 FREE_MB_T(m);
1357 goto done;
1358 #endif /* INET */
1359 }
1360
1361
1362 #if defined(USE_INET6)
1363 /*
1364 * This is the IPv6-specific fastroute code. It doesn't clean up the mbufs
1365 * or ensure that it is an IPv6 packet that is being forwarded; those are
1366 * expected to be done by the caller (ipf_fastroute).
1367 */
1368 static int
1369 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1370 frdest_t *fdp)
1371 {
1372 # if __NetBSD_Version__ >= 499001100
1373 struct route ip6route;
1374 const struct sockaddr *dst;
1375 union {
1376 struct sockaddr dst;
1377 struct sockaddr_in6 dst6;
1378 } u;
1379 struct route *ro;
1380 # else
1381 struct route_in6 ip6route;
1382 struct sockaddr_in6 *dst6;
1383 struct route_in6 *ro;
1384 # endif
1385 struct rtentry *rt;
1386 struct ifnet *ifp;
1387 u_long mtu;
1388 int error;
1389
1390 error = 0;
1391 ro = &ip6route;
1392
1393 if (fdp != NULL)
1394 ifp = fdp->fd_ptr;
1395 else
1396 ifp = fin->fin_ifp;
1397 memset(ro, 0, sizeof(*ro));
1398 # if __NetBSD_Version__ >= 499001100
1399 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1400 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1401 else
1402 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1403 dst = &u.dst;
1404 rtcache_setdst(ro, dst);
1405
1406 rt = rtcache_init(ro);
1407 if ((ifp == NULL) && (rt != NULL))
1408 ifp = rt->rt_ifp;
1409 # else
1410 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1411 dst6->sin6_family = AF_INET6;
1412 dst6->sin6_len = sizeof(struct sockaddr_in6);
1413 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1414
1415 if (fdp != NULL) {
1416 if (IP6_NOTZERO(&fdp->fd_ip6))
1417 dst6->sin6_addr = fdp->fd_ip6.in6;
1418 }
1419
1420 rtalloc((struct route *)ro);
1421
1422 if ((ifp == NULL) && (ro->ro_rt != NULL))
1423 ifp = ro->ro_rt->rt_ifp;
1424 rt = ro->ro_rt;
1425 # endif
1426 if ((rt == NULL) || (ifp == NULL)) {
1427
1428 error = EHOSTUNREACH;
1429 goto bad;
1430 }
1431
1432 /* KAME */
1433 # if __NetBSD_Version__ >= 499001100
1434 if (IN6_IS_ADDR_LINKLOCAL(&u.dst6.sin6_addr))
1435 u.dst6.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1436 # else
1437 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1438 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1439 # endif
1440
1441 {
1442 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
1443 struct in6_ifextra *ife;
1444 # endif
1445 if (rt->rt_flags & RTF_GATEWAY)
1446 # if __NetBSD_Version__ >= 499001100
1447 dst = rt->rt_gateway;
1448 # else
1449 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1450 # endif
1451 rt->rt_use++;
1452
1453 /* Determine path MTU. */
1454 # if (__NetBSD_Version__ <= 106009999)
1455 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1456 # else
1457 # ifdef IN6_LINKMTU
1458 mtu = IN6_LINKMTU(ifp);
1459 # else
1460 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1461 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1462 # endif
1463 # endif
1464 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1465 # if __NetBSD_Version__ >= 499001100
1466 error = nd6_output(ifp, ifp, m0, satocsin6(dst), rt);
1467 # else
1468 error = nd6_output(ifp, ifp, m0, dst6, rt);
1469 # endif
1470 } else {
1471 error = EMSGSIZE;
1472 }
1473 }
1474 bad:
1475 # if __NetBSD_Version__ >= 499001100
1476 rtcache_free(ro);
1477 # else
1478 if (ro->ro_rt != NULL) {
1479 RTFREE(((struct route *)ro)->ro_rt);
1480 }
1481 # endif
1482 return error;
1483 }
1484 #endif /* INET6 */
1485
1486
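/*
 * Reverse path check: look up the route back to the packet's source
 * address and return non-zero if it uses the interface on which the
 * packet arrived.
 */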
1487 int
1488 ipf_verifysrc(fr_info_t *fin)
1489 {
1490 #if __NetBSD_Version__ >= 499001100
1491 union {
1492 struct sockaddr dst;
1493 struct sockaddr_in dst4;
1494 } u;
1495 struct rtentry *rt;
1496 #else
1497 struct sockaddr_in *dst;
1498 #endif
1499 struct route iproute;
1500 int rc;
1501
1502 #if __NetBSD_Version__ >= 499001100
1503 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1504 rtcache_setdst(&iproute, &u.dst);
1505 rt = rtcache_init(&iproute);
1506 if (rt == NULL)
1507 rc = 0;
1508 else
1509 rc = (fin->fin_ifp == rt->rt_ifp);
1510 rtcache_free(&iproute);
1511 #else
1512 dst = (struct sockaddr_in *)&iproute.ro_dst;
1513 dst->sin_len = sizeof(*dst);
1514 dst->sin_family = AF_INET;
1515 dst->sin_addr = fin->fin_src;
1516 rtalloc(&iproute);
1517 if (iproute.ro_rt == NULL)
1518 return 0;
1519 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1520 RTFREE(iproute.ro_rt);
1521 #endif
1522 return rc;
1523 }
1524
1525
1526 /*
1527 * return the first IP address associated with an interface
1528 */
1529 int
1530 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1531 i6addr_t *inp, i6addr_t *inpmask)
1532 {
1533 #ifdef USE_INET6
1534 struct in6_addr *inp6 = NULL;
1535 #endif
1536 struct sockaddr *sock, *mask;
1537 struct sockaddr_in *sin;
1538 struct ifaddr *ifa;
1539 struct ifnet *ifp;
1540
1541 if ((ifptr == NULL) || (ifptr == (void *)-1))
1542 return -1;
1543
1544 ifp = ifptr;
1545 mask = NULL;
1546
1547 if (v == 4)
1548 inp->in4.s_addr = 0;
1549 #ifdef USE_INET6
1550 else if (v == 6)
1551 bzero((char *)inp, sizeof(*inp));
1552 #endif
1553
1554 ifa = IFADDR_READER_FIRST(ifp);
1555 sock = ifa ? ifa->ifa_addr : NULL;
1556 while (sock != NULL && ifa != NULL) {
1557 sin = (struct sockaddr_in *)sock;
1558 if ((v == 4) && (sin->sin_family == AF_INET))
1559 break;
1560 #ifdef USE_INET6
1561 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1562 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1563 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1564 !IN6_IS_ADDR_LOOPBACK(inp6))
1565 break;
1566 }
1567 #endif
1568 ifa = IFADDR_READER_NEXT(ifa);
1569 if (ifa != NULL)
1570 sock = ifa->ifa_addr;
1571 }
1572 if (ifa == NULL || sock == NULL)
1573 return -1;
1574
1575 mask = ifa->ifa_netmask;
1576 if (atype == FRI_BROADCAST)
1577 sock = ifa->ifa_broadaddr;
1578 else if (atype == FRI_PEERADDR)
1579 sock = ifa->ifa_dstaddr;
1580
1581 #ifdef USE_INET6
1582 if (v == 6)
1583 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1584 (struct sockaddr_in6 *)mask,
1585 inp, inpmask);
1586 #endif
1587 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1588 (struct sockaddr_in *)mask,
1589 &inp->in4, &inpmask->in4);
1590 }
1591
1592
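/*
 * Generate a new TCP initial sequence number.  On 1.5T and later kernels
 * this defers to tcp_new_iss1(); older kernels fall back to an MD5 hash
 * of the addresses and ports mixed with a secret, plus a local increment.
 */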
1593 u_32_t
1594 ipf_newisn(fr_info_t *fin)
1595 {
1596 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1597 size_t asz;
1598
1599 if (fin->fin_v == 4)
1600 asz = sizeof(struct in_addr);
1601 else if (fin->fin_v == 6)
1602 asz = sizeof(fin->fin_src);
1603 else /* XXX: no way to return error */
1604 return 0;
1605 #ifdef INET
1606 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1607 fin->fin_sport, fin->fin_dport, asz, 0);
1608 #else
1609 return ENOSYS;
1610 #endif
1611 #else
1612 static int iss_seq_off = 0;
1613 u_char hash[16];
1614 u_32_t newiss;
1615 MD5_CTX ctx;
1616
1617 /*
1618 * Compute the base value of the ISS. It is a hash
1619 * of (saddr, sport, daddr, dport, secret).
1620 */
1621 MD5Init(&ctx);
1622
1623 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1624 sizeof(fin->fin_fi.fi_src));
1625 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1626 sizeof(fin->fin_fi.fi_dst));
1627 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1628
1629 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1630
1631 MD5Final(hash, &ctx);
1632
1633 memcpy(&newiss, hash, sizeof(newiss));
1634
1635 /*
1636 * Now increment our "timer", and add it in to
1637 * the computed value.
1638 *
1639 * XXX Use `addin'?
1640 * XXX TCP_ISSINCR too large to use?
1641 */
1642 iss_seq_off += 0x00010000;
1643 newiss += iss_seq_off;
1644 return newiss;
1645 #endif
1646 }
1647
1648
1649 /* ------------------------------------------------------------------------ */
1650 /* Function: ipf_nextipid */
1651 /* Returns: int - 0 == success, -1 == error (packet should be dropped) */
1652 /* Parameters: fin(I) - pointer to packet information */
1653 /* */
1654 /* Returns the next IPv4 ID to use for this packet. */
1655 /* ------------------------------------------------------------------------ */
1656 u_short
1657 ipf_nextipid(fr_info_t *fin)
1658 {
1659 #ifdef USE_MUTEXES
1660 ipf_main_softc_t *softc = fin->fin_main_soft;
1661 #endif
1662 u_short id;
1663
1664 MUTEX_ENTER(&softc->ipf_rw);
1665 id = ipid++;
1666 MUTEX_EXIT(&softc->ipf_rw);
1667
1668 return id;
1669 }
1670
1671
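/*
 * Validate the layer 4 (TCP/UDP) checksum of an IPv4 packet.  Where the
 * interface has already checked it (M_CSUM_* flags in the mbuf packet
 * header) that result is used; otherwise fall back to ipf_checkl4sum()
 * and verify it in software.
 */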
1672 EXTERN_INLINE int
1673 ipf_checkv4sum(fr_info_t *fin)
1674 {
1675 #ifdef M_CSUM_TCP_UDP_BAD
1676 int manual, pflag, cflags, active;
1677 mb_t *m;
1678
1679 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1680 return 0;
1681
1682 if ((fin->fin_flx & FI_SHORT) != 0)
1683 return 1;
1684
1685 if (fin->fin_cksum != FI_CK_NEEDED)
1686 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1687
1688 manual = 0;
1689 m = fin->fin_m;
1690 if (m == NULL) {
1691 manual = 1;
1692 goto skipauto;
1693 }
1694
1695 switch (fin->fin_p)
1696 {
1697 case IPPROTO_UDP :
1698 pflag = M_CSUM_UDPv4;
1699 break;
1700 case IPPROTO_TCP :
1701 pflag = M_CSUM_TCPv4;
1702 break;
1703 default :
1704 pflag = 0;
1705 manual = 1;
1706 break;
1707 }
1708
1709 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1710 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1711 cflags = m->m_pkthdr.csum_flags & active;
1712
1713 if (pflag != 0) {
1714 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1715 fin->fin_flx |= FI_BAD;
1716 fin->fin_cksum = FI_CK_BAD;
1717 } else if (cflags == (pflag | M_CSUM_DATA)) {
1718 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1719 fin->fin_flx |= FI_BAD;
1720 fin->fin_cksum = FI_CK_BAD;
1721 } else {
1722 fin->fin_cksum = FI_CK_SUMOK;
1723 }
1724 } else if (cflags == pflag) {
1725 fin->fin_cksum = FI_CK_SUMOK;
1726 } else {
1727 manual = 1;
1728 }
1729 }
1730 skipauto:
1731 if (manual != 0) {
1732 if (ipf_checkl4sum(fin) == -1) {
1733 fin->fin_flx |= FI_BAD;
1734 return -1;
1735 }
1736 }
1737 #else
1738 if (ipf_checkl4sum(fin) == -1) {
1739 fin->fin_flx |= FI_BAD;
1740 return -1;
1741 }
1742 #endif
1743 return 0;
1744 }
1745
1746
1747 #ifdef USE_INET6
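/*
 * IPv6 counterpart of ipf_checkv4sum(): use the interface's checksum
 * offload result where available, otherwise verify the layer 4 checksum
 * in software with ipf_checkl4sum().
 */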
1748 EXTERN_INLINE int
1749 ipf_checkv6sum(fr_info_t *fin)
1750 {
1751 # ifdef M_CSUM_TCP_UDP_BAD
1752 int manual, pflag, cflags, active;
1753 mb_t *m;
1754
1755 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1756 return 0;
1757
1758 if ((fin->fin_flx & FI_SHORT) != 0)
1759 return 1;
1760
1761 if (fin->fin_cksum != FI_CK_NEEDED)
1762 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1763
1764
1765 manual = 0;
1766 m = fin->fin_m;
1767
1768 switch (fin->fin_p)
1769 {
1770 case IPPROTO_UDP :
1771 pflag = M_CSUM_UDPv6;
1772 break;
1773 case IPPROTO_TCP :
1774 pflag = M_CSUM_TCPv6;
1775 break;
1776 default :
1777 pflag = 0;
1778 manual = 1;
1779 break;
1780 }
1781
1782 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1783 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1784 cflags = m->m_pkthdr.csum_flags & active;
1785
1786 if (pflag != 0) {
1787 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1788 fin->fin_flx |= FI_BAD;
1789 } else if (cflags == (pflag | M_CSUM_DATA)) {
1790 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1791 fin->fin_flx |= FI_BAD;
1792 } else if (cflags == pflag) {
1793 ;
1794 } else {
1795 manual = 1;
1796 }
1797 }
1798 if (manual != 0) {
1799 if (ipf_checkl4sum(fin) == -1) {
1800 fin->fin_flx |= FI_BAD;
1801 return -1;
1802 }
1803 }
1804 # else
1805 if (ipf_checkl4sum(fin) == -1) {
1806 fin->fin_flx |= FI_BAD;
1807 return -1;
1808 }
1809 # endif
1810 return 0;
1811 }
1812 #endif /* USE_INET6 */
1813
1814
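/*
 * Return the total number of data bytes in an mbuf chain, taken from the
 * packet header when present and otherwise summed over the chain.
 */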
1815 size_t
1816 mbufchainlen(struct mbuf *m0)
1817 {
1818 size_t len;
1819
1820 if ((m0->m_flags & M_PKTHDR) != 0) {
1821 len = m0->m_pkthdr.len;
1822 } else {
1823 struct mbuf *m;
1824
1825 for (m = m0, len = 0; m != NULL; m = m->m_next)
1826 len += m->m_len;
1827 }
1828 return len;
1829 }
1830
1831
1832 /* ------------------------------------------------------------------------ */
1833 /* Function: ipf_pullup */
1834 /* Returns: NULL == pullup failed, else pointer to protocol header */
1835 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1836 /* fin(I) - pointer to packet information */
1837 /* len(I) - number of bytes to pullup */
1838 /* */
1839 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1840 /* single buffer for ease of access. Operating system native functions are */
1841 /* used to manage buffers - if necessary. If the entire packet ends up in */
1842 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1843 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1844 /* and ONLY if the pullup succeeds. */
1845 /* */
1846 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1847 /* of buffers that starts at *fin->fin_mp. */
1848 /* ------------------------------------------------------------------------ */
1849 void *
1850 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1851 {
1852 int dpoff, ipoff;
1853 mb_t *m = xmin;
1854 char *ip;
1855
1856 if (m == NULL)
1857 return NULL;
1858
1859 ip = (char *)fin->fin_ip;
1860 if ((fin->fin_flx & FI_COALESCE) != 0)
1861 return ip;
1862
1863 ipoff = fin->fin_ipoff;
1864 if (fin->fin_dp != NULL)
1865 dpoff = (char *)fin->fin_dp - (char *)ip;
1866 else
1867 dpoff = 0;
1868
1869 if (M_LEN(m) < len) {
1870 mb_t *n = *fin->fin_mp;
1871 /*
1872 * Assume that M_PKTHDR is set and just work with what is left
1873 * rather than check.
1874 * Should not make any real difference, anyway.
1875 */
1876 if (m != n) {
1877 /*
1878 * Record the mbuf that points to the mbuf that we're
1879 * about to go to work on so that we can update the
1880 * m_next appropriately later.
1881 */
1882 for (; n->m_next != m; n = n->m_next)
1883 ;
1884 } else {
1885 n = NULL;
1886 }
1887
1888 #ifdef MHLEN
1889 if (len > MHLEN)
1890 #else
1891 if (len > MLEN)
1892 #endif
1893 {
1894 #ifdef HAVE_M_PULLDOWN
1895 if (m_pulldown(m, 0, len, NULL) == NULL)
1896 m = NULL;
1897 #else
1898 FREE_MB_T(*fin->fin_mp);
1899 m = NULL;
1900 n = NULL;
1901 #endif
1902 } else
1903 {
1904 m = m_pullup(m, len);
1905 }
1906 if (n != NULL)
1907 n->m_next = m;
1908 if (m == NULL) {
1909 /*
1910 * When n is non-NULL, it indicates that m pointed to
1911 * a sub-chain (tail) of the mbuf and that the head
1912 * of this chain has not yet been free'd.
1913 */
1914 if (n != NULL) {
1915 FREE_MB_T(*fin->fin_mp);
1916 }
1917
1918 *fin->fin_mp = NULL;
1919 fin->fin_m = NULL;
1920 return NULL;
1921 }
1922
1923 if (n == NULL)
1924 *fin->fin_mp = m;
1925
1926 while (M_LEN(m) == 0) {
1927 m = m->m_next;
1928 }
1929 fin->fin_m = m;
1930 ip = MTOD(m, char *) + ipoff;
1931
1932 fin->fin_ip = (ip_t *)ip;
1933 if (fin->fin_dp != NULL)
1934 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1935 if (fin->fin_fraghdr != NULL)
1936 fin->fin_fraghdr = (char *)ip +
1937 ((char *)fin->fin_fraghdr -
1938 (char *)fin->fin_ip);
1939 }
1940
1941 if (len == fin->fin_plen)
1942 fin->fin_flx |= FI_COALESCE;
1943 return ip;
1944 }
1945
1946
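/*
 * Re-inject a packet into the IP stack: inbound packets are queued on
 * ip_pktq for normal input processing, outbound packets are handed
 * directly to ip_output() with IP_FORWARDING set.
 */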
1947 int
1948 ipf_inject(fr_info_t *fin, mb_t *m)
1949 {
1950 int error;
1951
1952 if (fin->fin_out == 0) {
1953 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1954 FREE_MB_T(m);
1955 error = ENOBUFS;
1956 } else {
1957 error = 0;
1958 }
1959 } else {
1960 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1961 }
1962 return error;
1963 }
1964
1965
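/*
 * Return a 32 bit pseudo-random number, using cprng(9) where available
 * and arc4random() otherwise.
 */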
1966 u_32_t
1967 ipf_random(void)
1968 {
1969 int number;
1970
1971 #ifdef _CPRNG_H
1972 number = cprng_fast32();
1973 #else
1974 number = arc4random();
1975 #endif
1976 return number;
1977 }
1978
1979
1980 /*
1981 * routines below for saving IP headers to buffer
1982 */
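/*
 * ipfopen/ipfclose: open and close one of the ipf device minors (ipf,
 * nat, state, auth, lookup, sync and, optionally, scan).  On newer
 * kernels the open state is recorded so that the module refuses to
 * unload while the device is in use.
 */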
1983 static int ipfopen(dev_t dev, int flags
1984 #if (NetBSD >= 199511)
1985 , int devtype, PROC_T *p
1986 #endif
1987 )
1988 {
1989 u_int unit = GET_MINOR(dev);
1990 int error;
1991
1992 if (IPL_LOGMAX < unit) {
1993 error = ENXIO;
1994 } else {
1995 switch (unit)
1996 {
1997 case IPL_LOGIPF :
1998 case IPL_LOGNAT :
1999 case IPL_LOGSTATE :
2000 case IPL_LOGAUTH :
2001 case IPL_LOGLOOKUP :
2002 case IPL_LOGSYNC :
2003 #ifdef IPFILTER_SCAN
2004 case IPL_LOGSCAN :
2005 #endif
2006 error = 0;
2007 break;
2008 default :
2009 error = ENXIO;
2010 break;
2011 }
2012 }
2013 #if (__NetBSD_Version__ >= 799003000)
2014 if (error == 0) {
2015 mutex_enter(&ipf_ref_mutex);
2016 ipf_active = 1;
2017 mutex_exit(&ipf_ref_mutex);
2018 }
2019 #endif
2020 return error;
2021 }
2022
2023
2024 static int ipfclose(dev_t dev, int flags
2025 #if (NetBSD >= 199511)
2026 , int devtype, PROC_T *p
2027 #endif
2028 )
2029 {
2030 u_int unit = GET_MINOR(dev);
2031
2032 if (IPL_LOGMAX < unit)
2033 return ENXIO;
2034 else {
2035 #if (__NetBSD_Version__ >= 799003000)
2036 mutex_enter(&ipf_ref_mutex);
2037 ipf_active = 0;
2038 mutex_exit(&ipf_ref_mutex);
2039 #endif
2040 return 0;
2041 }
2042 }
2043
2044 /*
2045 * ipfread/ipflog
2046 * both of these must operate with at least splnet() lest they be
2047 * called during packet processing and cause an inconsistency to appear in
2048 * the filter lists.
2049 */
2050 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2051 {
2052
2053 if (ipfmain.ipf_running < 1) {
2054 ipfmain.ipf_interror = 130006;
2055 return EIO;
2056 }
2057
2058 if (GET_MINOR(dev) == IPL_LOGSYNC)
2059 return ipf_sync_read(&ipfmain, uio);
2060
2061 #ifdef IPFILTER_LOG
2062 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2063 #else
2064 ipfmain.ipf_interror = 130007;
2065 return ENXIO;
2066 #endif
2067 }
2068
2069
2070 /*
2071 * ipfwrite
2072 * both of these must operate with at least splnet() lest they be
2073 * called during packet processing and cause an inconsistency to appear in
2074 * the filter lists.
2075 */
2076 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2077 {
2078
2079 if (ipfmain.ipf_running < 1) {
2080 ipfmain.ipf_interror = 130008;
2081 return EIO;
2082 }
2083
2084 if (GET_MINOR(dev) == IPL_LOGSYNC)
2085 return ipf_sync_write(&ipfmain, uio);
2086 ipfmain.ipf_interror = 130009;
2087 return ENXIO;
2088 }
2089
2090
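/*
 * poll(2) handler for the ipf devices: the log minors report readability
 * when log data is queued, the auth minor when an auth request is waiting
 * and the sync minor for both reading and writing; otherwise the caller
 * is recorded with selrecord().
 */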
2091 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2092 {
2093 u_int unit = GET_MINOR(dev);
2094 int revents = 0;
2095
2096 if (IPL_LOGMAX < unit) {
2097 ipfmain.ipf_interror = 130010;
2098 return ENXIO;
2099 }
2100
2101 switch (unit)
2102 {
2103 case IPL_LOGIPF :
2104 case IPL_LOGNAT :
2105 case IPL_LOGSTATE :
2106 #ifdef IPFILTER_LOG
2107 if ((events & (POLLIN | POLLRDNORM)) &&
2108 ipf_log_canread(&ipfmain, unit))
2109 revents |= events & (POLLIN | POLLRDNORM);
2110 #endif
2111 break;
2112 case IPL_LOGAUTH :
2113 if ((events & (POLLIN | POLLRDNORM)) &&
2114 ipf_auth_waiting(&ipfmain))
2115 revents |= events & (POLLIN | POLLRDNORM);
2116 break;
2117 case IPL_LOGSYNC :
2118 if ((events & (POLLIN | POLLRDNORM)) &&
2119 ipf_sync_canread(&ipfmain))
2120 revents |= events & (POLLIN | POLLRDNORM);
2121 if ((events & (POLLOUT | POLLWRNORM)) &&
2122 ipf_sync_canwrite(&ipfmain))
2123 revents |= events & (POLLOUT | POLLWRNORM);
2124 break;
2125 case IPL_LOGSCAN :
2126 case IPL_LOGLOOKUP :
2127 default :
2128 break;
2129 }
2130
2131 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2132 selrecord(p, &ipfmain.ipf_selwait[unit]);
2133 return revents;
2134 }
2135
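/*
 * Finish a layer 4 checksum calculation: checksum the payload that
 * follows the first hlen bytes of header and fold it together with the
 * partial (pseudo-header) sum supplied by the caller.
 */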
2136 u_int
2137 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2138 {
2139 struct mbuf *m;
2140 u_int sum2;
2141 int off;
2142
2143 m = fin->fin_m;
2144 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
2145 m->m_data += hlen;
2146 m->m_len -= hlen;
2147 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2148 m->m_len += hlen;
2149 m->m_data -= hlen;
2150
2151 /*
2152 * Both sum and sum2 are partial sums, so combine them together.
2153 */
2154 sum += ~sum2 & 0xffff;
2155 while (sum > 0xffff)
2156 sum = (sum & 0xffff) + (sum >> 16);
2157 sum2 = ~sum & 0xffff;
2158 return sum2;
2159 }
2160
2161 #if (__NetBSD_Version__ >= 799003000)
2162
2163 /* NetBSD module interface */
2164
2165 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2166
2167 static int ipl_init(void *);
2168 static int ipl_fini(void *);
2169 static int ipl_modcmd(modcmd_t, void *);
2170
2171 #ifdef _MODULE
2172 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2173 #endif
2174
2175 static int
2176 ipl_modcmd(modcmd_t cmd, void *opaque)
2177 {
2178
2179 switch (cmd) {
2180 case MODULE_CMD_INIT:
2181 return ipl_init(opaque);
2182 case MODULE_CMD_FINI:
2183 return ipl_fini(opaque);
2184 default:
2185 return ENOTTY;
2186 }
2187 }
2188
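/*
 * Module initialization: register the kauth listener, load and create the
 * ipf state and, when built as a module, attach the ipl character device.
 * ipl_fini() undoes this and refuses to unload while the filter is in use.
 */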
2189 static int
2190 ipl_init(void *opaque)
2191 {
2192 int error;
2193
2194 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2195 ipf_listener_cb, NULL);
2196
2197 if ((error = ipf_load_all()) != 0)
2198 return error;
2199
2200 if (ipf_create_all(&ipfmain) == NULL) {
2201 ipf_unload_all();
2202 return ENODEV;
2203 }
2204
2205 /* Initialize our mutex and reference count */
2206 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2207 ipf_active = 0;
2208
2209 #ifdef _MODULE
2210 /*
2211 * Insert ourself into the cdevsw list.
2212 */
2213 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2214 if (error)
2215 ipl_fini(opaque);
2216 #endif
2217
2218 return error;
2219 }
2220
2221 static int
2222 ipl_fini(void *opaque)
2223 {
2224
2225 #ifdef _MODULE
2226 (void)devsw_detach(NULL, &ipl_cdevsw);
2227 #endif
2228
2229 /*
2230 * Grab the mutex, verify that there are no references
2231 * and that there are no running filters. If either
2232 * of these exists, reinsert our cdevsw entry and return
2233 * an error.
2234 */
2235 mutex_enter(&ipf_ref_mutex);
2236 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2237 #ifdef _MODULE
2238 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2239 &ipl_cdevsw, &ipl_cmaj);
2240 #endif
2241 mutex_exit(&ipf_ref_mutex);
2242 return EBUSY;
2243 }
2244
2245 /* Clean up the rest of our state before being unloaded */
2246
2247 mutex_exit(&ipf_ref_mutex);
2248 mutex_destroy(&ipf_ref_mutex);
2249 ipf_destroy_all(&ipfmain);
2250 ipf_unload_all();
2251 kauth_unlisten_scope(ipf_listener);
2252
2253 return 0;
2254 }
2255 #endif /* (__NetBSD_Version__ >= 799003000) */
2256