1 /*	$NetBSD: ip_fil_netbsd.c,v 1.16.2.1 2016/07/17 05:05:10 pgoyette Exp $	*/
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.16.2.1 2016/07/17 05:05:10 pgoyette Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60 #if (__NetBSD_Version__ >= 799003300)
61 #include <sys/localcount.h>
62 #endif
63
64 #include <net/if.h>
65 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/tcp.h>
72 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
73 # include <netinet/tcp_timer.h>
74 # include <netinet/tcp_var.h>
75 #endif
76 #include <netinet/udp.h>
77 #include <netinet/tcpip.h>
78 #include <netinet/ip_icmp.h>
79 #include "netinet/ip_compat.h"
80 #ifdef USE_INET6
81 # include <netinet/icmp6.h>
82 # if (__NetBSD_Version__ >= 106000000)
83 # include <netinet6/nd6.h>
84 # endif
85 #endif
86 #include "netinet/ip_fil.h"
87 #include "netinet/ip_nat.h"
88 #include "netinet/ip_frag.h"
89 #include "netinet/ip_state.h"
90 #include "netinet/ip_proxy.h"
91 #include "netinet/ip_auth.h"
92 #include "netinet/ip_sync.h"
93 #include "netinet/ip_lookup.h"
94 #include "netinet/ip_dstlist.h"
95 #ifdef IPFILTER_SCAN
96 #include "netinet/ip_scan.h"
97 #endif
98 #include <sys/md5.h>
99 #include <sys/kernel.h>
100 #include <sys/conf.h>
101 #ifdef INET
102 extern int ip_optcopy (struct ip *, struct ip *);
103 #endif
104
105 #ifdef IPFILTER_M_IPFILTER
106 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
107 #endif
108
109 #if __NetBSD_Version__ >= 105009999
110 # define csuminfo csum_flags
111 #endif
112
113 #if __NetBSD_Version__ < 200000000
114 extern struct protosw inetsw[];
115 #endif
116
117 #if (__NetBSD_Version__ >= 599002000)
118 static kauth_listener_t ipf_listener;
119 #endif
120
121 #if (__NetBSD_Version__ < 399001400)
122 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
123 struct ifnet *, struct in6_addr *, u_long *,
124 int *);
125 #endif
126 #if (NetBSD >= 199511)
127 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
128 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
129 #else
130 # if (__NetBSD_Version__ >= 399001400)
131 static int ipfopen(dev_t dev, int flags, struct lwp *);
132 static int ipfclose(dev_t dev, int flags, struct lwp *);
133 # else
134 static int ipfopen(dev_t dev, int flags);
135 static int ipfclose(dev_t dev, int flags);
136 # endif /* __NetBSD_Version__ >= 399001400 */
137 #endif
138 static int ipfread(dev_t, struct uio *, int ioflag);
139 static int ipfwrite(dev_t, struct uio *, int ioflag);
140 static int ipfpoll(dev_t, int events, PROC_T *);
141 static void ipf_timer_func(void *ptr);
142
143 #if defined(_MODULE) && (__NetBSD_Version__ >= 799003300)
144 struct localcount ipl_localcount;
145 #endif
146
147 const struct cdevsw ipl_cdevsw = {
148 .d_open = ipfopen,
149 .d_close = ipfclose,
150 .d_read = ipfread,
151 .d_write = ipfwrite,
152 .d_ioctl = ipfioctl,
153 .d_stop = nostop,
154 .d_tty = notty,
155 .d_poll = ipfpoll,
156 .d_mmap = nommap,
157 #if (__NetBSD_Version__ >= 200000000)
158 .d_kqfilter = nokqfilter,
159 #endif
160 #ifdef _MODULE
161 .d_localcount = &ipl_localcount,
162 #endif
163
164 .d_discard = nodiscard,
165 #ifdef D_OTHER
166 .d_flag = D_OTHER
167 #else
168 .d_flag = 0
169 #endif
170 };
171 #if (__NetBSD_Version__ >= 799003000)
172 kmutex_t ipf_ref_mutex;
173 int ipf_active;
174 #endif
175
176 ipf_main_softc_t ipfmain;
177
178 static u_short ipid = 0;
179 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
180 static int ipf_send_ip(fr_info_t *, mb_t *);
181 #ifdef USE_INET6
182 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
183 fr_info_t *, frdest_t *);
184 #endif
185
186 #if defined(NETBSD_PF)
187 # include <net/pfil.h>
188 /*
189 * We provide the ipf_checkp name just to minimize changes later.
190 */
191 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
192 #endif /* NETBSD_PF */
193
194 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
195 # include <net/pfil.h>
196
197 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
198
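/*
 * pfil(9) hook wrapper for IPv4: make the mbuf chain writable, flush any
 * delayed outbound TCP/UDP checksums, then hand the packet to ipf_check().
 */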
199 static int
200 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
201 {
202 struct ip *ip;
203 int rv, hlen;
204
205 #if __NetBSD_Version__ >= 200080000
206 /*
207 * ensure that mbufs are writable beforehand
208 * as it's assumed by ipf code.
209 * XXX inefficient
210 */
211 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
212
213 if (error) {
214 m_freem(*mp);
215 *mp = NULL;
216 return error;
217 }
218 #endif
219 ip = mtod(*mp, struct ip *);
220 hlen = ip->ip_hl << 2;
221
222 #ifdef INET
223 #if defined(M_CSUM_TCPv4)
224 /*
225 * If the packet is out-bound, we can't delay checksums
226 * here. For in-bound, the checksum has already been
227 * validated.
228 */
229 if (dir == PFIL_OUT) {
230 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
231 in_delayed_cksum(*mp);
232 (*mp)->m_pkthdr.csum_flags &=
233 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
234 }
235 }
236 #endif /* M_CSUM_TCPv4 */
237 #endif /* INET */
238
239 /*
240 * Note, we don't need to update the checksum, because
241 * it has already been verified.
242 */
243 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
244
245 return (rv);
246 }
247
248 # ifdef USE_INET6
249 # include <netinet/ip6.h>
250
251 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
252
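/*
 * pfil(9) hook wrapper for IPv6: flush any delayed outbound TCP/UDP
 * checksums and pass the packet to ipf_check() with the fixed IPv6
 * header length.
 */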
253 static int
254 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
255 {
256 #if defined(INET6)
257 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
258 /*
259 * If the packet is out-bound, we can't delay checksums
260 * here. For in-bound, the checksum has already been
261 * validated.
262 */
263 if (dir == PFIL_OUT) {
264 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
265 # if (__NetBSD_Version__ > 399000600)
266 in6_delayed_cksum(*mp);
267 # endif
268 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
269 M_CSUM_UDPv6);
270 }
271 }
272 # endif
273 #endif /* INET6 */
274
275 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
276 ifp, (dir == PFIL_OUT), mp));
277 }
278 # endif
279
280
281 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
282 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
283
284 static int
285 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
286 {
287 /*
288 * The interface pointer is useless for create (we have nothing to
289 * compare it to) and at detach, the interface name is still in the
290 * list of active NICs (albeit, down, but that's not any real
291 * indicator) and doing ifunit() on the name will still return the
292 * pointer, so it's not much use then, either.
293 */
294 ipf_sync(&ipfmain, NULL);
295 return 0;
296 }
297 # endif
298
299 #endif /* __NetBSD_Version__ >= 105110000 */
300
301
302 #if defined(IPFILTER_LKM)
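/*
 * LKM support: return 1 if the given name matches this driver ("ipl").
 */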
303 int
304 ipf_identify(s)
305 char *s;
306 {
307 if (strcmp(s, "ipl") == 0)
308 return 1;
309 return 0;
310 }
311 #endif /* IPFILTER_LKM */
312
313 #if (__NetBSD_Version__ >= 599002000)
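/*
 * kauth(9) listener: allow firewall and NAT configuration requests that
 * arrive through the ipf devices and defer everything else.
 */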
314 static int
315 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
316 void *arg0, void *arg1, void *arg2, void *arg3)
317 {
318 int result;
319 enum kauth_network_req req;
320
321 result = KAUTH_RESULT_DEFER;
322 req = (enum kauth_network_req)arg0;
323
324 if (action != KAUTH_NETWORK_FIREWALL)
325 return result;
326
327 	/* These must have come from device context. */
328 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
329 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
330 result = KAUTH_RESULT_ALLOW;
331
332 return result;
333 }
334 #endif
335
336 /*
337  * Pseudo-device attach routine; on newer (module-capable) kernels this is
338  * a no-op and ipl_init() performs the setup instead.
338 */
339 void
340 ipfilterattach(int count)
341 {
342
343 #if (__NetBSD_Version__ >= 799003000)
344 return;
345 #else
346 #if (__NetBSD_Version__ >= 599002000)
347 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
348 ipf_listener_cb, NULL);
349 #endif
350
351 if (ipf_load_all() == 0)
352 (void) ipf_create_all(&ipfmain);
353 #endif
354 }
355
356
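/*
 * Enable the filter: initialize the softc, register the pfil(9) hooks for
 * IPv4, IPv6 and interface events, and start the slow timer.
 */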
357 int
358 ipfattach(ipf_main_softc_t *softc)
359 {
360 SPL_INT(s);
361 #if (__NetBSD_Version__ >= 499005500)
362 int i;
363 #endif
364 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
365 int error = 0;
366 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
367 pfil_head_t *ph_inet;
368 # ifdef USE_INET6
369 pfil_head_t *ph_inet6;
370 # endif
371 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
372 pfil_head_t *ph_ifsync;
373 # endif
374 # endif
375 #endif
376
377 SPL_NET(s);
378 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
379 printf("IP Filter: already initialized\n");
380 SPL_X(s);
381 IPFERROR(130017);
382 return EBUSY;
383 }
384
385 if (ipf_init_all(softc) < 0) {
386 SPL_X(s);
387 IPFERROR(130015);
388 return EIO;
389 }
390
391 #ifdef NETBSD_PF
392 # if (__NetBSD_Version__ >= 104200000)
393 # if __NetBSD_Version__ >= 105110000
394 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
395 # ifdef USE_INET6
396 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
397 # endif
398 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
399 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
400 # endif
401
402 if (ph_inet == NULL
403 # ifdef USE_INET6
404 && ph_inet6 == NULL
405 # endif
406 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
407 && ph_ifsync == NULL
408 # endif
409 ) {
410 SPL_X(s);
411 IPFERROR(130016);
412 return ENODEV;
413 }
414
415 if (ph_inet != NULL)
416 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
417 PFIL_IN|PFIL_OUT, ph_inet);
418 else
419 error = 0;
420 # else
421 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
422 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
423 # endif
424 if (error) {
425 IPFERROR(130013);
426 goto pfil_error;
427 }
428 # else
429 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
430 # endif
431
432 # ifdef USE_INET6
433 # if __NetBSD_Version__ >= 105110000
434 if (ph_inet6 != NULL)
435 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
436 PFIL_IN|PFIL_OUT, ph_inet6);
437 else
438 error = 0;
439 if (error) {
440 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
441 PFIL_IN|PFIL_OUT, ph_inet6);
442 ipfmain.ipf_interror = 130014;
443 goto pfil_error;
444 }
445 # else
446 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
447 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
448 if (error) {
449 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
450 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
451 IPFERROR(130014);
452 goto pfil_error;
453 }
454 # endif
455 # endif
456
457 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
458 if (ph_ifsync != NULL)
459 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
460 PFIL_IFNET, ph_ifsync);
461 # endif
462 #endif
463
464 #if (__NetBSD_Version__ >= 499005500)
465 for (i = 0; i < IPL_LOGSIZE; i++)
466 selinit(&ipfmain.ipf_selwait[i]);
467 #else
468 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
469 #endif
470 ipf_savep = ipf_checkp;
471 ipf_checkp = ipf_check;
472
473 #ifdef INET
474 if (softc->ipf_control_forwarding & 1)
475 ipforwarding = 1;
476 #endif
477
478 ipid = 0;
479
480 SPL_X(s);
481
482 #if (__NetBSD_Version__ >= 104010000)
483 # if (__NetBSD_Version__ >= 499002000)
484 callout_init(&softc->ipf_slow_ch, 0);
485 # else
486 callout_init(&softc->ipf_slow_ch);
487 # endif
488 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
489 ipf_timer_func, softc);
490 #else
491 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
492 #endif
493
494 return 0;
495
496 #if __NetBSD_Version__ >= 105110000
497 pfil_error:
498 SPL_X(s);
499 ipf_fini_all(softc);
500 return error;
501 #endif
502 }
503
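/*
 * Periodic housekeeping: run ipf_slowtimer() and re-arm the callout while
 * the filter is still running.
 */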
504 static void
505 ipf_timer_func(void *ptr)
506 {
507 ipf_main_softc_t *softc = ptr;
508 SPL_INT(s);
509
510 SPL_NET(s);
511 READ_ENTER(&softc->ipf_global);
512
513 if (softc->ipf_running > 0)
514 ipf_slowtimer(softc);
515
516 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
517 #if NETBSD_GE_REV(104240000)
518 callout_reset(&softc->ipf_slow_ch, hz / 2,
519 ipf_timer_func, softc);
520 #else
521 timeout(ipf_timer_func, softc,
522 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
523 #endif
524 }
525 RWLOCK_EXIT(&softc->ipf_global);
526 SPL_X(s);
527 }
528
529
530 /*
531 * Disable the filter by removing the hooks from the IP input/output
532 * stream.
533 */
534 int
535 ipfdetach(ipf_main_softc_t *softc)
536 {
537 SPL_INT(s);
538 #if (__NetBSD_Version__ >= 499005500)
539 int i;
540 #endif
541 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
542 int error = 0;
543 # if __NetBSD_Version__ >= 105150000
544 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
545 # ifdef USE_INET6
546 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
547 # endif
548 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
549 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
550 # endif
551 # endif
552 #endif
553
554 SPL_NET(s);
555
556 #if (__NetBSD_Version__ >= 104010000)
557 if (softc->ipf_running > 0)
558 callout_stop(&softc->ipf_slow_ch);
559 #else
560 untimeout(ipf_slowtimer, NULL);
561 #endif /* NetBSD */
562
563 ipf_checkp = ipf_savep;
564 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
565 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
566
567 #ifdef INET
568 if (softc->ipf_control_forwarding & 2)
569 ipforwarding = 0;
570 #endif
571
572 #ifdef NETBSD_PF
573 # if (__NetBSD_Version__ >= 104200000)
574 # if __NetBSD_Version__ >= 105110000
575 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
576 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
577 PFIL_IFNET, ph_ifsync);
578 # endif
579
580 if (ph_inet != NULL)
581 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
582 PFIL_IN|PFIL_OUT, ph_inet);
583 else
584 error = 0;
585 # else
586 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
587 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
588 # endif
589 if (error) {
590 SPL_X(s);
591 IPFERROR(130011);
592 return error;
593 }
594 # else
595 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
596 # endif
597 # ifdef USE_INET6
598 # if __NetBSD_Version__ >= 105110000
599 if (ph_inet6 != NULL)
600 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
601 PFIL_IN|PFIL_OUT, ph_inet6);
602 else
603 error = 0;
604 # else
605 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
606 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
607 # endif
608 if (error) {
609 SPL_X(s);
610 IPFERROR(130012);
611 return error;
612 }
613 # endif
614 #endif
615 SPL_X(s);
616
617 #if (__NetBSD_Version__ >= 499005500)
618 for (i = 0; i < IPL_LOGSIZE; i++)
619 seldestroy(&ipfmain.ipf_selwait[i]);
620 #endif
621
622 ipf_fini_all(softc);
623
624 return 0;
625 }
626
627
628 /*
629 * Filter ioctl interface.
630 */
631 int
632 ipfioctl(dev_t dev, u_long cmd,
633 #if (__NetBSD_Version__ >= 499001000)
634 void *data,
635 #else
636 caddr_t data,
637 #endif
638 int mode
639 #if (NetBSD >= 199511)
640 # if (__NetBSD_Version__ >= 399001400)
641 , struct lwp *p
642 # if (__NetBSD_Version__ >= 399002000)
643 # define UID(l) kauth_cred_getuid((l)->l_cred)
644 # else
645 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
646 # endif
647 # else
648 , struct proc *p
649 # define UID(p) ((p)->p_cred->p_ruid)
650 # endif
651 #endif
652 )
653 {
654 int error = 0, unit = 0;
655 SPL_INT(s);
656
657 #if (__NetBSD_Version__ >= 399002000)
658 if ((mode & FWRITE) &&
659 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
660 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
661 NULL, NULL)) {
662 ipfmain.ipf_interror = 130005;
663 return EPERM;
664 }
665 #else
666 if ((securelevel >= 2) && (mode & FWRITE)) {
667 ipfmain.ipf_interror = 130001;
668 return EPERM;
669 }
670 #endif
671
672 unit = GET_MINOR(dev);
673 if ((IPL_LOGMAX < unit) || (unit < 0)) {
674 ipfmain.ipf_interror = 130002;
675 return ENXIO;
676 }
677
678 if (ipfmain.ipf_running <= 0) {
679 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
680 ipfmain.ipf_interror = 130003;
681 return EIO;
682 }
683 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
684 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
685 cmd != SIOCGETFS && cmd != SIOCGETFF &&
686 cmd != SIOCIPFINTERROR) {
687 ipfmain.ipf_interror = 130004;
688 return EIO;
689 }
690 }
691
692 SPL_NET(s);
693
694 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
695 if (error != -1) {
696 SPL_X(s);
697 return error;
698 }
699
700 SPL_X(s);
701 return error;
702 }
703
704
705 /*
706 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
707 * requires a large amount of setting up and isn't any more efficient.
708 */
709 int
710 ipf_send_reset(fr_info_t *fin)
711 {
712 struct tcphdr *tcp, *tcp2;
713 int tlen = 0, hlen;
714 struct mbuf *m;
715 #ifdef USE_INET6
716 ip6_t *ip6;
717 #endif
718 ip_t *ip;
719
720 tcp = fin->fin_dp;
721 if (tcp->th_flags & TH_RST)
722 return -1; /* feedback loop */
723
724 if (ipf_checkl4sum(fin) == -1)
725 return -1;
726
727 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
728 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
729 ((tcp->th_flags & TH_FIN) ? 1 : 0);
730
731 #ifdef USE_INET6
732 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
733 #else
734 hlen = sizeof(ip_t);
735 #endif
736 #ifdef MGETHDR
737 MGETHDR(m, M_DONTWAIT, MT_HEADER);
738 #else
739 MGET(m, M_DONTWAIT, MT_HEADER);
740 #endif
741 if (m == NULL)
742 return -1;
743 if (sizeof(*tcp2) + hlen > MHLEN) {
744 MCLGET(m, M_DONTWAIT);
745 if (m == NULL)
746 return -1;
747 if ((m->m_flags & M_EXT) == 0) {
748 FREE_MB_T(m);
749 return -1;
750 }
751 }
752
753 m->m_len = sizeof(*tcp2) + hlen;
754 m->m_data += max_linkhdr;
755 m->m_pkthdr.len = m->m_len;
756 m_reset_rcvif(m);
757 ip = mtod(m, struct ip *);
758 bzero((char *)ip, hlen);
759 #ifdef USE_INET6
760 ip6 = (ip6_t *)ip;
761 #endif
762 bzero((char *)ip, sizeof(*tcp2) + hlen);
763 tcp2 = (struct tcphdr *)((char *)ip + hlen);
764 tcp2->th_sport = tcp->th_dport;
765 tcp2->th_dport = tcp->th_sport;
766
767 if (tcp->th_flags & TH_ACK) {
768 tcp2->th_seq = tcp->th_ack;
769 tcp2->th_flags = TH_RST;
770 tcp2->th_ack = 0;
771 } else {
772 tcp2->th_seq = 0;
773 tcp2->th_ack = ntohl(tcp->th_seq);
774 tcp2->th_ack += tlen;
775 tcp2->th_ack = htonl(tcp2->th_ack);
776 tcp2->th_flags = TH_RST|TH_ACK;
777 }
778 tcp2->th_x2 = 0;
779 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
780 tcp2->th_win = tcp->th_win;
781 tcp2->th_sum = 0;
782 tcp2->th_urp = 0;
783
784 #ifdef USE_INET6
785 if (fin->fin_v == 6) {
786 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
787 ip6->ip6_plen = htons(sizeof(struct tcphdr));
788 ip6->ip6_nxt = IPPROTO_TCP;
789 ip6->ip6_hlim = 0;
790 ip6->ip6_src = fin->fin_dst6.in6;
791 ip6->ip6_dst = fin->fin_src6.in6;
792 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
793 sizeof(*ip6), sizeof(*tcp2));
794 return ipf_send_ip(fin, m);
795 }
796 #endif
797 #ifdef INET
798 ip->ip_p = IPPROTO_TCP;
799 ip->ip_len = htons(sizeof(struct tcphdr));
800 ip->ip_src.s_addr = fin->fin_daddr;
801 ip->ip_dst.s_addr = fin->fin_saddr;
802 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
803 ip->ip_len = hlen + sizeof(*tcp2);
804 return ipf_send_ip(fin, m);
805 #else
806 return 0;
807 #endif
808 }
809
810
811 /*
812 * Expects ip_len to be in host byte order when called.
813 */
814 static int
815 ipf_send_ip(fr_info_t *fin, mb_t *m)
816 {
817 fr_info_t fnew;
818 #ifdef INET
819 ip_t *oip;
820 #endif
821 ip_t *ip;
822 int hlen;
823
824 ip = mtod(m, ip_t *);
825 bzero((char *)&fnew, sizeof(fnew));
826 fnew.fin_main_soft = fin->fin_main_soft;
827
828 IP_V_A(ip, fin->fin_v);
829 switch (fin->fin_v)
830 {
831 #ifdef INET
832 case 4 :
833 oip = fin->fin_ip;
834 hlen = sizeof(*oip);
835 fnew.fin_v = 4;
836 fnew.fin_p = ip->ip_p;
837 fnew.fin_plen = ntohs(ip->ip_len);
838 HTONS(ip->ip_len);
839 IP_HL_A(ip, sizeof(*oip) >> 2);
840 ip->ip_tos = oip->ip_tos;
841 ip->ip_id = ipf_nextipid(fin);
842 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
843 ip->ip_ttl = ip_defttl;
844 ip->ip_sum = 0;
845 break;
846 #endif
847 #ifdef USE_INET6
848 case 6 :
849 {
850 ip6_t *ip6 = (ip6_t *)ip;
851
852 ip6->ip6_vfc = 0x60;
853 ip6->ip6_hlim = IPDEFTTL;
854
855 hlen = sizeof(*ip6);
856 fnew.fin_p = ip6->ip6_nxt;
857 fnew.fin_v = 6;
858 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
859 break;
860 }
861 #endif
862 default :
863 return EINVAL;
864 }
865 #ifdef KAME_IPSEC
866 m_reset_rcvif(m);
867 #endif
868
869 fnew.fin_ifp = fin->fin_ifp;
870 fnew.fin_flx = FI_NOCKSUM;
871 fnew.fin_m = m;
872 fnew.fin_ip = ip;
873 fnew.fin_mp = &m;
874 fnew.fin_hlen = hlen;
875 fnew.fin_dp = (char *)ip + hlen;
876 (void) ipf_makefrip(hlen, ip, &fnew);
877
878 return ipf_fastroute(m, &m, &fnew, NULL);
879 }
880
881
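/*
 * ipf_send_icmp_err - build an ICMP (or ICMPv6) error of the given type in
 * response to the packet described by fin and transmit it via ipf_send_ip().
 */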
882 int
883 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
884 {
885 int err, hlen, xtra, iclen, ohlen, avail;
886 struct in_addr dst4;
887 struct icmp *icmp;
888 struct mbuf *m;
889 i6addr_t dst6;
890 void *ifp;
891 #ifdef USE_INET6
892 int code;
893 ip6_t *ip6;
894 #endif
895 ip_t *ip, *ip2;
896
897 if ((type < 0) || (type > ICMP_MAXTYPE))
898 return -1;
899
900 #ifdef USE_INET6
901 code = fin->fin_icode;
902 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
903 return -1;
904 #endif
905
906 if (ipf_checkl4sum(fin) == -1)
907 return -1;
908 #ifdef MGETHDR
909 MGETHDR(m, M_DONTWAIT, MT_HEADER);
910 #else
911 MGET(m, M_DONTWAIT, MT_HEADER);
912 #endif
913 if (m == NULL)
914 return -1;
915 avail = MHLEN;
916
917 xtra = 0;
918 hlen = 0;
919 ohlen = 0;
920 dst4.s_addr = 0;
921 ifp = fin->fin_ifp;
922 if (fin->fin_v == 4) {
923 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
924 switch (ntohs(fin->fin_data[0]) >> 8)
925 {
926 case ICMP_ECHO :
927 case ICMP_TSTAMP :
928 case ICMP_IREQ :
929 case ICMP_MASKREQ :
930 break;
931 default :
932 FREE_MB_T(m);
933 return 0;
934 }
935
936 if (dst == 0) {
937 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
938 &dst6, NULL) == -1) {
939 FREE_MB_T(m);
940 return -1;
941 }
942 dst4 = dst6.in4;
943 } else
944 dst4.s_addr = fin->fin_daddr;
945
946 hlen = sizeof(ip_t);
947 ohlen = fin->fin_hlen;
948 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
949 if (fin->fin_hlen < fin->fin_plen)
950 xtra = MIN(fin->fin_dlen, 8);
951 else
952 xtra = 0;
953 }
954
955 #ifdef USE_INET6
956 else if (fin->fin_v == 6) {
957 hlen = sizeof(ip6_t);
958 ohlen = sizeof(ip6_t);
959 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
960 type = icmptoicmp6types[type];
961 if (type == ICMP6_DST_UNREACH)
962 code = icmptoicmp6unreach[code];
963
964 if (iclen + max_linkhdr + fin->fin_plen > avail) {
965 MCLGET(m, M_DONTWAIT);
966 if (m == NULL)
967 return -1;
968 if ((m->m_flags & M_EXT) == 0) {
969 FREE_MB_T(m);
970 return -1;
971 }
972 avail = MCLBYTES;
973 }
974 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
975 xtra = MIN(xtra, IPV6_MMTU - iclen);
976 if (dst == 0) {
977 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
978 &dst6, NULL) == -1) {
979 FREE_MB_T(m);
980 return -1;
981 }
982 } else
983 dst6 = fin->fin_dst6;
984 }
985 #endif
986 else {
987 FREE_MB_T(m);
988 return -1;
989 }
990
991 avail -= (max_linkhdr + iclen);
992 if (avail < 0) {
993 FREE_MB_T(m);
994 return -1;
995 }
996 if (xtra > avail)
997 xtra = avail;
998 iclen += xtra;
999 m->m_data += max_linkhdr;
1000 m_reset_rcvif(m);
1001 m->m_pkthdr.len = iclen;
1002 m->m_len = iclen;
1003 ip = mtod(m, ip_t *);
1004 icmp = (struct icmp *)((char *)ip + hlen);
1005 ip2 = (ip_t *)&icmp->icmp_ip;
1006
1007 icmp->icmp_type = type;
1008 icmp->icmp_code = fin->fin_icode;
1009 icmp->icmp_cksum = 0;
1010 #ifdef icmp_nextmtu
1011 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1012 if (fin->fin_mtu != 0) {
1013 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1014
1015 } else if (ifp != NULL) {
1016 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1017
1018 } else { /* make up a number... */
1019 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1020 }
1021 }
1022 #endif
1023
1024 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1025
1026 #if defined(M_CSUM_IPv4)
1027 /*
1028 * Clear any in-bound checksum flags for this packet.
1029 */
1030 m->m_pkthdr.csuminfo = 0;
1031 #endif /* __NetBSD__ && M_CSUM_IPv4 */
1032
1033 #ifdef USE_INET6
1034 ip6 = (ip6_t *)ip;
1035 if (fin->fin_v == 6) {
1036 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1037 ip6->ip6_plen = htons(iclen - hlen);
1038 ip6->ip6_nxt = IPPROTO_ICMPV6;
1039 ip6->ip6_hlim = 0;
1040 ip6->ip6_src = dst6.in6;
1041 ip6->ip6_dst = fin->fin_src6.in6;
1042 if (xtra > 0)
1043 bcopy((char *)fin->fin_ip + ohlen,
1044 (char *)&icmp->icmp_ip + ohlen, xtra);
1045 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1046 sizeof(*ip6), iclen - hlen);
1047 } else
1048 #endif
1049 {
1050 ip->ip_p = IPPROTO_ICMP;
1051 ip->ip_src.s_addr = dst4.s_addr;
1052 ip->ip_dst.s_addr = fin->fin_saddr;
1053
1054 if (xtra > 0)
1055 bcopy((char *)fin->fin_ip + ohlen,
1056 (char *)&icmp->icmp_ip + ohlen, xtra);
1057 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1058 sizeof(*icmp) + 8);
1059 ip->ip_len = iclen;
1060 ip->ip_p = IPPROTO_ICMP;
1061 }
1062 err = ipf_send_ip(fin, m);
1063 return err;
1064 }
1065
1066
1067 /*
1068 * m0 - pointer to mbuf where the IP packet starts
1069 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1070 */
1071 int
1072 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1073 {
1074 register struct ip *ip, *mhip;
1075 register struct mbuf *m = *mpp;
1076 register struct route *ro;
1077 int len, off, error = 0, hlen, code;
1078 struct ifnet *ifp, *sifp;
1079 ipf_main_softc_t *softc;
1080 #if __NetBSD_Version__ >= 499001100
1081 union {
1082 struct sockaddr dst;
1083 struct sockaddr_in dst4;
1084 } u;
1085 #else
1086 struct sockaddr_in *dst4;
1087 #endif
1088 struct sockaddr *dst;
1089 u_short ip_off, ip_len;
1090 struct route iproute;
1091 struct rtentry *rt;
1092 frdest_t node;
1093 frentry_t *fr;
1094
1095 if (fin->fin_v == 6) {
1096 #ifdef USE_INET6
1097 error = ipf_fastroute6(m0, mpp, fin, fdp);
1098 #else
1099 error = EPROTONOSUPPORT;
1100 #endif
1101 if ((error != 0) && (*mpp != NULL))
1102 FREE_MB_T(*mpp);
1103 return error;
1104 }
1105 #ifndef INET
1106 FREE_MB_T(*mpp);
1107 return EPROTONOSUPPORT;
1108 #else
1109
1110 hlen = fin->fin_hlen;
1111 ip = mtod(m0, struct ip *);
1112 softc = fin->fin_main_soft;
1113 rt = NULL;
1114 ifp = NULL;
1115
1116 # if defined(M_CSUM_IPv4)
1117 /*
1118 * Clear any in-bound checksum flags for this packet.
1119 */
1120 m0->m_pkthdr.csuminfo = 0;
1121 # endif /* __NetBSD__ && M_CSUM_IPv4 */
1122
1123 /*
1124 * Route packet.
1125 */
1126 ro = &iproute;
1127 memset(ro, 0, sizeof(*ro));
1128 fr = fin->fin_fr;
1129
1130 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1131 (fdp->fd_type == FRD_DSTLIST)) {
1132 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1133 fdp = &node;
1134 }
1135 if (fdp != NULL)
1136 ifp = fdp->fd_ptr;
1137 else
1138 ifp = fin->fin_ifp;
1139
1140 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1141 error = -2;
1142 goto bad;
1143 }
1144
1145 # if __NetBSD_Version__ >= 499001100
1146 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1147 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1148 else
1149 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1150 dst = &u.dst;
1151 rtcache_setdst(ro, dst);
1152 rt = rtcache_init(ro);
1153 # else
1154 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1155 dst = (struct sockaddr *)dst4;
1156 dst4->sin_family = AF_INET;
1157 dst4->sin_addr = ip->ip_dst;
1158
1159 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1160 dst4->sin_addr = fdp->fd_ip;
1161
1162 dst4->sin_len = sizeof(*dst);
1163 rtalloc(ro);
1164 rt = ro->ro_rt;
1165 # endif
1166 if ((ifp == NULL) && (rt != NULL))
1167 ifp = rt->rt_ifp;
1168 if ((rt == NULL) || (ifp == NULL)) {
1169 #ifdef INET
1170 if (in_localaddr(ip->ip_dst))
1171 error = EHOSTUNREACH;
1172 else
1173 #endif
1174 error = ENETUNREACH;
1175 goto bad;
1176 }
1177
1178
1179 if (rt->rt_flags & RTF_GATEWAY)
1180 dst = rt->rt_gateway;
1181
1182 rt->rt_use++;
1183
1184 /*
1185 	 * Input packets which are being "fastrouted" won't go back through
1186 	 * output filtering, so they would otherwise miss their chance to get
1187 	 * NAT'd and counted.  Duplicated packets aren't considered to be
1188 * part of the normal packet stream, so do not NAT them or pass
1189 * them through stateful checking, etc.
1190 */
1191 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1192 sifp = fin->fin_ifp;
1193 fin->fin_ifp = ifp;
1194 fin->fin_out = 1;
1195 (void) ipf_acctpkt(fin, NULL);
1196 fin->fin_fr = NULL;
1197 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1198 u_32_t pass;
1199
1200 (void) ipf_state_check(fin, &pass);
1201 }
1202
1203 switch (ipf_nat_checkout(fin, NULL))
1204 {
1205 case 0 :
1206 break;
1207 case 1 :
1208 ip->ip_sum = 0;
1209 break;
1210 case -1 :
1211 error = -1;
1212 goto bad;
1213 break;
1214 }
1215
1216 fin->fin_ifp = sifp;
1217 fin->fin_out = 0;
1218 } else
1219 ip->ip_sum = 0;
1220 /*
1221 * If small enough for interface, can just send directly.
1222 */
1223 m_set_rcvif(m, ifp);
1224
1225 ip_len = ntohs(ip->ip_len);
1226 if (ip_len <= ifp->if_mtu) {
1227 # if defined(M_CSUM_IPv4)
1228 # if (__NetBSD_Version__ >= 105009999)
1229 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1230 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1231 # else
1232 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1233 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1234 # endif /* (__NetBSD_Version__ >= 105009999) */
1235 else if (ip->ip_sum == 0)
1236 ip->ip_sum = in_cksum(m, hlen);
1237 # else
1238 if (!ip->ip_sum)
1239 ip->ip_sum = in_cksum(m, hlen);
1240 # endif /* M_CSUM_IPv4 */
1241
1242 error = if_output_lock(ifp, ifp, m, dst, rt);
1243 goto done;
1244 }
1245
1246 /*
1247 * Too large for interface; fragment if possible.
1248 * Must be able to put at least 8 bytes per fragment.
1249 */
1250 ip_off = ntohs(ip->ip_off);
1251 if (ip_off & IP_DF) {
1252 error = EMSGSIZE;
1253 goto bad;
1254 }
1255 len = (ifp->if_mtu - hlen) &~ 7;
1256 if (len < 8) {
1257 error = EMSGSIZE;
1258 goto bad;
1259 }
1260
1261 {
1262 int mhlen, firstlen = len;
1263 struct mbuf **mnext = &m->m_act;
1264
1265 /*
1266 * Loop through length of segment after first fragment,
1267 * make new header and copy data of each part and link onto chain.
1268 */
1269 m0 = m;
1270 mhlen = sizeof (struct ip);
1271 for (off = hlen + len; off < ip_len; off += len) {
1272 # ifdef MGETHDR
1273 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1274 # else
1275 MGET(m, M_DONTWAIT, MT_HEADER);
1276 # endif
1277 if (m == 0) {
1278 m = m0;
1279 error = ENOBUFS;
1280 goto bad;
1281 }
1282 m->m_data += max_linkhdr;
1283 mhip = mtod(m, struct ip *);
1284 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1285 #ifdef INET
1286 if (hlen > sizeof (struct ip)) {
1287 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1288 IP_HL_A(mhip, mhlen >> 2);
1289 }
1290 #endif
1291 m->m_len = mhlen;
1292 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1293 if (off + len >= ip_len)
1294 len = ip_len - off;
1295 else
1296 mhip->ip_off |= IP_MF;
1297 mhip->ip_len = htons((u_short)(len + mhlen));
1298 m->m_next = m_copy(m0, off, len);
1299 if (m->m_next == 0) {
1300 error = ENOBUFS; /* ??? */
1301 goto sendorfree;
1302 }
1303 m->m_pkthdr.len = mhlen + len;
1304 m_reset_rcvif(m);
1305 mhip->ip_off = htons((u_short)mhip->ip_off);
1306 mhip->ip_sum = 0;
1307 #ifdef INET
1308 mhip->ip_sum = in_cksum(m, mhlen);
1309 #endif
1310 *mnext = m;
1311 mnext = &m->m_act;
1312 }
1313 /*
1314 * Update first fragment by trimming what's been copied out
1315 * and updating header, then send each fragment (in order).
1316 */
1317 m_adj(m0, hlen + firstlen - ip_len);
1318 ip->ip_len = htons((u_short)(hlen + firstlen));
1319 ip->ip_off = htons((u_short)IP_MF);
1320 ip->ip_sum = 0;
1321 #ifdef INET
1322 ip->ip_sum = in_cksum(m0, hlen);
1323 #endif
1324 sendorfree:
1325 for (m = m0; m; m = m0) {
1326 m0 = m->m_act;
1327 m->m_act = 0;
1328 if (error == 0) {
1329 KERNEL_LOCK(1, NULL);
1330 error = (*ifp->if_output)(ifp, m, dst, rt);
1331 KERNEL_UNLOCK_ONE(NULL);
1332 } else {
1333 FREE_MB_T(m);
1334 }
1335 }
1336 }
1337 done:
1338 if (!error)
1339 softc->ipf_frouteok[0]++;
1340 else
1341 softc->ipf_frouteok[1]++;
1342
1343 # if __NetBSD_Version__ >= 499001100
1344 rtcache_free(ro);
1345 # else
1346 if (rt) {
1347 RTFREE(rt);
1348 }
1349 # endif
1350 return error;
1351 bad:
1352 if (error == EMSGSIZE) {
1353 sifp = fin->fin_ifp;
1354 code = fin->fin_icode;
1355 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1356 fin->fin_ifp = ifp;
1357 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1358 fin->fin_ifp = sifp;
1359 fin->fin_icode = code;
1360 }
1361 FREE_MB_T(m);
1362 goto done;
1363 #endif /* INET */
1364 }
1365
1366
1367 #if defined(USE_INET6)
1368 /*
1369  * This is the IPv6-specific fastroute code.  It doesn't clean up the mbufs
1370  * or ensure that it is an IPv6 packet that is being forwarded; those are
1371  * expected to be done by the caller (ipf_fastroute).
1372 */
1373 static int
1374 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1375 frdest_t *fdp)
1376 {
1377 # if __NetBSD_Version__ >= 499001100
1378 struct route ip6route;
1379 const struct sockaddr *dst;
1380 union {
1381 struct sockaddr dst;
1382 struct sockaddr_in6 dst6;
1383 } u;
1384 struct route *ro;
1385 # else
1386 struct route_in6 ip6route;
1387 struct sockaddr_in6 *dst6;
1388 struct route_in6 *ro;
1389 # endif
1390 struct rtentry *rt;
1391 struct ifnet *ifp;
1392 u_long mtu;
1393 int error;
1394
1395 error = 0;
1396 ro = &ip6route;
1397
1398 if (fdp != NULL)
1399 ifp = fdp->fd_ptr;
1400 else
1401 ifp = fin->fin_ifp;
1402 memset(ro, 0, sizeof(*ro));
1403 # if __NetBSD_Version__ >= 499001100
1404 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1405 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1406 else
1407 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1408 dst = &u.dst;
1409 rtcache_setdst(ro, dst);
1410
1411 rt = rtcache_init(ro);
1412 if ((ifp == NULL) && (rt != NULL))
1413 ifp = rt->rt_ifp;
1414 # else
1415 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1416 dst6->sin6_family = AF_INET6;
1417 dst6->sin6_len = sizeof(struct sockaddr_in6);
1418 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1419
1420 if (fdp != NULL) {
1421 if (IP6_NOTZERO(&fdp->fd_ip6))
1422 dst6->sin6_addr = fdp->fd_ip6.in6;
1423 }
1424
1425 rtalloc((struct route *)ro);
1426
1427 if ((ifp == NULL) && (ro->ro_rt != NULL))
1428 ifp = ro->ro_rt->rt_ifp;
1429 rt = ro->ro_rt;
1430 # endif
1431 if ((rt == NULL) || (ifp == NULL)) {
1432
1433 error = EHOSTUNREACH;
1434 goto bad;
1435 }
1436
1437 /* KAME */
1438 # if __NetBSD_Version__ >= 499001100
1439 if (IN6_IS_ADDR_LINKLOCAL(&u.dst6.sin6_addr))
1440 u.dst6.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1441 # else
1442 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1443 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1444 # endif
1445
1446 {
1447 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
1448 struct in6_ifextra *ife;
1449 # endif
1450 if (rt->rt_flags & RTF_GATEWAY)
1451 # if __NetBSD_Version__ >= 499001100
1452 dst = rt->rt_gateway;
1453 # else
1454 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1455 # endif
1456 rt->rt_use++;
1457
1458 /* Determine path MTU. */
1459 # if (__NetBSD_Version__ <= 106009999)
1460 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1461 # else
1462 # ifdef IN6_LINKMTU
1463 mtu = IN6_LINKMTU(ifp);
1464 # else
1465 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1466 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1467 # endif
1468 # endif
1469 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1470 # if __NetBSD_Version__ >= 499001100
1471 error = nd6_output(ifp, ifp, m0, satocsin6(dst), rt);
1472 # else
1473 error = nd6_output(ifp, ifp, m0, dst6, rt);
1474 # endif
1475 } else {
1476 error = EMSGSIZE;
1477 }
1478 }
1479 bad:
1480 # if __NetBSD_Version__ >= 499001100
1481 rtcache_free(ro);
1482 # else
1483 if (ro->ro_rt != NULL) {
1484 RTFREE(((struct route *)ro)->ro_rt);
1485 }
1486 # endif
1487 return error;
1488 }
1489 #endif /* INET6 */
1490
1491
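/*
 * Reverse-path check: return non-zero if the packet's source address routes
 * back out the interface on which the packet arrived.
 */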
1492 int
1493 ipf_verifysrc(fr_info_t *fin)
1494 {
1495 #if __NetBSD_Version__ >= 499001100
1496 union {
1497 struct sockaddr dst;
1498 struct sockaddr_in dst4;
1499 } u;
1500 struct rtentry *rt;
1501 #else
1502 struct sockaddr_in *dst;
1503 #endif
1504 struct route iproute;
1505 int rc;
1506
1507 #if __NetBSD_Version__ >= 499001100
1508 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1509 rtcache_setdst(&iproute, &u.dst);
1510 rt = rtcache_init(&iproute);
1511 if (rt == NULL)
1512 rc = 0;
1513 else
1514 rc = (fin->fin_ifp == rt->rt_ifp);
1515 rtcache_free(&iproute);
1516 #else
1517 dst = (struct sockaddr_in *)&iproute.ro_dst;
1518 dst->sin_len = sizeof(*dst);
1519 dst->sin_family = AF_INET;
1520 dst->sin_addr = fin->fin_src;
1521 rtalloc(&iproute);
1522 if (iproute.ro_rt == NULL)
1523 return 0;
1524 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1525 RTFREE(iproute.ro_rt);
1526 #endif
1527 return rc;
1528 }
1529
1530
1531 /*
1532 * return the first IP Address associated with an interface
1533 */
1534 int
1535 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1536 i6addr_t *inp, i6addr_t *inpmask)
1537 {
1538 #ifdef USE_INET6
1539 struct in6_addr *inp6 = NULL;
1540 #endif
1541 struct sockaddr *sock, *mask;
1542 struct sockaddr_in *sin;
1543 struct ifaddr *ifa;
1544 struct ifnet *ifp;
1545
1546 if ((ifptr == NULL) || (ifptr == (void *)-1))
1547 return -1;
1548
1549 ifp = ifptr;
1550 mask = NULL;
1551
1552 if (v == 4)
1553 inp->in4.s_addr = 0;
1554 #ifdef USE_INET6
1555 else if (v == 6)
1556 bzero((char *)inp, sizeof(*inp));
1557 #endif
1558
1559 ifa = IFADDR_READER_FIRST(ifp);
1560 sock = ifa ? ifa->ifa_addr : NULL;
1561 while (sock != NULL && ifa != NULL) {
1562 sin = (struct sockaddr_in *)sock;
1563 if ((v == 4) && (sin->sin_family == AF_INET))
1564 break;
1565 #ifdef USE_INET6
1566 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1567 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1568 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1569 !IN6_IS_ADDR_LOOPBACK(inp6))
1570 break;
1571 }
1572 #endif
1573 ifa = IFADDR_READER_NEXT(ifa);
1574 if (ifa != NULL)
1575 sock = ifa->ifa_addr;
1576 }
1577 if (ifa == NULL || sock == NULL)
1578 return -1;
1579
1580 mask = ifa->ifa_netmask;
1581 if (atype == FRI_BROADCAST)
1582 sock = ifa->ifa_broadaddr;
1583 else if (atype == FRI_PEERADDR)
1584 sock = ifa->ifa_dstaddr;
1585
1586 #ifdef USE_INET6
1587 if (v == 6)
1588 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1589 (struct sockaddr_in6 *)mask,
1590 inp, inpmask);
1591 #endif
1592 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1593 (struct sockaddr_in *)mask,
1594 &inp->in4, &inpmask->in4);
1595 }
1596
1597
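/*
 * Generate a new TCP initial sequence number, using tcp_new_iss1() on newer
 * kernels or an MD5 hash of the addresses, ports and a secret otherwise.
 */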
1598 u_32_t
1599 ipf_newisn(fr_info_t *fin)
1600 {
1601 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1602 size_t asz;
1603
1604 if (fin->fin_v == 4)
1605 asz = sizeof(struct in_addr);
1606 else if (fin->fin_v == 6)
1607 asz = sizeof(fin->fin_src);
1608 else /* XXX: no way to return error */
1609 return 0;
1610 #ifdef INET
1611 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1612 fin->fin_sport, fin->fin_dport, asz, 0);
1613 #else
1614 return ENOSYS;
1615 #endif
1616 #else
1617 static int iss_seq_off = 0;
1618 u_char hash[16];
1619 u_32_t newiss;
1620 MD5_CTX ctx;
1621
1622 /*
1623 * Compute the base value of the ISS. It is a hash
1624 * of (saddr, sport, daddr, dport, secret).
1625 */
1626 MD5Init(&ctx);
1627
1628 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1629 sizeof(fin->fin_fi.fi_src));
1630 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1631 sizeof(fin->fin_fi.fi_dst));
1632 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1633
1634 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1635
1636 MD5Final(hash, &ctx);
1637
1638 memcpy(&newiss, hash, sizeof(newiss));
1639
1640 /*
1641 * Now increment our "timer", and add it in to
1642 * the computed value.
1643 *
1644 * XXX Use `addin'?
1645 * XXX TCP_ISSINCR too large to use?
1646 */
1647 iss_seq_off += 0x00010000;
1648 newiss += iss_seq_off;
1649 return newiss;
1650 #endif
1651 }
1652
1653
1654 /* ------------------------------------------------------------------------ */
1655 /* Function: ipf_nextipid */
1656 /* Returns: u_short - the next IP ID to use for this packet */
1657 /* Parameters: fin(I) - pointer to packet information */
1658 /* */
1659 /* Returns the next IPv4 ID to use for this packet. */
1660 /* ------------------------------------------------------------------------ */
1661 u_short
1662 ipf_nextipid(fr_info_t *fin)
1663 {
1664 #ifdef USE_MUTEXES
1665 ipf_main_softc_t *softc = fin->fin_main_soft;
1666 #endif
1667 u_short id;
1668
1669 MUTEX_ENTER(&softc->ipf_rw);
1670 id = ipid++;
1671 MUTEX_EXIT(&softc->ipf_rw);
1672
1673 return id;
1674 }
1675
1676
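/*
 * Validate the layer 4 (TCP/UDP) checksum of an IPv4 packet, using the
 * hardware checksum offload flags on the mbuf where possible and falling
 * back to ipf_checkl4sum() otherwise.
 */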
1677 EXTERN_INLINE int
1678 ipf_checkv4sum(fr_info_t *fin)
1679 {
1680 #ifdef M_CSUM_TCP_UDP_BAD
1681 int manual, pflag, cflags, active;
1682 mb_t *m;
1683
1684 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1685 return 0;
1686
1687 if ((fin->fin_flx & FI_SHORT) != 0)
1688 return 1;
1689
1690 if (fin->fin_cksum != FI_CK_NEEDED)
1691 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1692
1693 manual = 0;
1694 m = fin->fin_m;
1695 if (m == NULL) {
1696 manual = 1;
1697 goto skipauto;
1698 }
1699
1700 switch (fin->fin_p)
1701 {
1702 case IPPROTO_UDP :
1703 pflag = M_CSUM_UDPv4;
1704 break;
1705 case IPPROTO_TCP :
1706 pflag = M_CSUM_TCPv4;
1707 break;
1708 default :
1709 pflag = 0;
1710 manual = 1;
1711 break;
1712 }
1713
1714 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1715 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1716 cflags = m->m_pkthdr.csum_flags & active;
1717
1718 if (pflag != 0) {
1719 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1720 fin->fin_flx |= FI_BAD;
1721 fin->fin_cksum = FI_CK_BAD;
1722 } else if (cflags == (pflag | M_CSUM_DATA)) {
1723 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1724 fin->fin_flx |= FI_BAD;
1725 fin->fin_cksum = FI_CK_BAD;
1726 } else {
1727 fin->fin_cksum = FI_CK_SUMOK;
1728 }
1729 } else if (cflags == pflag) {
1730 fin->fin_cksum = FI_CK_SUMOK;
1731 } else {
1732 manual = 1;
1733 }
1734 }
1735 skipauto:
1736 if (manual != 0) {
1737 if (ipf_checkl4sum(fin) == -1) {
1738 fin->fin_flx |= FI_BAD;
1739 return -1;
1740 }
1741 }
1742 #else
1743 if (ipf_checkl4sum(fin) == -1) {
1744 fin->fin_flx |= FI_BAD;
1745 return -1;
1746 }
1747 #endif
1748 return 0;
1749 }
1750
1751
1752 #ifdef USE_INET6
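/*
 * As above, but for the layer 4 checksum of an IPv6 packet.
 */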
1753 EXTERN_INLINE int
1754 ipf_checkv6sum(fr_info_t *fin)
1755 {
1756 # ifdef M_CSUM_TCP_UDP_BAD
1757 int manual, pflag, cflags, active;
1758 mb_t *m;
1759
1760 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1761 return 0;
1762
1763 if ((fin->fin_flx & FI_SHORT) != 0)
1764 return 1;
1765
1766 if (fin->fin_cksum != FI_CK_SUMOK)
1767 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1768
1769
1770 manual = 0;
1771 m = fin->fin_m;
1772
1773 switch (fin->fin_p)
1774 {
1775 case IPPROTO_UDP :
1776 pflag = M_CSUM_UDPv6;
1777 break;
1778 case IPPROTO_TCP :
1779 pflag = M_CSUM_TCPv6;
1780 break;
1781 default :
1782 pflag = 0;
1783 manual = 1;
1784 break;
1785 }
1786
1787 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1788 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1789 cflags = m->m_pkthdr.csum_flags & active;
1790
1791 if (pflag != 0) {
1792 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1793 fin->fin_flx |= FI_BAD;
1794 } else if (cflags == (pflag | M_CSUM_DATA)) {
1795 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1796 fin->fin_flx |= FI_BAD;
1797 } else if (cflags == pflag) {
1798 ;
1799 } else {
1800 manual = 1;
1801 }
1802 }
1803 if (manual != 0) {
1804 if (ipf_checkl4sum(fin) == -1) {
1805 fin->fin_flx |= FI_BAD;
1806 return -1;
1807 }
1808 }
1809 # else
1810 if (ipf_checkl4sum(fin) == -1) {
1811 fin->fin_flx |= FI_BAD;
1812 return -1;
1813 }
1814 # endif
1815 return 0;
1816 }
1817 #endif /* USE_INET6 */
1818
1819
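/*
 * Return the total number of data bytes in an mbuf chain, using the packet
 * header length when one is present.
 */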
1820 size_t
1821 mbufchainlen(struct mbuf *m0)
1822 {
1823 size_t len;
1824
1825 if ((m0->m_flags & M_PKTHDR) != 0) {
1826 len = m0->m_pkthdr.len;
1827 } else {
1828 struct mbuf *m;
1829
1830 for (m = m0, len = 0; m != NULL; m = m->m_next)
1831 len += m->m_len;
1832 }
1833 return len;
1834 }
1835
1836
1837 /* ------------------------------------------------------------------------ */
1838 /* Function: ipf_pullup */
1839 /* Returns: NULL == pullup failed, else pointer to protocol header */
1840 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1841 /* fin(I) - pointer to packet information */
1842 /* len(I) - number of bytes to pullup */
1843 /* */
1844 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1845 /* single buffer for ease of access. Operating system native functions are */
1846 /* used to manage buffers - if necessary. If the entire packet ends up in */
1847 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1848 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1849 /* and ONLY if the pullup succeeds. */
1850 /* */
1851 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1852 /* of buffers that starts at *fin->fin_mp. */
1853 /* ------------------------------------------------------------------------ */
1854 void *
1855 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1856 {
1857 int dpoff, ipoff;
1858 mb_t *m = xmin;
1859 char *ip;
1860
1861 if (m == NULL)
1862 return NULL;
1863
1864 ip = (char *)fin->fin_ip;
1865 if ((fin->fin_flx & FI_COALESCE) != 0)
1866 return ip;
1867
1868 ipoff = fin->fin_ipoff;
1869 if (fin->fin_dp != NULL)
1870 dpoff = (char *)fin->fin_dp - (char *)ip;
1871 else
1872 dpoff = 0;
1873
1874 if (M_LEN(m) < len) {
1875 mb_t *n = *fin->fin_mp;
1876 /*
1877 * Assume that M_PKTHDR is set and just work with what is left
1878 		 * rather than check.
1879 * Should not make any real difference, anyway.
1880 */
1881 if (m != n) {
1882 /*
1883 * Record the mbuf that points to the mbuf that we're
1884 * about to go to work on so that we can update the
1885 * m_next appropriately later.
1886 */
1887 for (; n->m_next != m; n = n->m_next)
1888 ;
1889 } else {
1890 n = NULL;
1891 }
1892
1893 #ifdef MHLEN
1894 if (len > MHLEN)
1895 #else
1896 if (len > MLEN)
1897 #endif
1898 {
1899 #ifdef HAVE_M_PULLDOWN
1900 if (m_pulldown(m, 0, len, NULL) == NULL)
1901 m = NULL;
1902 #else
1903 FREE_MB_T(*fin->fin_mp);
1904 m = NULL;
1905 n = NULL;
1906 #endif
1907 } else
1908 {
1909 m = m_pullup(m, len);
1910 }
1911 if (n != NULL)
1912 n->m_next = m;
1913 if (m == NULL) {
1914 /*
1915 * When n is non-NULL, it indicates that m pointed to
1916 * a sub-chain (tail) of the mbuf and that the head
1917 * of this chain has not yet been free'd.
1918 */
1919 if (n != NULL) {
1920 FREE_MB_T(*fin->fin_mp);
1921 }
1922
1923 *fin->fin_mp = NULL;
1924 fin->fin_m = NULL;
1925 return NULL;
1926 }
1927
1928 if (n == NULL)
1929 *fin->fin_mp = m;
1930
1931 while (M_LEN(m) == 0) {
1932 m = m->m_next;
1933 }
1934 fin->fin_m = m;
1935 ip = MTOD(m, char *) + ipoff;
1936
1937 fin->fin_ip = (ip_t *)ip;
1938 if (fin->fin_dp != NULL)
1939 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1940 if (fin->fin_fraghdr != NULL)
1941 fin->fin_fraghdr = (char *)ip +
1942 ((char *)fin->fin_fraghdr -
1943 (char *)fin->fin_ip);
1944 }
1945
1946 if (len == fin->fin_plen)
1947 fin->fin_flx |= FI_COALESCE;
1948 return ip;
1949 }
1950
1951
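/*
 * Re-inject a packet into the IP stack: inbound packets are queued for
 * ip_input() via the packet queue, outbound packets go to ip_output().
 */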
1952 int
1953 ipf_inject(fr_info_t *fin, mb_t *m)
1954 {
1955 int error;
1956
1957 if (fin->fin_out == 0) {
1958 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1959 FREE_MB_T(m);
1960 error = ENOBUFS;
1961 } else {
1962 error = 0;
1963 }
1964 } else {
1965 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1966 }
1967 return error;
1968 }
1969
1970
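/*
 * Return a 32-bit pseudo-random number from cprng(9) or arc4random().
 */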
1971 u_32_t
1972 ipf_random(void)
1973 {
1974 int number;
1975
1976 #ifdef _CPRNG_H
1977 number = cprng_fast32();
1978 #else
1979 number = arc4random();
1980 #endif
1981 return number;
1982 }
1983
1984
1985 /*
1986 * routines below for saving IP headers to buffer
1987 */
1988 static int ipfopen(dev_t dev, int flags
1989 #if (NetBSD >= 199511)
1990 , int devtype, PROC_T *p
1991 #endif
1992 )
1993 {
1994 u_int unit = GET_MINOR(dev);
1995 int error;
1996
1997 if (IPL_LOGMAX < unit) {
1998 error = ENXIO;
1999 } else {
2000 switch (unit)
2001 {
2002 case IPL_LOGIPF :
2003 case IPL_LOGNAT :
2004 case IPL_LOGSTATE :
2005 case IPL_LOGAUTH :
2006 case IPL_LOGLOOKUP :
2007 case IPL_LOGSYNC :
2008 #ifdef IPFILTER_SCAN
2009 case IPL_LOGSCAN :
2010 #endif
2011 error = 0;
2012 break;
2013 default :
2014 error = ENXIO;
2015 break;
2016 }
2017 }
2018 #if (__NetBSD_Version__ >= 799003000)
2019 if (error == 0) {
2020 mutex_enter(&ipf_ref_mutex);
2021 ipf_active = 1;
2022 mutex_exit(&ipf_ref_mutex);
2023 }
2024 #endif
2025 return error;
2026 }
2027
2028
2029 static int ipfclose(dev_t dev, int flags
2030 #if (NetBSD >= 199511)
2031 , int devtype, PROC_T *p
2032 #endif
2033 )
2034 {
2035 u_int unit = GET_MINOR(dev);
2036
2037 if (IPL_LOGMAX < unit)
2038 return ENXIO;
2039 else {
2040 #if (__NetBSD_Version__ >= 799003000)
2041 mutex_enter(&ipf_ref_mutex);
2042 ipf_active = 0;
2043 mutex_exit(&ipf_ref_mutex);
2044 #endif
2045 return 0;
2046 }
2047 }
2048
2049 /*
2050 * ipfread/ipflog
2051 * both of these must operate with at least splnet() lest they be
2052  * called during packet processing and cause an inconsistency to appear in
2053 * the filter lists.
2054 */
2055 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2056 {
2057
2058 if (ipfmain.ipf_running < 1) {
2059 ipfmain.ipf_interror = 130006;
2060 return EIO;
2061 }
2062
2063 if (GET_MINOR(dev) == IPL_LOGSYNC)
2064 return ipf_sync_read(&ipfmain, uio);
2065
2066 #ifdef IPFILTER_LOG
2067 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2068 #else
2069 ipfmain.ipf_interror = 130007;
2070 return ENXIO;
2071 #endif
2072 }
2073
2074
2075 /*
2076 * ipfwrite
2077 * both of these must operate with at least splnet() lest they be
2078  * called during packet processing and cause an inconsistency to appear in
2079 * the filter lists.
2080 */
2081 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2082 {
2083
2084 if (ipfmain.ipf_running < 1) {
2085 ipfmain.ipf_interror = 130008;
2086 return EIO;
2087 }
2088
2089 if (GET_MINOR(dev) == IPL_LOGSYNC)
2090 return ipf_sync_write(&ipfmain, uio);
2091 ipfmain.ipf_interror = 130009;
2092 return ENXIO;
2093 }
2094
2095
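/*
 * poll(2) support for the ipf devices: report readability of the log, auth
 * and sync units and writability of the sync unit.
 */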
2096 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2097 {
2098 u_int unit = GET_MINOR(dev);
2099 int revents = 0;
2100
2101 if (IPL_LOGMAX < unit) {
2102 ipfmain.ipf_interror = 130010;
2103 return ENXIO;
2104 }
2105
2106 switch (unit)
2107 {
2108 case IPL_LOGIPF :
2109 case IPL_LOGNAT :
2110 case IPL_LOGSTATE :
2111 #ifdef IPFILTER_LOG
2112 if ((events & (POLLIN | POLLRDNORM)) &&
2113 ipf_log_canread(&ipfmain, unit))
2114 revents |= events & (POLLIN | POLLRDNORM);
2115 #endif
2116 break;
2117 case IPL_LOGAUTH :
2118 if ((events & (POLLIN | POLLRDNORM)) &&
2119 ipf_auth_waiting(&ipfmain))
2120 revents |= events & (POLLIN | POLLRDNORM);
2121 break;
2122 case IPL_LOGSYNC :
2123 if ((events & (POLLIN | POLLRDNORM)) &&
2124 ipf_sync_canread(&ipfmain))
2125 revents |= events & (POLLIN | POLLRDNORM);
2126 if ((events & (POLLOUT | POLLWRNORM)) &&
2127 ipf_sync_canwrite(&ipfmain))
2128 revents |= events & (POLLOUT | POLLWRNORM);
2129 break;
2130 case IPL_LOGSCAN :
2131 case IPL_LOGLOOKUP :
2132 default :
2133 break;
2134 }
2135
2136 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2137 selrecord(p, &ipfmain.ipf_selwait[unit]);
2138 return revents;
2139 }
2140
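/*
 * Checksum the layer 4 payload of the packet and combine the result with
 * the partial sum supplied by the caller.
 */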
2141 u_int
2142 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2143 {
2144 struct mbuf *m;
2145 u_int sum2;
2146 int off;
2147
2148 m = fin->fin_m;
2149 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
2150 m->m_data += hlen;
2151 m->m_len -= hlen;
2152 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2153 m->m_len += hlen;
2154 m->m_data -= hlen;
2155
2156 /*
2157 * Both sum and sum2 are partial sums, so combine them together.
2158 */
2159 sum += ~sum2 & 0xffff;
2160 while (sum > 0xffff)
2161 sum = (sum & 0xffff) + (sum >> 16);
2162 sum2 = ~sum & 0xffff;
2163 return sum2;
2164 }
2165
2166 #if (__NetBSD_Version__ >= 799003000)
2167
2168 /* NetBSD module interface */
2169
2170 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2171
2172 static int ipl_init(void *);
2173 static int ipl_fini(void *);
2174 static int ipl_modcmd(modcmd_t, void *);
2175
2176 #ifdef _MODULE
2177 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2178 #endif
2179
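/*
 * Module control entry point: dispatch init/fini requests from the
 * module(9) framework.
 */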
2180 static int
2181 ipl_modcmd(modcmd_t cmd, void *opaque)
2182 {
2183
2184 switch (cmd) {
2185 case MODULE_CMD_INIT:
2186 return ipl_init(opaque);
2187 case MODULE_CMD_FINI:
2188 return ipl_fini(opaque);
2189 default:
2190 return ENOTTY;
2191 }
2192 }
2193
2194 static int
2195 ipl_init(void *opaque)
2196 {
2197 int error;
2198
2199 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2200 ipf_listener_cb, NULL);
2201
2202 if ((error = ipf_load_all()) != 0)
2203 return error;
2204
2205 if (ipf_create_all(&ipfmain) == NULL) {
2206 ipf_unload_all();
2207 return ENODEV;
2208 }
2209
2210 /* Initialize our mutex and reference count */
2211 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2212 ipf_active = 0;
2213
2214 #ifdef _MODULE
2215 /*
2216 	 * Insert ourselves into the cdevsw list.  It's OK if we are
2217 * already there, since this will happen when our module is
2218 * built-in to the kernel. (We could skip the insert in
2219 * that case, but that would break the possibility of a
2220 * unload/re-load sequence for the built-in module, which
2221 * corresponds to disable/re-enable.)
2222 */
2223 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2224 if (error == EEXIST)
2225 error = 0;
2226 #endif
2227
2228 if (error)
2229 ipl_fini(opaque);
2230
2231 return error;
2232 }
2233
2234 static int
2235 ipl_fini(void *opaque)
2236 {
2237
2238 #ifdef _MODULE
2239 (void)devsw_detach(NULL, &ipl_cdevsw);
2240 #endif
2241
2242 /*
2243 * Grab the mutex, verify that there are no references
2244 * and that there are no running filters. If either
2245 * of these exists, reinsert our cdevsw entry and return
2246 * an error.
2247 */
2248 mutex_enter(&ipf_ref_mutex);
2249 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2250 #ifdef _MODULE
2251 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2252 &ipl_cdevsw, &ipl_cmaj);
2253 #endif
2254 mutex_exit(&ipf_ref_mutex);
2255 return EBUSY;
2256 }
2257
2258 /* Clean up the rest of our state before being unloaded */
2259
2260 mutex_exit(&ipf_ref_mutex);
2261 mutex_destroy(&ipf_ref_mutex);
2262 ipf_destroy_all(&ipfmain);
2263 ipf_unload_all();
2264 kauth_unlisten_scope(ipf_listener);
2265
2266 return 0;
2267 }
2268 #endif /* (__NetBSD_Version__ >= 799003000) */
2269