1 /*	$NetBSD: ip_fil_netbsd.c,v 1.21 2016/12/28 19:53:02 christos Exp $	*/
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.21 2016/12/28 19:53:02 christos Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60
61 #include <net/if.h>
62 #include <net/route.h>
63 #include <netinet/in.h>
64 #include <netinet/in_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip_var.h>
68 #include <netinet/tcp.h>
69 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
70 # include <netinet/tcp_timer.h>
71 # include <netinet/tcp_var.h>
72 #endif
73 #include <netinet/udp.h>
74 #include <netinet/tcpip.h>
75 #include <netinet/ip_icmp.h>
76 #include "netinet/ip_compat.h"
77 #ifdef USE_INET6
78 # include <netinet/icmp6.h>
79 # if (__NetBSD_Version__ >= 106000000)
80 # include <netinet6/nd6.h>
81 # endif
82 #endif
83 #include "netinet/ip_fil.h"
84 #include "netinet/ip_nat.h"
85 #include "netinet/ip_frag.h"
86 #include "netinet/ip_state.h"
87 #include "netinet/ip_proxy.h"
88 #include "netinet/ip_auth.h"
89 #include "netinet/ip_sync.h"
90 #include "netinet/ip_lookup.h"
91 #include "netinet/ip_dstlist.h"
92 #ifdef IPFILTER_SCAN
93 #include "netinet/ip_scan.h"
94 #endif
95 #include <sys/md5.h>
96 #include <sys/kernel.h>
97 #include <sys/conf.h>
98 #ifdef INET
99 extern int ip_optcopy (struct ip *, struct ip *);
100 #endif
101
102 #ifdef IPFILTER_M_IPFILTER
103 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
104 #endif
105
106 #if __NetBSD_Version__ >= 105009999
107 # define csuminfo csum_flags
108 #endif
109
110 #if __NetBSD_Version__ < 200000000
111 extern struct protosw inetsw[];
112 #endif
113
114 #if (__NetBSD_Version__ >= 599002000)
115 static kauth_listener_t ipf_listener;
116 #endif
117
118 #if (__NetBSD_Version__ < 399001400)
119 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
120 struct ifnet *, struct in6_addr *, u_long *,
121 int *);
122 #endif
123 #if (NetBSD >= 199511)
124 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
125 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
126 #else
127 # if (__NetBSD_Version__ >= 399001400)
128 static int ipfopen(dev_t dev, int flags, struct lwp *);
129 static int ipfclose(dev_t dev, int flags, struct lwp *);
130 # else
131 static int ipfopen(dev_t dev, int flags);
132 static int ipfclose(dev_t dev, int flags);
133 # endif /* __NetBSD_Version__ >= 399001400 */
134 #endif
135 static int ipfread(dev_t, struct uio *, int ioflag);
136 static int ipfwrite(dev_t, struct uio *, int ioflag);
137 static int ipfpoll(dev_t, int events, PROC_T *);
138 static void ipf_timer_func(void *ptr);
139
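/*
 * Character device switch for the ipl devices (one minor number per
 * IPL_LOG* unit).  The ipfilter userland tools drive the kernel side
 * of the filter through these entry points, chiefly ipfioctl().
 */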
140 const struct cdevsw ipl_cdevsw = {
141 .d_open = ipfopen,
142 .d_close = ipfclose,
143 .d_read = ipfread,
144 .d_write = ipfwrite,
145 .d_ioctl = ipfioctl,
146 .d_stop = nostop,
147 .d_tty = notty,
148 .d_poll = ipfpoll,
149 .d_mmap = nommap,
150 #if (__NetBSD_Version__ >= 200000000)
151 .d_kqfilter = nokqfilter,
152 #endif
153 .d_discard = nodiscard,
154 #ifdef D_OTHER
155 .d_flag = D_OTHER
156 #else
157 .d_flag = 0
158 #endif
159 };
160 #if (__NetBSD_Version__ >= 799003000)
161 kmutex_t ipf_ref_mutex;
162 int ipf_active;
163 #endif
164
165 ipf_main_softc_t ipfmain;
166
167 static u_short ipid = 0;
168 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
169 static int ipf_send_ip(fr_info_t *, mb_t *);
170 #ifdef USE_INET6
171 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
172 fr_info_t *, frdest_t *);
173 #endif
174
175 #if defined(NETBSD_PF)
176 # include <net/pfil.h>
177 /*
178 * We provide the ipf_checkp name just to minimize changes later.
179 */
180 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
181 #endif /* NETBSD_PF */
182
183 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
184 # include <net/pfil.h>
185
186 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
187
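/*
 * pfil(9) hook for IPv4: glue between the NetBSD packet filter interface
 * and ipf_check().  Called for every inbound and outbound IPv4 packet on
 * the inet pfil head once ipfattach() has registered it.
 */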
188 static int
189 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
190 {
191 struct ip *ip;
192 int rv, hlen;
193
194 #if __NetBSD_Version__ >= 200080000
195 /*
196 	 * ensure that the mbufs are writable beforehand,
197 	 * as the ipf code assumes this.
198 * XXX inefficient
199 */
200 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
201
202 if (error) {
203 m_freem(*mp);
204 *mp = NULL;
205 return error;
206 }
207 #endif
208 ip = mtod(*mp, struct ip *);
209 hlen = ip->ip_hl << 2;
210
211 #ifdef INET
212 #if defined(M_CSUM_TCPv4)
213 /*
214 * If the packet is out-bound, we can't delay checksums
215 * here. For in-bound, the checksum has already been
216 * validated.
217 */
218 if (dir == PFIL_OUT) {
219 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
220 in_delayed_cksum(*mp);
221 (*mp)->m_pkthdr.csum_flags &=
222 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
223 }
224 }
225 #endif /* M_CSUM_TCPv4 */
226 #endif /* INET */
227
228 /*
229 * Note, we don't need to update the checksum, because
230 * it has already been verified.
231 */
232 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
233
234 return (rv);
235 }
236
237 # ifdef USE_INET6
238 # include <netinet/ip6.h>
239
240 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
241
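/*
 * pfil(9) hook for IPv6; the checksum handling mirrors the IPv4 wrapper
 * above, after which the packet is handed to ipf_check() with the IPv6
 * header length.
 */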
242 static int
243 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
244 {
245 #if defined(INET6)
246 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
247 /*
248 * If the packet is out-bound, we can't delay checksums
249 * here. For in-bound, the checksum has already been
250 * validated.
251 */
252 if (dir == PFIL_OUT) {
253 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
254 # if (__NetBSD_Version__ > 399000600)
255 in6_delayed_cksum(*mp);
256 # endif
257 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
258 M_CSUM_UDPv6);
259 }
260 }
261 # endif
262 #endif /* INET6 */
263
264 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
265 ifp, (dir == PFIL_OUT), mp));
266 }
267 # endif
268
269
270 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
271
272 # if (__NetBSD_Version__ >= 799000400)
273
274 static void ipf_pfilsync(void *, unsigned long, void *);
275
276 static void
277 ipf_pfilsync(void *hdr, unsigned long cmd, void *arg2)
278 {
279 /*
280 	 * The interface pointer is useless at create time (we have nothing to
281 	 * compare it against).  At detach time the interface name is still in
282 	 * the list of active NICs (albeit down, which is not a reliable
283 	 * indicator), and ifunit() on the name will still return the pointer,
284 	 * so it is not much use then, either.
285 */
286 ipf_sync(&ipfmain, NULL);
287 }
288
289 # else
290
291 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
292
293 static int
294 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
295 {
296 ipf_sync(&ipfmain, NULL);
297 return 0;
298 }
299
300 # endif
301 # endif
302
303 #endif /* __NetBSD_Version__ >= 105110000 */
304
305
306 #if defined(IPFILTER_LKM)
307 int
308 ipf_identify(s)
309 char *s;
310 {
311 if (strcmp(s, "ipl") == 0)
312 return 1;
313 return 0;
314 }
315 #endif /* IPFILTER_LKM */
316
317 #if (__NetBSD_Version__ >= 599002000)
318 static int
319 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
320 void *arg0, void *arg1, void *arg2, void *arg3)
321 {
322 int result;
323 enum kauth_network_req req;
324
325 result = KAUTH_RESULT_DEFER;
326 req = (enum kauth_network_req)arg0;
327
328 if (action != KAUTH_NETWORK_FIREWALL)
329 return result;
330
331 	/* These must have come from device context. */
332 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
333 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
334 result = KAUTH_RESULT_ALLOW;
335
336 return result;
337 }
338 #endif
339
340 /*
341  * Pseudo-device attach hook; a no-op on kernels where ipl_init() below does the setup.
342  */
343 void
344 ipfilterattach(int count)
345 {
346
347 #if (__NetBSD_Version__ >= 799003000)
348 return;
349 #else
350 #if (__NetBSD_Version__ >= 599002000)
351 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
352 ipf_listener_cb, NULL);
353 #endif
354
355 if (ipf_load_all() == 0)
356 (void) ipf_create_all(&ipfmain);
357 #endif
358 }
359
360
361 int
362 ipfattach(ipf_main_softc_t *softc)
363 {
364 SPL_INT(s);
365 #if (__NetBSD_Version__ >= 499005500)
366 int i;
367 #endif
368 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
369 int error = 0;
370 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
371 pfil_head_t *ph_inet;
372 # ifdef USE_INET6
373 pfil_head_t *ph_inet6;
374 # endif
375 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
376 pfil_head_t *ph_ifsync;
377 # endif
378 # endif
379 #endif
380
381 SPL_NET(s);
382 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
383 printf("IP Filter: already initialized\n");
384 SPL_X(s);
385 IPFERROR(130017);
386 return EBUSY;
387 }
388
389 if (ipf_init_all(softc) < 0) {
390 SPL_X(s);
391 IPFERROR(130015);
392 return EIO;
393 }
394
395 #ifdef NETBSD_PF
396 # if (__NetBSD_Version__ >= 104200000)
397 # if __NetBSD_Version__ >= 105110000
398 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
399 # ifdef USE_INET6
400 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
401 # endif
402 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
403 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
404 # endif
405
406 if (ph_inet == NULL
407 # ifdef USE_INET6
408 && ph_inet6 == NULL
409 # endif
410 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
411 && ph_ifsync == NULL
412 # endif
413 ) {
414 SPL_X(s);
415 IPFERROR(130016);
416 return ENODEV;
417 }
418
419 if (ph_inet != NULL)
420 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
421 PFIL_IN|PFIL_OUT, ph_inet);
422 else
423 error = 0;
424 # else
425 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
426 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
427 # endif
428 if (error) {
429 IPFERROR(130013);
430 goto pfil_error;
431 }
432 # else
433 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
434 # endif
435
436 # ifdef USE_INET6
437 # if __NetBSD_Version__ >= 105110000
438 if (ph_inet6 != NULL)
439 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
440 PFIL_IN|PFIL_OUT, ph_inet6);
441 else
442 error = 0;
443 if (error) {
444 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
445 PFIL_IN|PFIL_OUT, ph_inet6);
446 ipfmain.ipf_interror = 130014;
447 goto pfil_error;
448 }
449 # else
450 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
451 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
452 if (error) {
453 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
454 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
455 IPFERROR(130014);
456 goto pfil_error;
457 }
458 # endif
459 # endif
460
461 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
462 if (ph_ifsync != NULL)
463 #if (__NetBSD_Version__ >= 799000400)
464 (void) pfil_add_ihook((void *)ipf_pfilsync, NULL,
465 PFIL_IFNET, ph_ifsync);
466 #else
467 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
468 PFIL_IFNET, ph_ifsync);
469 #endif
470 # endif
471 #endif
472
473 #if (__NetBSD_Version__ >= 499005500)
474 for (i = 0; i < IPL_LOGSIZE; i++)
475 selinit(&ipfmain.ipf_selwait[i]);
476 #else
477 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
478 #endif
479 ipf_savep = ipf_checkp;
480 ipf_checkp = ipf_check;
481
482 #ifdef INET
483 if (softc->ipf_control_forwarding & 1)
484 ipforwarding = 1;
485 #endif
486
487 ipid = 0;
488
489 SPL_X(s);
490
491 #if (__NetBSD_Version__ >= 104010000)
492 # if (__NetBSD_Version__ >= 499002000)
493 callout_init(&softc->ipf_slow_ch, 0);
494 # else
495 callout_init(&softc->ipf_slow_ch);
496 # endif
497 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
498 ipf_timer_func, softc);
499 #else
500 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
501 #endif
502
503 return 0;
504
505 #if __NetBSD_Version__ >= 105110000
506 pfil_error:
507 SPL_X(s);
508 ipf_fini_all(softc);
509 return error;
510 #endif
511 }
512
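/*
 * Periodic housekeeping: run ipf_slowtimer() to do the regular expiry
 * work while the filter is running, then re-arm the callout for as long
 * as ipf_running is 1 or -1.
 */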
513 static void
514 ipf_timer_func(void *ptr)
515 {
516 ipf_main_softc_t *softc = ptr;
517 SPL_INT(s);
518
519 SPL_NET(s);
520 READ_ENTER(&softc->ipf_global);
521
522 if (softc->ipf_running > 0)
523 ipf_slowtimer(softc);
524
525 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
526 #if NETBSD_GE_REV(104240000)
527 callout_reset(&softc->ipf_slow_ch, hz / 2,
528 ipf_timer_func, softc);
529 #else
530 timeout(ipf_timer_func, softc,
531 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
532 #endif
533 }
534 RWLOCK_EXIT(&softc->ipf_global);
535 SPL_X(s);
536 }
537
538
539 /*
540 * Disable the filter by removing the hooks from the IP input/output
541 * stream.
542 */
543 int
544 ipfdetach(ipf_main_softc_t *softc)
545 {
546 SPL_INT(s);
547 #if (__NetBSD_Version__ >= 499005500)
548 int i;
549 #endif
550 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
551 int error = 0;
552 # if __NetBSD_Version__ >= 105150000
553 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
554 # ifdef USE_INET6
555 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
556 # endif
557 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
558 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
559 # endif
560 # endif
561 #endif
562
563 SPL_NET(s);
564
565 #if (__NetBSD_Version__ >= 104010000)
566 if (softc->ipf_running > 0)
567 callout_stop(&softc->ipf_slow_ch);
568 #else
569 untimeout(ipf_slowtimer, NULL);
570 #endif /* NetBSD */
571
572 ipf_checkp = ipf_savep;
573 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
574 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
575
576 #ifdef INET
577 if (softc->ipf_control_forwarding & 2)
578 ipforwarding = 0;
579 #endif
580
581 #ifdef NETBSD_PF
582 # if (__NetBSD_Version__ >= 104200000)
583 # if __NetBSD_Version__ >= 105110000
584 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
585 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
586 PFIL_IFNET, ph_ifsync);
587 # endif
588
589 if (ph_inet != NULL)
590 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
591 PFIL_IN|PFIL_OUT, ph_inet);
592 else
593 error = 0;
594 # else
595 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
596 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
597 # endif
598 if (error) {
599 SPL_X(s);
600 IPFERROR(130011);
601 return error;
602 }
603 # else
604 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
605 # endif
606 # ifdef USE_INET6
607 # if __NetBSD_Version__ >= 105110000
608 if (ph_inet6 != NULL)
609 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
610 PFIL_IN|PFIL_OUT, ph_inet6);
611 else
612 error = 0;
613 # else
614 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
615 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
616 # endif
617 if (error) {
618 SPL_X(s);
619 IPFERROR(130012);
620 return error;
621 }
622 # endif
623 #endif
624 SPL_X(s);
625
626 #if (__NetBSD_Version__ >= 499005500)
627 for (i = 0; i < IPL_LOGSIZE; i++)
628 seldestroy(&ipfmain.ipf_selwait[i]);
629 #endif
630
631 ipf_fini_all(softc);
632
633 return 0;
634 }
635
636
637 /*
638 * Filter ioctl interface.
639 */
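/*
 * Write access (FWRITE) is gated through kauth(9) on newer kernels and
 * through securelevel on older ones.  While the filter is not running,
 * only a small set of ioctls on the main ipf unit is accepted (plus
 * SIOCIPFINTERROR); e.g. SIOCFRENB, which ipf(8) -E uses to enable the
 * filter.
 */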
640 int
641 ipfioctl(dev_t dev, u_long cmd,
642 #if (__NetBSD_Version__ >= 499001000)
643 void *data,
644 #else
645 caddr_t data,
646 #endif
647 int mode
648 #if (NetBSD >= 199511)
649 # if (__NetBSD_Version__ >= 399001400)
650 , struct lwp *p
651 # if (__NetBSD_Version__ >= 399002000)
652 # define UID(l) kauth_cred_getuid((l)->l_cred)
653 # else
654 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
655 # endif
656 # else
657 , struct proc *p
658 # define UID(p) ((p)->p_cred->p_ruid)
659 # endif
660 #endif
661 )
662 {
663 int error = 0, unit = 0;
664 SPL_INT(s);
665
666 #if (__NetBSD_Version__ >= 399002000)
667 if ((mode & FWRITE) &&
668 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
669 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
670 NULL, NULL)) {
671 ipfmain.ipf_interror = 130005;
672 return EPERM;
673 }
674 #else
675 if ((securelevel >= 2) && (mode & FWRITE)) {
676 ipfmain.ipf_interror = 130001;
677 return EPERM;
678 }
679 #endif
680
681 unit = GET_MINOR(dev);
682 if ((IPL_LOGMAX < unit) || (unit < 0)) {
683 ipfmain.ipf_interror = 130002;
684 return ENXIO;
685 }
686
687 if (ipfmain.ipf_running <= 0) {
688 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
689 ipfmain.ipf_interror = 130003;
690 return EIO;
691 }
692 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
693 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
694 cmd != SIOCGETFS && cmd != SIOCGETFF &&
695 cmd != SIOCIPFINTERROR) {
696 ipfmain.ipf_interror = 130004;
697 return EIO;
698 }
699 }
700
701 SPL_NET(s);
702
703 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
704 if (error != -1) {
705 SPL_X(s);
706 return error;
707 }
708
709 SPL_X(s);
710 return error;
711 }
712
713
714 /*
715 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
716 * requires a large amount of setting up and isn't any more efficient.
717 */
718 int
719 ipf_send_reset(fr_info_t *fin)
720 {
721 struct tcphdr *tcp, *tcp2;
722 int tlen = 0, hlen;
723 struct mbuf *m;
724 #ifdef USE_INET6
725 ip6_t *ip6;
726 #endif
727 ip_t *ip;
728
729 tcp = fin->fin_dp;
730 if (tcp->th_flags & TH_RST)
731 return -1; /* feedback loop */
732
733 if (ipf_checkl4sum(fin) == -1)
734 return -1;
735
736 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
737 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
738 ((tcp->th_flags & TH_FIN) ? 1 : 0);
739
740 #ifdef USE_INET6
741 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
742 #else
743 hlen = sizeof(ip_t);
744 #endif
745 #ifdef MGETHDR
746 MGETHDR(m, M_DONTWAIT, MT_HEADER);
747 #else
748 MGET(m, M_DONTWAIT, MT_HEADER);
749 #endif
750 if (m == NULL)
751 return -1;
752 if (sizeof(*tcp2) + hlen > MHLEN) {
753 MCLGET(m, M_DONTWAIT);
754 if (m == NULL)
755 return -1;
756 if ((m->m_flags & M_EXT) == 0) {
757 FREE_MB_T(m);
758 return -1;
759 }
760 }
761
762 m->m_len = sizeof(*tcp2) + hlen;
763 m->m_data += max_linkhdr;
764 m->m_pkthdr.len = m->m_len;
765 m_reset_rcvif(m);
766 ip = mtod(m, struct ip *);
767 bzero((char *)ip, hlen);
768 #ifdef USE_INET6
769 ip6 = (ip6_t *)ip;
770 #endif
771 bzero((char *)ip, sizeof(*tcp2) + hlen);
772 tcp2 = (struct tcphdr *)((char *)ip + hlen);
773 tcp2->th_sport = tcp->th_dport;
774 tcp2->th_dport = tcp->th_sport;
775
776 if (tcp->th_flags & TH_ACK) {
777 tcp2->th_seq = tcp->th_ack;
778 tcp2->th_flags = TH_RST;
779 tcp2->th_ack = 0;
780 } else {
781 tcp2->th_seq = 0;
782 tcp2->th_ack = ntohl(tcp->th_seq);
783 tcp2->th_ack += tlen;
784 tcp2->th_ack = htonl(tcp2->th_ack);
785 tcp2->th_flags = TH_RST|TH_ACK;
786 }
787 tcp2->th_x2 = 0;
788 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
789 tcp2->th_win = tcp->th_win;
790 tcp2->th_sum = 0;
791 tcp2->th_urp = 0;
792
793 #ifdef USE_INET6
794 if (fin->fin_v == 6) {
795 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
796 ip6->ip6_plen = htons(sizeof(struct tcphdr));
797 ip6->ip6_nxt = IPPROTO_TCP;
798 ip6->ip6_hlim = 0;
799 ip6->ip6_src = fin->fin_dst6.in6;
800 ip6->ip6_dst = fin->fin_src6.in6;
801 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
802 sizeof(*ip6), sizeof(*tcp2));
803 return ipf_send_ip(fin, m);
804 }
805 #endif
806 #ifdef INET
807 ip->ip_p = IPPROTO_TCP;
808 ip->ip_len = htons(sizeof(struct tcphdr));
809 ip->ip_src.s_addr = fin->fin_daddr;
810 ip->ip_dst.s_addr = fin->fin_saddr;
811 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
812 ip->ip_len = hlen + sizeof(*tcp2);
813 return ipf_send_ip(fin, m);
814 #else
815 return 0;
816 #endif
817 }
818
819
820 /*
821 * Expects ip_len to be in host byte order when called.
822 */
823 static int
824 ipf_send_ip(fr_info_t *fin, mb_t *m)
825 {
826 fr_info_t fnew;
827 #ifdef INET
828 ip_t *oip;
829 #endif
830 ip_t *ip;
831 int hlen;
832
833 ip = mtod(m, ip_t *);
834 bzero((char *)&fnew, sizeof(fnew));
835 fnew.fin_main_soft = fin->fin_main_soft;
836
837 IP_V_A(ip, fin->fin_v);
838 switch (fin->fin_v)
839 {
840 #ifdef INET
841 case 4 :
842 oip = fin->fin_ip;
843 hlen = sizeof(*oip);
844 fnew.fin_v = 4;
845 fnew.fin_p = ip->ip_p;
846 fnew.fin_plen = ntohs(ip->ip_len);
847 HTONS(ip->ip_len);
848 IP_HL_A(ip, sizeof(*oip) >> 2);
849 ip->ip_tos = oip->ip_tos;
850 ip->ip_id = ipf_nextipid(fin);
851 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
852 ip->ip_ttl = ip_defttl;
853 ip->ip_sum = 0;
854 break;
855 #endif
856 #ifdef USE_INET6
857 case 6 :
858 {
859 ip6_t *ip6 = (ip6_t *)ip;
860
861 ip6->ip6_vfc = 0x60;
862 ip6->ip6_hlim = IPDEFTTL;
863
864 hlen = sizeof(*ip6);
865 fnew.fin_p = ip6->ip6_nxt;
866 fnew.fin_v = 6;
867 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
868 break;
869 }
870 #endif
871 default :
872 return EINVAL;
873 }
874 #ifdef KAME_IPSEC
875 m_reset_rcvif(m);
876 #endif
877
878 fnew.fin_ifp = fin->fin_ifp;
879 fnew.fin_flx = FI_NOCKSUM;
880 fnew.fin_m = m;
881 fnew.fin_ip = ip;
882 fnew.fin_mp = &m;
883 fnew.fin_hlen = hlen;
884 fnew.fin_dp = (char *)ip + hlen;
885 (void) ipf_makefrip(hlen, ip, &fnew);
886
887 return ipf_fastroute(m, &m, &fnew, NULL);
888 }
889
890
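/*
 * Build and send an ICMP (or ICMPv6) error in response to the packet
 * described by fin.  The offending IP header and the leading bytes of
 * its payload are quoted in the error (8 bytes for IPv4, as much as
 * fits within IPV6_MMTU for IPv6).
 */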
891 int
892 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
893 {
894 int err, hlen, xtra, iclen, ohlen, avail;
895 struct in_addr dst4;
896 struct icmp *icmp;
897 struct mbuf *m;
898 i6addr_t dst6;
899 void *ifp;
900 #ifdef USE_INET6
901 int code;
902 ip6_t *ip6;
903 #endif
904 ip_t *ip, *ip2;
905
906 if ((type < 0) || (type > ICMP_MAXTYPE))
907 return -1;
908
909 #ifdef USE_INET6
910 code = fin->fin_icode;
911 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
912 return -1;
913 #endif
914
915 if (ipf_checkl4sum(fin) == -1)
916 return -1;
917 #ifdef MGETHDR
918 MGETHDR(m, M_DONTWAIT, MT_HEADER);
919 #else
920 MGET(m, M_DONTWAIT, MT_HEADER);
921 #endif
922 if (m == NULL)
923 return -1;
924 avail = MHLEN;
925
926 xtra = 0;
927 hlen = 0;
928 ohlen = 0;
929 dst4.s_addr = 0;
930 ifp = fin->fin_ifp;
931 if (fin->fin_v == 4) {
932 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
933 switch (ntohs(fin->fin_data[0]) >> 8)
934 {
935 case ICMP_ECHO :
936 case ICMP_TSTAMP :
937 case ICMP_IREQ :
938 case ICMP_MASKREQ :
939 break;
940 default :
941 FREE_MB_T(m);
942 return 0;
943 }
944
945 if (dst == 0) {
946 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
947 &dst6, NULL) == -1) {
948 FREE_MB_T(m);
949 return -1;
950 }
951 dst4 = dst6.in4;
952 } else
953 dst4.s_addr = fin->fin_daddr;
954
955 hlen = sizeof(ip_t);
956 ohlen = fin->fin_hlen;
957 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
958 if (fin->fin_hlen < fin->fin_plen)
959 xtra = MIN(fin->fin_dlen, 8);
960 else
961 xtra = 0;
962 }
963
964 #ifdef USE_INET6
965 else if (fin->fin_v == 6) {
966 hlen = sizeof(ip6_t);
967 ohlen = sizeof(ip6_t);
968 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
969 type = icmptoicmp6types[type];
970 if (type == ICMP6_DST_UNREACH)
971 code = icmptoicmp6unreach[code];
972
973 if (iclen + max_linkhdr + fin->fin_plen > avail) {
974 MCLGET(m, M_DONTWAIT);
975 if (m == NULL)
976 return -1;
977 if ((m->m_flags & M_EXT) == 0) {
978 FREE_MB_T(m);
979 return -1;
980 }
981 avail = MCLBYTES;
982 }
983 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
984 xtra = MIN(xtra, IPV6_MMTU - iclen);
985 if (dst == 0) {
986 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
987 &dst6, NULL) == -1) {
988 FREE_MB_T(m);
989 return -1;
990 }
991 } else
992 dst6 = fin->fin_dst6;
993 }
994 #endif
995 else {
996 FREE_MB_T(m);
997 return -1;
998 }
999
1000 avail -= (max_linkhdr + iclen);
1001 if (avail < 0) {
1002 FREE_MB_T(m);
1003 return -1;
1004 }
1005 if (xtra > avail)
1006 xtra = avail;
1007 iclen += xtra;
1008 m->m_data += max_linkhdr;
1009 m_reset_rcvif(m);
1010 m->m_pkthdr.len = iclen;
1011 m->m_len = iclen;
1012 ip = mtod(m, ip_t *);
1013 icmp = (struct icmp *)((char *)ip + hlen);
1014 ip2 = (ip_t *)&icmp->icmp_ip;
1015
1016 icmp->icmp_type = type;
1017 icmp->icmp_code = fin->fin_icode;
1018 icmp->icmp_cksum = 0;
1019 #ifdef icmp_nextmtu
1020 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1021 if (fin->fin_mtu != 0) {
1022 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1023
1024 } else if (ifp != NULL) {
1025 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1026
1027 } else { /* make up a number... */
1028 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1029 }
1030 }
1031 #endif
1032
1033 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1034
1035 #if defined(M_CSUM_IPv4)
1036 /*
1037 * Clear any in-bound checksum flags for this packet.
1038 */
1039 m->m_pkthdr.csuminfo = 0;
1040 #endif /* M_CSUM_IPv4 */
1041
1042 #ifdef USE_INET6
1043 ip6 = (ip6_t *)ip;
1044 if (fin->fin_v == 6) {
1045 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1046 ip6->ip6_plen = htons(iclen - hlen);
1047 ip6->ip6_nxt = IPPROTO_ICMPV6;
1048 ip6->ip6_hlim = 0;
1049 ip6->ip6_src = dst6.in6;
1050 ip6->ip6_dst = fin->fin_src6.in6;
1051 if (xtra > 0)
1052 bcopy((char *)fin->fin_ip + ohlen,
1053 (char *)&icmp->icmp_ip + ohlen, xtra);
1054 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1055 sizeof(*ip6), iclen - hlen);
1056 } else
1057 #endif
1058 {
1059 ip->ip_p = IPPROTO_ICMP;
1060 ip->ip_src.s_addr = dst4.s_addr;
1061 ip->ip_dst.s_addr = fin->fin_saddr;
1062
1063 if (xtra > 0)
1064 bcopy((char *)fin->fin_ip + ohlen,
1065 (char *)&icmp->icmp_ip + ohlen, xtra);
1066 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1067 sizeof(*icmp) + 8);
1068 ip->ip_len = iclen;
1069 ip->ip_p = IPPROTO_ICMP;
1070 }
1071 err = ipf_send_ip(fin, m);
1072 return err;
1073 }
1074
1075
1076 /*
1077 * m0 - pointer to mbuf where the IP packet starts
1078 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1079 */
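/*
 * ipf_fastroute() routes the packet itself instead of handing it back to
 * the normal IP output path: the destination (or an explicit next hop
 * from fdp) is looked up in the routing table and the packet is queued
 * on the resulting interface, being fragmented here if it is larger
 * than that interface's MTU.
 */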
1080 int
1081 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1082 {
1083 register struct ip *ip, *mhip;
1084 register struct mbuf *m = *mpp;
1085 register struct route *ro;
1086 int len, off, error = 0, hlen, code;
1087 struct ifnet *ifp, *sifp;
1088 ipf_main_softc_t *softc;
1089 #if __NetBSD_Version__ >= 499001100
1090 union {
1091 struct sockaddr dst;
1092 struct sockaddr_in dst4;
1093 } u;
1094 #else
1095 struct sockaddr_in *dst4;
1096 #endif
1097 struct sockaddr *dst;
1098 u_short ip_off, ip_len;
1099 struct route iproute;
1100 struct rtentry *rt;
1101 frdest_t node;
1102 frentry_t *fr;
1103
1104 if (fin->fin_v == 6) {
1105 #ifdef USE_INET6
1106 error = ipf_fastroute6(m0, mpp, fin, fdp);
1107 #else
1108 error = EPROTONOSUPPORT;
1109 #endif
1110 if ((error != 0) && (*mpp != NULL))
1111 FREE_MB_T(*mpp);
1112 return error;
1113 }
1114 #ifndef INET
1115 FREE_MB_T(*mpp);
1116 return EPROTONOSUPPORT;
1117 #else
1118
1119 hlen = fin->fin_hlen;
1120 ip = mtod(m0, struct ip *);
1121 softc = fin->fin_main_soft;
1122 rt = NULL;
1123 ifp = NULL;
1124
1125 # if defined(M_CSUM_IPv4)
1126 /*
1127 * Clear any in-bound checksum flags for this packet.
1128 */
1129 m0->m_pkthdr.csuminfo = 0;
1130 # endif /* M_CSUM_IPv4 */
1131
1132 /*
1133 * Route packet.
1134 */
1135 ro = &iproute;
1136 memset(ro, 0, sizeof(*ro));
1137 fr = fin->fin_fr;
1138
1139 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1140 (fdp->fd_type == FRD_DSTLIST)) {
1141 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1142 fdp = &node;
1143 }
1144 if (fdp != NULL)
1145 ifp = fdp->fd_ptr;
1146 else
1147 ifp = fin->fin_ifp;
1148
1149 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1150 error = -2;
1151 goto bad;
1152 }
1153
1154 # if __NetBSD_Version__ >= 499001100
1155 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1156 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1157 else
1158 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1159 dst = &u.dst;
1160 rtcache_setdst(ro, dst);
1161 rt = rtcache_init(ro);
1162 # else
1163 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1164 dst = (struct sockaddr *)dst4;
1165 dst4->sin_family = AF_INET;
1166 dst4->sin_addr = ip->ip_dst;
1167
1168 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1169 dst4->sin_addr = fdp->fd_ip;
1170
1171 dst4->sin_len = sizeof(*dst);
1172 rtalloc(ro);
1173 rt = ro->ro_rt;
1174 # endif
1175 if ((ifp == NULL) && (rt != NULL))
1176 ifp = rt->rt_ifp;
1177 if ((rt == NULL) || (ifp == NULL)) {
1178 #ifdef INET
1179 if (in_localaddr(ip->ip_dst))
1180 error = EHOSTUNREACH;
1181 else
1182 #endif
1183 error = ENETUNREACH;
1184 goto bad;
1185 }
1186
1187
1188 if (rt->rt_flags & RTF_GATEWAY)
1189 dst = rt->rt_gateway;
1190
1191 rt->rt_use++;
1192
1193 /*
1194 	 * Input packets which are being "fastrouted" will not go back
1195 	 * through output filtering and would otherwise miss their chance
1196 	 * to be NAT'd and counted, so that is done here.  Duplicated
1197 	 * packets are not considered part of the normal packet stream,
1198 	 * so do not NAT them or pass them through stateful checking, etc.
1199 */
1200 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1201 sifp = fin->fin_ifp;
1202 fin->fin_ifp = ifp;
1203 fin->fin_out = 1;
1204 (void) ipf_acctpkt(fin, NULL);
1205 fin->fin_fr = NULL;
1206 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1207 u_32_t pass;
1208
1209 (void) ipf_state_check(fin, &pass);
1210 }
1211
1212 switch (ipf_nat_checkout(fin, NULL))
1213 {
1214 case 0 :
1215 break;
1216 case 1 :
1217 ip->ip_sum = 0;
1218 break;
1219 case -1 :
1220 error = -1;
1221 goto bad;
1222 break;
1223 }
1224
1225 fin->fin_ifp = sifp;
1226 fin->fin_out = 0;
1227 } else
1228 ip->ip_sum = 0;
1229 /*
1230 * If small enough for interface, can just send directly.
1231 */
1232 m_set_rcvif(m, ifp);
1233
1234 ip_len = ntohs(ip->ip_len);
1235 if (ip_len <= ifp->if_mtu) {
1236 # if defined(M_CSUM_IPv4)
1237 # if (__NetBSD_Version__ >= 105009999)
1238 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1239 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1240 # else
1241 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1242 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1243 # endif /* (__NetBSD_Version__ >= 105009999) */
1244 else if (ip->ip_sum == 0)
1245 ip->ip_sum = in_cksum(m, hlen);
1246 # else
1247 if (!ip->ip_sum)
1248 ip->ip_sum = in_cksum(m, hlen);
1249 # endif /* M_CSUM_IPv4 */
1250
1251 error = if_output_lock(ifp, ifp, m, dst, rt);
1252 goto done;
1253 }
1254
1255 /*
1256 * Too large for interface; fragment if possible.
1257 * Must be able to put at least 8 bytes per fragment.
1258 */
1259 ip_off = ntohs(ip->ip_off);
1260 if (ip_off & IP_DF) {
1261 error = EMSGSIZE;
1262 goto bad;
1263 }
1264 len = (ifp->if_mtu - hlen) &~ 7;
1265 if (len < 8) {
1266 error = EMSGSIZE;
1267 goto bad;
1268 }
1269
1270 {
1271 int mhlen, firstlen = len;
1272 struct mbuf **mnext = &m->m_act;
1273
1274 /*
1275 * Loop through length of segment after first fragment,
1276 * make new header and copy data of each part and link onto chain.
1277 */
1278 m0 = m;
1279 mhlen = sizeof (struct ip);
1280 for (off = hlen + len; off < ip_len; off += len) {
1281 # ifdef MGETHDR
1282 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1283 # else
1284 MGET(m, M_DONTWAIT, MT_HEADER);
1285 # endif
1286 if (m == 0) {
1287 m = m0;
1288 error = ENOBUFS;
1289 goto bad;
1290 }
1291 m->m_data += max_linkhdr;
1292 mhip = mtod(m, struct ip *);
1293 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1294 #ifdef INET
1295 if (hlen > sizeof (struct ip)) {
1296 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1297 IP_HL_A(mhip, mhlen >> 2);
1298 }
1299 #endif
1300 m->m_len = mhlen;
1301 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1302 if (off + len >= ip_len)
1303 len = ip_len - off;
1304 else
1305 mhip->ip_off |= IP_MF;
1306 mhip->ip_len = htons((u_short)(len + mhlen));
1307 m->m_next = m_copy(m0, off, len);
1308 if (m->m_next == 0) {
1309 error = ENOBUFS; /* ??? */
1310 goto sendorfree;
1311 }
1312 m->m_pkthdr.len = mhlen + len;
1313 m_reset_rcvif(m);
1314 mhip->ip_off = htons((u_short)mhip->ip_off);
1315 mhip->ip_sum = 0;
1316 #ifdef INET
1317 mhip->ip_sum = in_cksum(m, mhlen);
1318 #endif
1319 *mnext = m;
1320 mnext = &m->m_act;
1321 }
1322 /*
1323 * Update first fragment by trimming what's been copied out
1324 * and updating header, then send each fragment (in order).
1325 */
1326 m_adj(m0, hlen + firstlen - ip_len);
1327 ip->ip_len = htons((u_short)(hlen + firstlen));
1328 ip->ip_off = htons((u_short)IP_MF);
1329 ip->ip_sum = 0;
1330 #ifdef INET
1331 ip->ip_sum = in_cksum(m0, hlen);
1332 #endif
1333 sendorfree:
1334 for (m = m0; m; m = m0) {
1335 m0 = m->m_act;
1336 m->m_act = 0;
1337 if (error == 0) {
1338 KERNEL_LOCK(1, NULL);
1339 error = (*ifp->if_output)(ifp, m, dst, rt);
1340 KERNEL_UNLOCK_ONE(NULL);
1341 } else {
1342 FREE_MB_T(m);
1343 }
1344 }
1345 }
1346 done:
1347 if (!error)
1348 softc->ipf_frouteok[0]++;
1349 else
1350 softc->ipf_frouteok[1]++;
1351
1352 # if __NetBSD_Version__ >= 499001100
1353 rtcache_unref(rt, ro);
1354 rtcache_free(ro);
1355 # else
1356 if (rt) {
1357 RTFREE(rt);
1358 }
1359 # endif
1360 return error;
1361 bad:
1362 if (error == EMSGSIZE) {
1363 sifp = fin->fin_ifp;
1364 code = fin->fin_icode;
1365 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1366 fin->fin_ifp = ifp;
1367 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1368 fin->fin_ifp = sifp;
1369 fin->fin_icode = code;
1370 }
1371 FREE_MB_T(m);
1372 goto done;
1373 #endif /* INET */
1374 }
1375
1376
1377 #if defined(USE_INET6)
1378 /*
1379  * This is the IPv6-specific fastroute code.  It does not clean up the
1380  * mbufs or verify that the packet being forwarded is IPv6; both are
1381  * expected to be done by the caller (ipf_fastroute).
1382 */
1383 static int
1384 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1385 frdest_t *fdp)
1386 {
1387 # if __NetBSD_Version__ >= 499001100
1388 struct route ip6route;
1389 const struct sockaddr *dst;
1390 union {
1391 struct sockaddr dst;
1392 struct sockaddr_in6 dst6;
1393 } u;
1394 struct route *ro;
1395 # else
1396 struct route_in6 ip6route;
1397 struct sockaddr_in6 *dst6;
1398 struct route_in6 *ro;
1399 # endif
1400 struct rtentry *rt;
1401 struct ifnet *ifp;
1402 u_long mtu;
1403 int error;
1404
1405 error = 0;
1406 ro = &ip6route;
1407
1408 if (fdp != NULL)
1409 ifp = fdp->fd_ptr;
1410 else
1411 ifp = fin->fin_ifp;
1412 memset(ro, 0, sizeof(*ro));
1413 # if __NetBSD_Version__ >= 499001100
1414 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1415 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1416 else
1417 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1418 dst = &u.dst;
1419 rtcache_setdst(ro, dst);
1420
1421 rt = rtcache_init(ro);
1422 if ((ifp == NULL) && (rt != NULL))
1423 ifp = rt->rt_ifp;
1424 # else
1425 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1426 dst6->sin6_family = AF_INET6;
1427 dst6->sin6_len = sizeof(struct sockaddr_in6);
1428 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1429
1430 if (fdp != NULL) {
1431 if (IP6_NOTZERO(&fdp->fd_ip6))
1432 dst6->sin6_addr = fdp->fd_ip6.in6;
1433 }
1434
1435 rtalloc((struct route *)ro);
1436
1437 if ((ifp == NULL) && (ro->ro_rt != NULL))
1438 ifp = ro->ro_rt->rt_ifp;
1439 rt = ro->ro_rt;
1440 # endif
1441 if ((rt == NULL) || (ifp == NULL)) {
1442
1443 error = EHOSTUNREACH;
1444 goto bad;
1445 }
1446
1447 /* KAME */
1448 # if __NetBSD_Version__ >= 499001100
1449 if (IN6_IS_ADDR_LINKLOCAL(&u.dst6.sin6_addr))
1450 u.dst6.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1451 # else
1452 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1453 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1454 # endif
1455
1456 {
1457 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
1458 struct in6_ifextra *ife;
1459 # endif
1460 if (rt->rt_flags & RTF_GATEWAY)
1461 # if __NetBSD_Version__ >= 499001100
1462 dst = rt->rt_gateway;
1463 # else
1464 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1465 # endif
1466 rt->rt_use++;
1467
1468 /* Determine path MTU. */
1469 # if (__NetBSD_Version__ <= 106009999)
1470 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1471 # else
1472 # ifdef IN6_LINKMTU
1473 mtu = IN6_LINKMTU(ifp);
1474 # else
1475 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1476 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1477 # endif
1478 # endif
1479 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1480 # if __NetBSD_Version__ >= 499001100
1481 error = nd6_output(ifp, ifp, m0, satocsin6(dst), rt);
1482 # else
1483 error = nd6_output(ifp, ifp, m0, dst6, rt);
1484 # endif
1485 } else {
1486 error = EMSGSIZE;
1487 }
1488 }
1489 bad:
1490 # if __NetBSD_Version__ >= 499001100
1491 rtcache_unref(rt, ro);
1492 rtcache_free(ro);
1493 # else
1494 if (ro->ro_rt != NULL) {
1495 RTFREE(((struct route *)ro)->ro_rt);
1496 }
1497 # endif
1498 return error;
1499 }
1500 #endif /* INET6 */
1501
1502
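/*
 * Reverse-path check: look up a route back to the packet's source address
 * and report whether it points out the interface the packet arrived on.
 */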
1503 int
1504 ipf_verifysrc(fr_info_t *fin)
1505 {
1506 #if __NetBSD_Version__ >= 499001100
1507 union {
1508 struct sockaddr dst;
1509 struct sockaddr_in dst4;
1510 } u;
1511 struct rtentry *rt;
1512 #else
1513 struct sockaddr_in *dst;
1514 #endif
1515 struct route iproute;
1516 int rc;
1517
1518 #if __NetBSD_Version__ >= 499001100
1519 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1520 rtcache_setdst(&iproute, &u.dst);
1521 rt = rtcache_init(&iproute);
1522 if (rt == NULL)
1523 rc = 0;
1524 else
1525 rc = (fin->fin_ifp == rt->rt_ifp);
1526 rtcache_unref(rt, &iproute);
1527 rtcache_free(&iproute);
1528 #else
1529 dst = (struct sockaddr_in *)&iproute.ro_dst;
1530 dst->sin_len = sizeof(*dst);
1531 dst->sin_family = AF_INET;
1532 dst->sin_addr = fin->fin_src;
1533 rtalloc(&iproute);
1534 if (iproute.ro_rt == NULL)
1535 return 0;
1536 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1537 RTFREE(iproute.ro_rt);
1538 #endif
1539 return rc;
1540 }
1541
1542
1543 /*
1544  * return the first IP address associated with an interface
1545 */
1546 int
1547 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1548 i6addr_t *inp, i6addr_t *inpmask)
1549 {
1550 #ifdef USE_INET6
1551 struct in6_addr *inp6 = NULL;
1552 #endif
1553 struct sockaddr *sock, *mask;
1554 struct sockaddr_in *sin;
1555 struct ifaddr *ifa;
1556 struct ifnet *ifp;
1557
1558 if ((ifptr == NULL) || (ifptr == (void *)-1))
1559 return -1;
1560
1561 ifp = ifptr;
1562 mask = NULL;
1563
1564 if (v == 4)
1565 inp->in4.s_addr = 0;
1566 #ifdef USE_INET6
1567 else if (v == 6)
1568 bzero((char *)inp, sizeof(*inp));
1569 #endif
1570
1571 ifa = IFADDR_READER_FIRST(ifp);
1572 sock = ifa ? ifa->ifa_addr : NULL;
1573 while (sock != NULL && ifa != NULL) {
1574 sin = (struct sockaddr_in *)sock;
1575 if ((v == 4) && (sin->sin_family == AF_INET))
1576 break;
1577 #ifdef USE_INET6
1578 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1579 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1580 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1581 !IN6_IS_ADDR_LOOPBACK(inp6))
1582 break;
1583 }
1584 #endif
1585 ifa = IFADDR_READER_NEXT(ifa);
1586 if (ifa != NULL)
1587 sock = ifa->ifa_addr;
1588 }
1589 if (ifa == NULL || sock == NULL)
1590 return -1;
1591
1592 mask = ifa->ifa_netmask;
1593 if (atype == FRI_BROADCAST)
1594 sock = ifa->ifa_broadaddr;
1595 else if (atype == FRI_PEERADDR)
1596 sock = ifa->ifa_dstaddr;
1597
1598 #ifdef USE_INET6
1599 if (v == 6)
1600 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1601 (struct sockaddr_in6 *)mask,
1602 inp, inpmask);
1603 #endif
1604 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1605 (struct sockaddr_in *)mask,
1606 &inp->in4, &inpmask->in4);
1607 }
1608
1609
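/*
 * Generate an initial TCP sequence number for a new connection.  Recent
 * kernels delegate to tcp_new_iss1(); the fallback below hashes the
 * addresses and ports with MD5 and adds a stepping offset.
 */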
1610 u_32_t
1611 ipf_newisn(fr_info_t *fin)
1612 {
1613 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1614 size_t asz;
1615
1616 if (fin->fin_v == 4)
1617 asz = sizeof(struct in_addr);
1618 else if (fin->fin_v == 6)
1619 asz = sizeof(fin->fin_src);
1620 else /* XXX: no way to return error */
1621 return 0;
1622 #ifdef INET
1623 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1624 fin->fin_sport, fin->fin_dport, asz, 0);
1625 #else
1626 return ENOSYS;
1627 #endif
1628 #else
1629 static int iss_seq_off = 0;
1630 u_char hash[16];
1631 u_32_t newiss;
1632 MD5_CTX ctx;
1633
1634 /*
1635 * Compute the base value of the ISS. It is a hash
1636 * of (saddr, sport, daddr, dport, secret).
1637 */
1638 MD5Init(&ctx);
1639
1640 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1641 sizeof(fin->fin_fi.fi_src));
1642 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1643 sizeof(fin->fin_fi.fi_dst));
1644 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1645
1646 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1647
1648 MD5Final(hash, &ctx);
1649
1650 memcpy(&newiss, hash, sizeof(newiss));
1651
1652 /*
1653 * Now increment our "timer", and add it in to
1654 * the computed value.
1655 *
1656 * XXX Use `addin'?
1657 * XXX TCP_ISSINCR too large to use?
1658 */
1659 iss_seq_off += 0x00010000;
1660 newiss += iss_seq_off;
1661 return newiss;
1662 #endif
1663 }
1664
1665
1666 /* ------------------------------------------------------------------------ */
1667 /* Function: ipf_nextipid */
1668 /* Returns:     u_short - the next IPv4 identification value to use         */
1669 /* Parameters: fin(I) - pointer to packet information */
1670 /* */
1671 /* Returns the next IPv4 ID to use for this packet. */
1672 /* ------------------------------------------------------------------------ */
1673 u_short
1674 ipf_nextipid(fr_info_t *fin)
1675 {
1676 #ifdef USE_MUTEXES
1677 ipf_main_softc_t *softc = fin->fin_main_soft;
1678 #endif
1679 u_short id;
1680
1681 MUTEX_ENTER(&softc->ipf_rw);
1682 id = ipid++;
1683 MUTEX_EXIT(&softc->ipf_rw);
1684
1685 return id;
1686 }
1687
1688
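/*
 * Validate the TCP/UDP checksum of an IPv4 packet, trusting the result of
 * hardware receive-checksum offload (M_CSUM_* flags) where the interface
 * provides it and falling back to ipf_checkl4sum() otherwise.
 */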
1689 EXTERN_INLINE int
1690 ipf_checkv4sum(fr_info_t *fin)
1691 {
1692 #ifdef M_CSUM_TCP_UDP_BAD
1693 int manual, pflag, cflags, active;
1694 mb_t *m;
1695
1696 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1697 return 0;
1698
1699 if ((fin->fin_flx & FI_SHORT) != 0)
1700 return 1;
1701
1702 if (fin->fin_cksum != FI_CK_NEEDED)
1703 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1704
1705 manual = 0;
1706 m = fin->fin_m;
1707 if (m == NULL) {
1708 manual = 1;
1709 goto skipauto;
1710 }
1711
1712 switch (fin->fin_p)
1713 {
1714 case IPPROTO_UDP :
1715 pflag = M_CSUM_UDPv4;
1716 break;
1717 case IPPROTO_TCP :
1718 pflag = M_CSUM_TCPv4;
1719 break;
1720 default :
1721 pflag = 0;
1722 manual = 1;
1723 break;
1724 }
1725
1726 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1727 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1728 cflags = m->m_pkthdr.csum_flags & active;
1729
1730 if (pflag != 0) {
1731 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1732 fin->fin_flx |= FI_BAD;
1733 fin->fin_cksum = FI_CK_BAD;
1734 } else if (cflags == (pflag | M_CSUM_DATA)) {
1735 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1736 fin->fin_flx |= FI_BAD;
1737 fin->fin_cksum = FI_CK_BAD;
1738 } else {
1739 fin->fin_cksum = FI_CK_SUMOK;
1740 }
1741 } else if (cflags == pflag) {
1742 fin->fin_cksum = FI_CK_SUMOK;
1743 } else {
1744 manual = 1;
1745 }
1746 }
1747 skipauto:
1748 if (manual != 0) {
1749 if (ipf_checkl4sum(fin) == -1) {
1750 fin->fin_flx |= FI_BAD;
1751 return -1;
1752 }
1753 }
1754 #else
1755 if (ipf_checkl4sum(fin) == -1) {
1756 fin->fin_flx |= FI_BAD;
1757 return -1;
1758 }
1759 #endif
1760 return 0;
1761 }
1762
1763
1764 #ifdef USE_INET6
1765 EXTERN_INLINE int
1766 ipf_checkv6sum(fr_info_t *fin)
1767 {
1768 # ifdef M_CSUM_TCP_UDP_BAD
1769 int manual, pflag, cflags, active;
1770 mb_t *m;
1771
1772 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1773 return 0;
1774
1775 if ((fin->fin_flx & FI_SHORT) != 0)
1776 return 1;
1777
1778 if (fin->fin_cksum != FI_CK_SUMOK)
1779 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1780
1781
1782 manual = 0;
1783 m = fin->fin_m;
1784
1785 switch (fin->fin_p)
1786 {
1787 case IPPROTO_UDP :
1788 pflag = M_CSUM_UDPv6;
1789 break;
1790 case IPPROTO_TCP :
1791 pflag = M_CSUM_TCPv6;
1792 break;
1793 default :
1794 pflag = 0;
1795 manual = 1;
1796 break;
1797 }
1798
1799 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1800 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1801 cflags = m->m_pkthdr.csum_flags & active;
1802
1803 if (pflag != 0) {
1804 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1805 fin->fin_flx |= FI_BAD;
1806 } else if (cflags == (pflag | M_CSUM_DATA)) {
1807 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1808 fin->fin_flx |= FI_BAD;
1809 } else if (cflags == pflag) {
1810 ;
1811 } else {
1812 manual = 1;
1813 }
1814 }
1815 if (manual != 0) {
1816 if (ipf_checkl4sum(fin) == -1) {
1817 fin->fin_flx |= FI_BAD;
1818 return -1;
1819 }
1820 }
1821 # else
1822 if (ipf_checkl4sum(fin) == -1) {
1823 fin->fin_flx |= FI_BAD;
1824 return -1;
1825 }
1826 # endif
1827 return 0;
1828 }
1829 #endif /* USE_INET6 */
1830
1831
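/*
 * Return the total number of data bytes in an mbuf chain, preferring the
 * packet header length when one is present.
 */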
1832 size_t
1833 mbufchainlen(struct mbuf *m0)
1834 {
1835 size_t len;
1836
1837 if ((m0->m_flags & M_PKTHDR) != 0) {
1838 len = m0->m_pkthdr.len;
1839 } else {
1840 struct mbuf *m;
1841
1842 for (m = m0, len = 0; m != NULL; m = m->m_next)
1843 len += m->m_len;
1844 }
1845 return len;
1846 }
1847
1848
1849 /* ------------------------------------------------------------------------ */
1850 /* Function: ipf_pullup */
1851 /* Returns: NULL == pullup failed, else pointer to protocol header */
1852 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1853 /* fin(I) - pointer to packet information */
1854 /* len(I) - number of bytes to pullup */
1855 /* */
1856 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1857 /* single buffer for ease of access. Operating system native functions are */
1858 /* used to manage buffers - if necessary. If the entire packet ends up in */
1859 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1860 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1861 /* and ONLY if the pullup succeeds. */
1862 /* */
1863 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1864 /* of buffers that starts at *fin->fin_mp. */
1865 /* ------------------------------------------------------------------------ */
1866 void *
1867 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1868 {
1869 int dpoff, ipoff;
1870 mb_t *m = xmin;
1871 char *ip;
1872
1873 if (m == NULL)
1874 return NULL;
1875
1876 ip = (char *)fin->fin_ip;
1877 if ((fin->fin_flx & FI_COALESCE) != 0)
1878 return ip;
1879
1880 ipoff = fin->fin_ipoff;
1881 if (fin->fin_dp != NULL)
1882 dpoff = (char *)fin->fin_dp - (char *)ip;
1883 else
1884 dpoff = 0;
1885
1886 if (M_LEN(m) < len) {
1887 mb_t *n = *fin->fin_mp;
1888 /*
1889 * Assume that M_PKTHDR is set and just work with what is left
1890 		 * rather than check.
1891 * Should not make any real difference, anyway.
1892 */
1893 if (m != n) {
1894 /*
1895 * Record the mbuf that points to the mbuf that we're
1896 * about to go to work on so that we can update the
1897 * m_next appropriately later.
1898 */
1899 for (; n->m_next != m; n = n->m_next)
1900 ;
1901 } else {
1902 n = NULL;
1903 }
1904
1905 #ifdef MHLEN
1906 if (len > MHLEN)
1907 #else
1908 if (len > MLEN)
1909 #endif
1910 {
1911 #ifdef HAVE_M_PULLDOWN
1912 if (m_pulldown(m, 0, len, NULL) == NULL)
1913 m = NULL;
1914 #else
1915 FREE_MB_T(*fin->fin_mp);
1916 m = NULL;
1917 n = NULL;
1918 #endif
1919 } else
1920 {
1921 m = m_pullup(m, len);
1922 }
1923 if (n != NULL)
1924 n->m_next = m;
1925 if (m == NULL) {
1926 /*
1927 * When n is non-NULL, it indicates that m pointed to
1928 * a sub-chain (tail) of the mbuf and that the head
1929 * of this chain has not yet been free'd.
1930 */
1931 if (n != NULL) {
1932 FREE_MB_T(*fin->fin_mp);
1933 }
1934
1935 *fin->fin_mp = NULL;
1936 fin->fin_m = NULL;
1937 return NULL;
1938 }
1939
1940 if (n == NULL)
1941 *fin->fin_mp = m;
1942
1943 while (M_LEN(m) == 0) {
1944 m = m->m_next;
1945 }
1946 fin->fin_m = m;
1947 ip = MTOD(m, char *) + ipoff;
1948
1949 fin->fin_ip = (ip_t *)ip;
1950 if (fin->fin_dp != NULL)
1951 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1952 if (fin->fin_fraghdr != NULL)
1953 fin->fin_fraghdr = (char *)ip +
1954 ((char *)fin->fin_fraghdr -
1955 (char *)fin->fin_ip);
1956 }
1957
1958 if (len == fin->fin_plen)
1959 fin->fin_flx |= FI_COALESCE;
1960 return ip;
1961 }
1962
1963
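/*
 * Re-inject a packet that ipfilter has built or modified: inbound packets
 * are queued back onto the IP input queue, outbound packets are handed
 * straight to ip_output() with IP_FORWARDING set.
 */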
1964 int
1965 ipf_inject(fr_info_t *fin, mb_t *m)
1966 {
1967 int error;
1968
1969 if (fin->fin_out == 0) {
1970 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1971 FREE_MB_T(m);
1972 error = ENOBUFS;
1973 } else {
1974 error = 0;
1975 }
1976 } else {
1977 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1978 }
1979 return error;
1980 }
1981
1982
1983 u_32_t
1984 ipf_random(void)
1985 {
1986 int number;
1987
1988 #ifdef _CPRNG_H
1989 number = cprng_fast32();
1990 #else
1991 number = arc4random();
1992 #endif
1993 return number;
1994 }
1995
1996
1997 /*
1998 * routines below for saving IP headers to buffer
1999 */
2000 static int ipfopen(dev_t dev, int flags
2001 #if (NetBSD >= 199511)
2002 , int devtype, PROC_T *p
2003 #endif
2004 )
2005 {
2006 u_int unit = GET_MINOR(dev);
2007 int error;
2008
2009 if (IPL_LOGMAX < unit) {
2010 error = ENXIO;
2011 } else {
2012 switch (unit)
2013 {
2014 case IPL_LOGIPF :
2015 case IPL_LOGNAT :
2016 case IPL_LOGSTATE :
2017 case IPL_LOGAUTH :
2018 case IPL_LOGLOOKUP :
2019 case IPL_LOGSYNC :
2020 #ifdef IPFILTER_SCAN
2021 case IPL_LOGSCAN :
2022 #endif
2023 error = 0;
2024 break;
2025 default :
2026 error = ENXIO;
2027 break;
2028 }
2029 }
2030 #if (__NetBSD_Version__ >= 799003000)
2031 if (error == 0) {
2032 mutex_enter(&ipf_ref_mutex);
2033 ipf_active = 1;
2034 mutex_exit(&ipf_ref_mutex);
2035 }
2036 #endif
2037 return error;
2038 }
2039
2040
2041 static int ipfclose(dev_t dev, int flags
2042 #if (NetBSD >= 199511)
2043 , int devtype, PROC_T *p
2044 #endif
2045 )
2046 {
2047 u_int unit = GET_MINOR(dev);
2048
2049 if (IPL_LOGMAX < unit)
2050 return ENXIO;
2051 else {
2052 #if (__NetBSD_Version__ >= 799003000)
2053 mutex_enter(&ipf_ref_mutex);
2054 ipf_active = 0;
2055 mutex_exit(&ipf_ref_mutex);
2056 #endif
2057 return 0;
2058 }
2059 }
2060
2061 /*
2062 * ipfread/ipflog
2063 * both of these must operate with at least splnet() lest they be
2064  * called during packet processing and cause an inconsistency to appear in
2065 * the filter lists.
2066 */
2067 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2068 {
2069
2070 if (ipfmain.ipf_running < 1) {
2071 ipfmain.ipf_interror = 130006;
2072 return EIO;
2073 }
2074
2075 if (GET_MINOR(dev) == IPL_LOGSYNC)
2076 return ipf_sync_read(&ipfmain, uio);
2077
2078 #ifdef IPFILTER_LOG
2079 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2080 #else
2081 ipfmain.ipf_interror = 130007;
2082 return ENXIO;
2083 #endif
2084 }
2085
2086
2087 /*
2088 * ipfwrite
2089 * both of these must operate with at least splnet() lest they be
2090  * called during packet processing and cause an inconsistency to appear in
2091 * the filter lists.
2092 */
2093 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2094 {
2095
2096 if (ipfmain.ipf_running < 1) {
2097 ipfmain.ipf_interror = 130008;
2098 return EIO;
2099 }
2100
2101 if (GET_MINOR(dev) == IPL_LOGSYNC)
2102 return ipf_sync_write(&ipfmain, uio);
2103 ipfmain.ipf_interror = 130009;
2104 return ENXIO;
2105 }
2106
2107
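/*
 * poll(2) backend for the ipl devices: the log, auth and sync units
 * report readability (and the sync unit writability); otherwise the
 * caller is recorded with selrecord() for a later wakeup.
 */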
2108 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2109 {
2110 u_int unit = GET_MINOR(dev);
2111 int revents = 0;
2112
2113 if (IPL_LOGMAX < unit) {
2114 ipfmain.ipf_interror = 130010;
2115 return ENXIO;
2116 }
2117
2118 switch (unit)
2119 {
2120 case IPL_LOGIPF :
2121 case IPL_LOGNAT :
2122 case IPL_LOGSTATE :
2123 #ifdef IPFILTER_LOG
2124 if ((events & (POLLIN | POLLRDNORM)) &&
2125 ipf_log_canread(&ipfmain, unit))
2126 revents |= events & (POLLIN | POLLRDNORM);
2127 #endif
2128 break;
2129 case IPL_LOGAUTH :
2130 if ((events & (POLLIN | POLLRDNORM)) &&
2131 ipf_auth_waiting(&ipfmain))
2132 revents |= events & (POLLIN | POLLRDNORM);
2133 break;
2134 case IPL_LOGSYNC :
2135 if ((events & (POLLIN | POLLRDNORM)) &&
2136 ipf_sync_canread(&ipfmain))
2137 revents |= events & (POLLIN | POLLRDNORM);
2138 if ((events & (POLLOUT | POLLWRNORM)) &&
2139 ipf_sync_canwrite(&ipfmain))
2140 revents |= events & (POLLOUT | POLLWRNORM);
2141 break;
2142 case IPL_LOGSCAN :
2143 case IPL_LOGLOOKUP :
2144 default :
2145 break;
2146 }
2147
2148 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2149 selrecord(p, &ipfmain.ipf_selwait[unit]);
2150 return revents;
2151 }
2152
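/*
 * Finish a pseudo-header checksum: temporarily advance the mbuf past the
 * IP header, checksum the transport payload with in_cksum(), then fold
 * the partial sums together and return the complemented result.
 */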
2153 u_int
2154 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2155 {
2156 struct mbuf *m;
2157 u_int sum2;
2158 int off;
2159
2160 m = fin->fin_m;
2161 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
2162 m->m_data += hlen;
2163 m->m_len -= hlen;
2164 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2165 m->m_len += hlen;
2166 m->m_data -= hlen;
2167
2168 /*
2169 	 * Both sum and sum2 are partial sums, so combine them.
2170 */
2171 sum += ~sum2 & 0xffff;
2172 while (sum > 0xffff)
2173 sum = (sum & 0xffff) + (sum >> 16);
2174 sum2 = ~sum & 0xffff;
2175 return sum2;
2176 }
2177
2178 #if (__NetBSD_Version__ >= 799003000)
2179
2180 /* NetBSD module interface */
2181
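/*
 * Kernel module glue: ipl_modcmd() dispatches MODULE_CMD_INIT/FINI to
 * ipl_init()/ipl_fini(), and ipl_fini() refuses to unload while the
 * device is open or the filter is running.
 */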
2182 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2183
2184 static int ipl_init(void *);
2185 static int ipl_fini(void *);
2186 static int ipl_modcmd(modcmd_t, void *);
2187
2188 #ifdef _MODULE
2189 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2190 #endif
2191
2192 static int
2193 ipl_modcmd(modcmd_t cmd, void *opaque)
2194 {
2195
2196 switch (cmd) {
2197 case MODULE_CMD_INIT:
2198 return ipl_init(opaque);
2199 case MODULE_CMD_FINI:
2200 return ipl_fini(opaque);
2201 default:
2202 return ENOTTY;
2203 }
2204 }
2205
2206 static int
2207 ipl_init(void *opaque)
2208 {
2209 int error;
2210
2211 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2212 ipf_listener_cb, NULL);
2213
2214 if ((error = ipf_load_all()) != 0)
2215 return error;
2216
2217 if (ipf_create_all(&ipfmain) == NULL) {
2218 ipf_unload_all();
2219 return ENODEV;
2220 }
2221
2222 /* Initialize our mutex and reference count */
2223 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2224 ipf_active = 0;
2225
2226 #ifdef _MODULE
2227 /*
2228 	 * Insert ourselves into the cdevsw list.
2229 */
2230 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2231 if (error)
2232 ipl_fini(opaque);
2233 #endif
2234
2235 return error;
2236 }
2237
2238 static int
2239 ipl_fini(void *opaque)
2240 {
2241
2242 #ifdef _MODULE
2243 (void)devsw_detach(NULL, &ipl_cdevsw);
2244 #endif
2245
2246 /*
2247 * Grab the mutex, verify that there are no references
2248 * and that there are no running filters. If either
2249 * of these exists, reinsert our cdevsw entry and return
2250 * an error.
2251 */
2252 mutex_enter(&ipf_ref_mutex);
2253 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2254 #ifdef _MODULE
2255 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2256 &ipl_cdevsw, &ipl_cmaj);
2257 #endif
2258 mutex_exit(&ipf_ref_mutex);
2259 return EBUSY;
2260 }
2261
2262 /* Clean up the rest of our state before being unloaded */
2263
2264 mutex_exit(&ipf_ref_mutex);
2265 mutex_destroy(&ipf_ref_mutex);
2266 ipf_destroy_all(&ipfmain);
2267 ipf_unload_all();
2268 kauth_unlisten_scope(ipf_listener);
2269
2270 return 0;
2271 }
2272 #endif /* (__NetBSD_Version__ >= 799003000) */
2273