1 /* $NetBSD: ip_fil_netbsd.c,v 1.16.2.5 2017/01/07 08:56:47 pgoyette Exp $ */
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.16.2.5 2017/01/07 08:56:47 pgoyette Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60 #if (__NetBSD_Version__ >= 799003300)
61 #include <sys/localcount.h>
62 #endif
63
64 #include <net/if.h>
65 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/tcp.h>
72 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
73 # include <netinet/tcp_timer.h>
74 # include <netinet/tcp_var.h>
75 #endif
76 #include <netinet/udp.h>
77 #include <netinet/tcpip.h>
78 #include <netinet/ip_icmp.h>
79 #include "netinet/ip_compat.h"
80 #ifdef USE_INET6
81 # include <netinet/icmp6.h>
82 # if (__NetBSD_Version__ >= 106000000)
83 # include <netinet6/nd6.h>
84 # endif
85 #endif
86 #include "netinet/ip_fil.h"
87 #include "netinet/ip_nat.h"
88 #include "netinet/ip_frag.h"
89 #include "netinet/ip_state.h"
90 #include "netinet/ip_proxy.h"
91 #include "netinet/ip_auth.h"
92 #include "netinet/ip_sync.h"
93 #include "netinet/ip_lookup.h"
94 #include "netinet/ip_dstlist.h"
95 #ifdef IPFILTER_SCAN
96 #include "netinet/ip_scan.h"
97 #endif
98 #include <sys/md5.h>
99 #include <sys/kernel.h>
100 #include <sys/conf.h>
101 #ifdef INET
102 extern int ip_optcopy (struct ip *, struct ip *);
103 #endif
104
105 #ifdef IPFILTER_M_IPFILTER
106 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
107 #endif
108
109 #if __NetBSD_Version__ >= 105009999
110 # define csuminfo csum_flags
111 #endif
112
113 #if __NetBSD_Version__ < 200000000
114 extern struct protosw inetsw[];
115 #endif
116
117 #if (__NetBSD_Version__ >= 599002000)
118 static kauth_listener_t ipf_listener;
119 #endif
120
121 #if (__NetBSD_Version__ < 399001400)
122 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
123 struct ifnet *, struct in6_addr *, u_long *,
124 int *);
125 #endif
126 #if (NetBSD >= 199511)
127 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
128 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
129 #else
130 # if (__NetBSD_Version__ >= 399001400)
131 static int ipfopen(dev_t dev, int flags, struct lwp *);
132 static int ipfclose(dev_t dev, int flags, struct lwp *);
133 # else
134 static int ipfopen(dev_t dev, int flags);
135 static int ipfclose(dev_t dev, int flags);
136 # endif /* __NetBSD_Version__ >= 399001400 */
137 #endif
138 static int ipfread(dev_t, struct uio *, int ioflag);
139 static int ipfwrite(dev_t, struct uio *, int ioflag);
140 static int ipfpoll(dev_t, int events, PROC_T *);
141 static void ipf_timer_func(void *ptr);
142
143 const struct cdevsw ipl_cdevsw = {
144 #if (__NetBSD_Version__ >= 799003300)
145 DEVSW_MODULE_INIT
146 #endif
147 .d_open = ipfopen,
148 .d_close = ipfclose,
149 .d_read = ipfread,
150 .d_write = ipfwrite,
151 .d_ioctl = ipfioctl,
152 .d_stop = nostop,
153 .d_tty = notty,
154 .d_poll = ipfpoll,
155 .d_mmap = nommap,
156 #if (__NetBSD_Version__ >= 200000000)
157 .d_kqfilter = nokqfilter,
158 #endif
159 .d_discard = nodiscard,
160 #ifdef D_OTHER
161 .d_flag = D_OTHER
162 #else
163 .d_flag = 0
164 #endif
165 };
166 #if (__NetBSD_Version__ >= 799003000)
167 kmutex_t ipf_ref_mutex;
168 int ipf_active;
169 #endif
170
171 ipf_main_softc_t ipfmain;
172
173 static u_short ipid = 0;
174 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
175 static int ipf_send_ip(fr_info_t *, mb_t *);
176 #ifdef USE_INET6
177 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
178 fr_info_t *, frdest_t *);
179 #endif
180
181 #if defined(NETBSD_PF)
182 # include <net/pfil.h>
183 /*
184 * We provide the ipf_checkp name just to minimize changes later.
185 */
186 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
187 #endif /* NETBSD_PF */
188
189 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
190 # include <net/pfil.h>
191
192 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
193
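/*
 * ipf_check_wrapper() is the hook that ipfattach() below registers with
 * pfil_add_hook() on the AF_INET pfil head; pfil invokes it for both
 * PFIL_IN and PFIL_OUT traffic, and the (dir == PFIL_OUT) test becomes the
 * "out" argument to ipf_check().  A rough sketch of the call the framework
 * makes (illustrative only, the variable names are not from this file):
 *
 *	rv = ipf_check_wrapper(NULL, &m, rcvif, PFIL_IN);
 *	a non-zero rv means the packet was dropped or consumed by the filter
 */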
194 static int
195 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
196 {
197 struct ip *ip;
198 int rv, hlen;
199
200 #if __NetBSD_Version__ >= 200080000
201 /*
202 * Ensure that the mbufs are writable beforehand,
203 * as the ipf code assumes they are.
204 * XXX inefficient
205 */
206 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
207
208 if (error) {
209 m_freem(*mp);
210 *mp = NULL;
211 return error;
212 }
213 #endif
214 ip = mtod(*mp, struct ip *);
215 hlen = ip->ip_hl << 2;
216
217 #ifdef INET
218 #if defined(M_CSUM_TCPv4)
219 /*
220 * If the packet is out-bound, we can't delay checksums
221 * here. For in-bound, the checksum has already been
222 * validated.
223 */
224 if (dir == PFIL_OUT) {
225 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
226 in_delayed_cksum(*mp);
227 (*mp)->m_pkthdr.csum_flags &=
228 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
229 }
230 }
231 #endif /* M_CSUM_TCPv4 */
232 #endif /* INET */
233
234 /*
235 * Note, we don't need to update the checksum, because
236 * it has already been verified.
237 */
238 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
239
240 return (rv);
241 }
242
243 # ifdef USE_INET6
244 # include <netinet/ip6.h>
245
246 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
247
248 static int
249 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
250 {
251 #if defined(INET6)
252 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
253 /*
254 * If the packet is out-bound, we can't delay checksums
255 * here. For in-bound, the checksum has already been
256 * validated.
257 */
258 if (dir == PFIL_OUT) {
259 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
260 # if (__NetBSD_Version__ > 399000600)
261 in6_delayed_cksum(*mp);
262 # endif
263 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
264 M_CSUM_UDPv6);
265 }
266 }
267 # endif
268 #endif /* INET6 */
269
270 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
271 ifp, (dir == PFIL_OUT), mp));
272 }
273 # endif
274
275
276 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
277
278 # if (__NetBSD_Version__ >= 799000400)
279
280 static void ipf_pfilsync(void *, unsigned long, void *);
281
282 static void
283 ipf_pfilsync(void *hdr, unsigned long cmd, void *arg2)
284 {
285 /*
286 * The interface pointer is useless for create (we have nothing to
287 * compare it to). At detach, the interface name is still in the
288 * list of active NICs (albeit down, which is not a real indicator)
289 * and doing ifunit() on the name will still return the pointer,
290 * so it's not much use then, either.
291 */
292 ipf_sync(&ipfmain, NULL);
293 }
294
295 # else
296
297 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
298
299 static int
300 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
301 {
302 ipf_sync(&ipfmain, NULL);
303 return 0;
304 }
305
306 # endif
307 # endif
308
309 #endif /* __NetBSD_Version__ >= 105110000 */
310
311
312 #if defined(IPFILTER_LKM)
313 int
314 ipf_identify(s)
315 char *s;
316 {
317 if (strcmp(s, "ipl") == 0)
318 return 1;
319 return 0;
320 }
321 #endif /* IPFILTER_LKM */
322
323 #if (__NetBSD_Version__ >= 599002000)
324 static int
325 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
326 void *arg0, void *arg1, void *arg2, void *arg3)
327 {
328 int result;
329 enum kauth_network_req req;
330
331 result = KAUTH_RESULT_DEFER;
332 req = (enum kauth_network_req)arg0;
333
334 if (action != KAUTH_NETWORK_FIREWALL)
335 return result;
336
337 /* These must have come from device context. */
338 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
339 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
340 result = KAUTH_RESULT_ALLOW;
341
342 return result;
343 }
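/*
 * The listener above is registered on the network kauth scope (see
 * ipfilterattach() and ipl_init() below) and simply allows the
 * KAUTH_REQ_NETWORK_FIREWALL_FW/_NAT requests that ipfioctl() later
 * authorizes via kauth_authorize_network().
 */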
344 #endif
345
346 /*
347 * Pseudo-device attach for NetBSD; on 7.99.30+ the module code (ipl_init) does the setup instead.
348 */
349 void
350 ipfilterattach(int count)
351 {
352
353 #if (__NetBSD_Version__ >= 799003000)
354 return;
355 #else
356 #if (__NetBSD_Version__ >= 599002000)
357 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
358 ipf_listener_cb, NULL);
359 #endif
360
361 if (ipf_load_all() == 0)
362 (void) ipf_create_all(&ipfmain);
363 #endif
364 }
365
366
367 int
368 ipfattach(ipf_main_softc_t *softc)
369 {
370 SPL_INT(s);
371 #if (__NetBSD_Version__ >= 499005500)
372 int i;
373 #endif
374 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
375 int error = 0;
376 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
377 pfil_head_t *ph_inet;
378 # ifdef USE_INET6
379 pfil_head_t *ph_inet6;
380 # endif
381 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
382 pfil_head_t *ph_ifsync;
383 # endif
384 # endif
385 #endif
386
387 SPL_NET(s);
388 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
389 printf("IP Filter: already initialized\n");
390 SPL_X(s);
391 IPFERROR(130017);
392 return EBUSY;
393 }
394
395 if (ipf_init_all(softc) < 0) {
396 SPL_X(s);
397 IPFERROR(130015);
398 return EIO;
399 }
400
401 #ifdef NETBSD_PF
402 # if (__NetBSD_Version__ >= 104200000)
403 # if __NetBSD_Version__ >= 105110000
404 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
405 # ifdef USE_INET6
406 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
407 # endif
408 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
409 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
410 # endif
411
412 if (ph_inet == NULL
413 # ifdef USE_INET6
414 && ph_inet6 == NULL
415 # endif
416 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
417 && ph_ifsync == NULL
418 # endif
419 ) {
420 SPL_X(s);
421 IPFERROR(130016);
422 return ENODEV;
423 }
424
425 if (ph_inet != NULL)
426 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
427 PFIL_IN|PFIL_OUT, ph_inet);
428 else
429 error = 0;
430 # else
431 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
432 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
433 # endif
434 if (error) {
435 IPFERROR(130013);
436 goto pfil_error;
437 }
438 # else
439 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
440 # endif
441
442 # ifdef USE_INET6
443 # if __NetBSD_Version__ >= 105110000
444 if (ph_inet6 != NULL)
445 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
446 PFIL_IN|PFIL_OUT, ph_inet6);
447 else
448 error = 0;
449 if (error) {
450 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
451 PFIL_IN|PFIL_OUT, ph_inet6);
452 ipfmain.ipf_interror = 130014;
453 goto pfil_error;
454 }
455 # else
456 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
457 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
458 if (error) {
459 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
460 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
461 IPFERROR(130014);
462 goto pfil_error;
463 }
464 # endif
465 # endif
466
467 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
468 if (ph_ifsync != NULL)
469 #if (__NetBSD_Version__ >= 799000400)
470 (void) pfil_add_ihook((void *)ipf_pfilsync, NULL,
471 PFIL_IFNET, ph_ifsync);
472 #else
473 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
474 PFIL_IFNET, ph_ifsync);
475 #endif
476 # endif
477 #endif
478
479 #if (__NetBSD_Version__ >= 499005500)
480 for (i = 0; i < IPL_LOGSIZE; i++)
481 selinit(&ipfmain.ipf_selwait[i]);
482 #else
483 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
484 #endif
485 ipf_savep = ipf_checkp;
486 ipf_checkp = ipf_check;
487
488 #ifdef INET
489 if (softc->ipf_control_forwarding & 1)
490 ipforwarding = 1;
491 #endif
492
493 ipid = 0;
494
495 SPL_X(s);
496
497 #if (__NetBSD_Version__ >= 104010000)
498 # if (__NetBSD_Version__ >= 499002000)
499 callout_init(&softc->ipf_slow_ch, 0);
500 # else
501 callout_init(&softc->ipf_slow_ch);
502 # endif
503 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
504 ipf_timer_func, softc);
505 #else
506 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
507 #endif
508
509 return 0;
510
511 #if __NetBSD_Version__ >= 105110000
512 pfil_error:
513 SPL_X(s);
514 ipf_fini_all(softc);
515 return error;
516 #endif
517 }
518
519 static void
520 ipf_timer_func(void *ptr)
521 {
522 ipf_main_softc_t *softc = ptr;
523 SPL_INT(s);
524
525 SPL_NET(s);
526 READ_ENTER(&softc->ipf_global);
527
528 if (softc->ipf_running > 0)
529 ipf_slowtimer(softc);
530
531 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
532 #if NETBSD_GE_REV(104240000)
533 callout_reset(&softc->ipf_slow_ch, hz / 2,
534 ipf_timer_func, softc);
535 #else
536 timeout(ipf_timer_func, softc,
537 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
538 #endif
539 }
540 RWLOCK_EXIT(&softc->ipf_global);
541 SPL_X(s);
542 }
543
544
545 /*
546 * Disable the filter by removing the hooks from the IP input/output
547 * stream.
548 */
549 int
550 ipfdetach(ipf_main_softc_t *softc)
551 {
552 SPL_INT(s);
553 #if (__NetBSD_Version__ >= 499005500)
554 int i;
555 #endif
556 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
557 int error = 0;
558 # if __NetBSD_Version__ >= 105150000
559 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
560 # ifdef USE_INET6
561 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
562 # endif
563 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
564 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
565 # endif
566 # endif
567 #endif
568
569 SPL_NET(s);
570
571 #if (__NetBSD_Version__ >= 104010000)
572 if (softc->ipf_running > 0)
573 callout_stop(&softc->ipf_slow_ch);
574 #else
575 untimeout(ipf_slowtimer, NULL);
576 #endif /* NetBSD */
577
578 ipf_checkp = ipf_savep;
579 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
580 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
581
582 #ifdef INET
583 if (softc->ipf_control_forwarding & 2)
584 ipforwarding = 0;
585 #endif
586
587 #ifdef NETBSD_PF
588 # if (__NetBSD_Version__ >= 104200000)
589 # if __NetBSD_Version__ >= 105110000
590 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
591 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
592 PFIL_IFNET, ph_ifsync);
593 # endif
594
595 if (ph_inet != NULL)
596 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
597 PFIL_IN|PFIL_OUT, ph_inet);
598 else
599 error = 0;
600 # else
601 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
602 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
603 # endif
604 if (error) {
605 SPL_X(s);
606 IPFERROR(130011);
607 return error;
608 }
609 # else
610 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
611 # endif
612 # ifdef USE_INET6
613 # if __NetBSD_Version__ >= 105110000
614 if (ph_inet6 != NULL)
615 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
616 PFIL_IN|PFIL_OUT, ph_inet6);
617 else
618 error = 0;
619 # else
620 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
621 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
622 # endif
623 if (error) {
624 SPL_X(s);
625 IPFERROR(130012);
626 return error;
627 }
628 # endif
629 #endif
630 SPL_X(s);
631
632 #if (__NetBSD_Version__ >= 499005500)
633 for (i = 0; i < IPL_LOGSIZE; i++)
634 seldestroy(&ipfmain.ipf_selwait[i]);
635 #endif
636
637 ipf_fini_all(softc);
638
639 return 0;
640 }
641
642
643 /*
644 * Filter ioctl interface.
645 */
646 int
647 ipfioctl(dev_t dev, u_long cmd,
648 #if (__NetBSD_Version__ >= 499001000)
649 void *data,
650 #else
651 caddr_t data,
652 #endif
653 int mode
654 #if (NetBSD >= 199511)
655 # if (__NetBSD_Version__ >= 399001400)
656 , struct lwp *p
657 # if (__NetBSD_Version__ >= 399002000)
658 # define UID(l) kauth_cred_getuid((l)->l_cred)
659 # else
660 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
661 # endif
662 # else
663 , struct proc *p
664 # define UID(p) ((p)->p_cred->p_ruid)
665 # endif
666 #endif
667 )
668 {
669 int error = 0, unit = 0;
670 SPL_INT(s);
671
672 #if (__NetBSD_Version__ >= 399002000)
673 if ((mode & FWRITE) &&
674 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
675 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
676 NULL, NULL)) {
677 ipfmain.ipf_interror = 130005;
678 return EPERM;
679 }
680 #else
681 if ((securelevel >= 2) && (mode & FWRITE)) {
682 ipfmain.ipf_interror = 130001;
683 return EPERM;
684 }
685 #endif
686
687 unit = GET_MINOR(dev);
688 if ((IPL_LOGMAX < unit) || (unit < 0)) {
689 ipfmain.ipf_interror = 130002;
690 return ENXIO;
691 }
692
693 if (ipfmain.ipf_running <= 0) {
694 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
695 ipfmain.ipf_interror = 130003;
696 return EIO;
697 }
698 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
699 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
700 cmd != SIOCGETFS && cmd != SIOCGETFF &&
701 cmd != SIOCIPFINTERROR) {
702 ipfmain.ipf_interror = 130004;
703 return EIO;
704 }
705 }
706
707 SPL_NET(s);
708
709 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
710 if (error != -1) {
711 SPL_X(s);
712 return error;
713 }
714
715 SPL_X(s);
716 return error;
717 }
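/*
 * A minimal sketch of how this entry point is reached from userland; the
 * device path and ioctl argument shown are assumptions for illustration
 * only, not taken from this file:
 *
 *	fd = open("/dev/ipl", O_RDWR);
 *	if (ioctl(fd, SIOCFRENB, &enable) == -1)
 *		err(1, "SIOCFRENB");
 *
 * ipf_ioctlswitch() performs the per-unit command dispatch; the code above
 * only enforces kauth/securelevel policy, checks the minor number and
 * restricts which commands are allowed while the filter is not running.
 */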
718
719
720 /*
721 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
722 * requires a large amount of setting up and isn't any more efficient.
723 */
724 int
725 ipf_send_reset(fr_info_t *fin)
726 {
727 struct tcphdr *tcp, *tcp2;
728 int tlen = 0, hlen;
729 struct mbuf *m;
730 #ifdef USE_INET6
731 ip6_t *ip6;
732 #endif
733 ip_t *ip;
734
735 tcp = fin->fin_dp;
736 if (tcp->th_flags & TH_RST)
737 return -1; /* feedback loop */
738
739 if (ipf_checkl4sum(fin) == -1)
740 return -1;
741
742 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
743 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
744 ((tcp->th_flags & TH_FIN) ? 1 : 0);
745
746 #ifdef USE_INET6
747 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
748 #else
749 hlen = sizeof(ip_t);
750 #endif
751 #ifdef MGETHDR
752 MGETHDR(m, M_DONTWAIT, MT_HEADER);
753 #else
754 MGET(m, M_DONTWAIT, MT_HEADER);
755 #endif
756 if (m == NULL)
757 return -1;
758 if (sizeof(*tcp2) + hlen > MHLEN) {
759 MCLGET(m, M_DONTWAIT);
760 if (m == NULL)
761 return -1;
762 if ((m->m_flags & M_EXT) == 0) {
763 FREE_MB_T(m);
764 return -1;
765 }
766 }
767
768 m->m_len = sizeof(*tcp2) + hlen;
769 m->m_data += max_linkhdr;
770 m->m_pkthdr.len = m->m_len;
771 m_reset_rcvif(m);
772 ip = mtod(m, struct ip *);
773 bzero((char *)ip, hlen);
774 #ifdef USE_INET6
775 ip6 = (ip6_t *)ip;
776 #endif
777 bzero((char *)ip, sizeof(*tcp2) + hlen);
778 tcp2 = (struct tcphdr *)((char *)ip + hlen);
779 tcp2->th_sport = tcp->th_dport;
780 tcp2->th_dport = tcp->th_sport;
781
782 if (tcp->th_flags & TH_ACK) {
783 tcp2->th_seq = tcp->th_ack;
784 tcp2->th_flags = TH_RST;
785 tcp2->th_ack = 0;
786 } else {
787 tcp2->th_seq = 0;
788 tcp2->th_ack = ntohl(tcp->th_seq);
789 tcp2->th_ack += tlen;
790 tcp2->th_ack = htonl(tcp2->th_ack);
791 tcp2->th_flags = TH_RST|TH_ACK;
792 }
793 tcp2->th_x2 = 0;
794 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
795 tcp2->th_win = tcp->th_win;
796 tcp2->th_sum = 0;
797 tcp2->th_urp = 0;
798
799 #ifdef USE_INET6
800 if (fin->fin_v == 6) {
801 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
802 ip6->ip6_plen = htons(sizeof(struct tcphdr));
803 ip6->ip6_nxt = IPPROTO_TCP;
804 ip6->ip6_hlim = 0;
805 ip6->ip6_src = fin->fin_dst6.in6;
806 ip6->ip6_dst = fin->fin_src6.in6;
807 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
808 sizeof(*ip6), sizeof(*tcp2));
809 return ipf_send_ip(fin, m);
810 }
811 #endif
812 #ifdef INET
813 ip->ip_p = IPPROTO_TCP;
814 ip->ip_len = htons(sizeof(struct tcphdr));
815 ip->ip_src.s_addr = fin->fin_daddr;
816 ip->ip_dst.s_addr = fin->fin_saddr;
817 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
818 ip->ip_len = hlen + sizeof(*tcp2);
819 return ipf_send_ip(fin, m);
820 #else
821 return 0;
822 #endif
823 }
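/*
 * Note on the sequence numbers built above: tlen counts the TCP payload
 * plus one for SYN and one for FIN, so for a bare SYN (no data) tlen is 1
 * and the RST generated here carries th_ack = th_seq + 1, which is what
 * the peer expects to see acknowledged.
 */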
824
825
826 /*
827 * Expects ip_len to be in host byte order when called.
828 */
829 static int
830 ipf_send_ip(fr_info_t *fin, mb_t *m)
831 {
832 fr_info_t fnew;
833 #ifdef INET
834 ip_t *oip;
835 #endif
836 ip_t *ip;
837 int hlen;
838
839 ip = mtod(m, ip_t *);
840 bzero((char *)&fnew, sizeof(fnew));
841 fnew.fin_main_soft = fin->fin_main_soft;
842
843 IP_V_A(ip, fin->fin_v);
844 switch (fin->fin_v)
845 {
846 #ifdef INET
847 case 4 :
848 oip = fin->fin_ip;
849 hlen = sizeof(*oip);
850 fnew.fin_v = 4;
851 fnew.fin_p = ip->ip_p;
852 fnew.fin_plen = ntohs(ip->ip_len);
853 HTONS(ip->ip_len);
854 IP_HL_A(ip, sizeof(*oip) >> 2);
855 ip->ip_tos = oip->ip_tos;
856 ip->ip_id = ipf_nextipid(fin);
857 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
858 ip->ip_ttl = ip_defttl;
859 ip->ip_sum = 0;
860 break;
861 #endif
862 #ifdef USE_INET6
863 case 6 :
864 {
865 ip6_t *ip6 = (ip6_t *)ip;
866
867 ip6->ip6_vfc = 0x60;
868 ip6->ip6_hlim = IPDEFTTL;
869
870 hlen = sizeof(*ip6);
871 fnew.fin_p = ip6->ip6_nxt;
872 fnew.fin_v = 6;
873 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
874 break;
875 }
876 #endif
877 default :
878 return EINVAL;
879 }
880 #ifdef KAME_IPSEC
881 m_reset_rcvif(m);
882 #endif
883
884 fnew.fin_ifp = fin->fin_ifp;
885 fnew.fin_flx = FI_NOCKSUM;
886 fnew.fin_m = m;
887 fnew.fin_ip = ip;
888 fnew.fin_mp = &m;
889 fnew.fin_hlen = hlen;
890 fnew.fin_dp = (char *)ip + hlen;
891 (void) ipf_makefrip(hlen, ip, &fnew);
892
893 return ipf_fastroute(m, &m, &fnew, NULL);
894 }
895
896
897 int
898 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
899 {
900 int err, hlen, xtra, iclen, ohlen, avail;
901 struct in_addr dst4;
902 struct icmp *icmp;
903 struct mbuf *m;
904 i6addr_t dst6;
905 void *ifp;
906 #ifdef USE_INET6
907 int code;
908 ip6_t *ip6;
909 #endif
910 ip_t *ip, *ip2;
911
912 if ((type < 0) || (type > ICMP_MAXTYPE))
913 return -1;
914
915 #ifdef USE_INET6
916 code = fin->fin_icode;
917 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
918 return -1;
919 #endif
920
921 if (ipf_checkl4sum(fin) == -1)
922 return -1;
923 #ifdef MGETHDR
924 MGETHDR(m, M_DONTWAIT, MT_HEADER);
925 #else
926 MGET(m, M_DONTWAIT, MT_HEADER);
927 #endif
928 if (m == NULL)
929 return -1;
930 avail = MHLEN;
931
932 xtra = 0;
933 hlen = 0;
934 ohlen = 0;
935 dst4.s_addr = 0;
936 ifp = fin->fin_ifp;
937 if (fin->fin_v == 4) {
938 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
939 switch (ntohs(fin->fin_data[0]) >> 8)
940 {
941 case ICMP_ECHO :
942 case ICMP_TSTAMP :
943 case ICMP_IREQ :
944 case ICMP_MASKREQ :
945 break;
946 default :
947 FREE_MB_T(m);
948 return 0;
949 }
950
951 if (dst == 0) {
952 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
953 &dst6, NULL) == -1) {
954 FREE_MB_T(m);
955 return -1;
956 }
957 dst4 = dst6.in4;
958 } else
959 dst4.s_addr = fin->fin_daddr;
960
961 hlen = sizeof(ip_t);
962 ohlen = fin->fin_hlen;
963 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
964 if (fin->fin_hlen < fin->fin_plen)
965 xtra = MIN(fin->fin_dlen, 8);
966 else
967 xtra = 0;
968 }
969
970 #ifdef USE_INET6
971 else if (fin->fin_v == 6) {
972 hlen = sizeof(ip6_t);
973 ohlen = sizeof(ip6_t);
974 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
975 type = icmptoicmp6types[type];
976 if (type == ICMP6_DST_UNREACH)
977 code = icmptoicmp6unreach[code];
978
979 if (iclen + max_linkhdr + fin->fin_plen > avail) {
980 MCLGET(m, M_DONTWAIT);
981 if (m == NULL)
982 return -1;
983 if ((m->m_flags & M_EXT) == 0) {
984 FREE_MB_T(m);
985 return -1;
986 }
987 avail = MCLBYTES;
988 }
989 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
990 xtra = MIN(xtra, IPV6_MMTU - iclen);
991 if (dst == 0) {
992 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
993 &dst6, NULL) == -1) {
994 FREE_MB_T(m);
995 return -1;
996 }
997 } else
998 dst6 = fin->fin_dst6;
999 }
1000 #endif
1001 else {
1002 FREE_MB_T(m);
1003 return -1;
1004 }
1005
1006 avail -= (max_linkhdr + iclen);
1007 if (avail < 0) {
1008 FREE_MB_T(m);
1009 return -1;
1010 }
1011 if (xtra > avail)
1012 xtra = avail;
1013 iclen += xtra;
1014 m->m_data += max_linkhdr;
1015 m_reset_rcvif(m);
1016 m->m_pkthdr.len = iclen;
1017 m->m_len = iclen;
1018 ip = mtod(m, ip_t *);
1019 icmp = (struct icmp *)((char *)ip + hlen);
1020 ip2 = (ip_t *)&icmp->icmp_ip;
1021
1022 icmp->icmp_type = type;
1023 icmp->icmp_code = fin->fin_icode;
1024 icmp->icmp_cksum = 0;
1025 #ifdef icmp_nextmtu
1026 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1027 if (fin->fin_mtu != 0) {
1028 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1029
1030 } else if (ifp != NULL) {
1031 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1032
1033 } else { /* make up a number... */
1034 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1035 }
1036 }
1037 #endif
1038
1039 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1040
1041 #if defined(M_CSUM_IPv4)
1042 /*
1043 * Clear any in-bound checksum flags for this packet.
1044 */
1045 m->m_pkthdr.csuminfo = 0;
1046 #endif /* M_CSUM_IPv4 */
1047
1048 #ifdef USE_INET6
1049 ip6 = (ip6_t *)ip;
1050 if (fin->fin_v == 6) {
1051 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1052 ip6->ip6_plen = htons(iclen - hlen);
1053 ip6->ip6_nxt = IPPROTO_ICMPV6;
1054 ip6->ip6_hlim = 0;
1055 ip6->ip6_src = dst6.in6;
1056 ip6->ip6_dst = fin->fin_src6.in6;
1057 if (xtra > 0)
1058 bcopy((char *)fin->fin_ip + ohlen,
1059 (char *)&icmp->icmp_ip + ohlen, xtra);
1060 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1061 sizeof(*ip6), iclen - hlen);
1062 } else
1063 #endif
1064 {
1065 ip->ip_p = IPPROTO_ICMP;
1066 ip->ip_src.s_addr = dst4.s_addr;
1067 ip->ip_dst.s_addr = fin->fin_saddr;
1068
1069 if (xtra > 0)
1070 bcopy((char *)fin->fin_ip + ohlen,
1071 (char *)&icmp->icmp_ip + ohlen, xtra);
1072 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1073 sizeof(*icmp) + 8);
1074 ip->ip_len = iclen;
1075 ip->ip_p = IPPROTO_ICMP;
1076 }
1077 err = ipf_send_ip(fin, m);
1078 return err;
1079 }
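/*
 * For IPv4 the error body assembled above follows the usual ICMP layout:
 * the offending IP header (ohlen bytes) plus at most the first 8 bytes of
 * its payload (xtra = MIN(fin_dlen, 8)).  For IPv6, as much of the original
 * packet as fits under IPV6_MMTU (and in the mbuf) is copied instead.
 */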
1080
1081
1082 /*
1083 * m0 - pointer to mbuf where the IP packet starts
1084 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1085 */
1086 int
1087 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1088 {
1089 register struct ip *ip, *mhip;
1090 register struct mbuf *m = *mpp;
1091 register struct route *ro;
1092 int len, off, error = 0, hlen, code;
1093 struct ifnet *ifp, *sifp;
1094 ipf_main_softc_t *softc;
1095 #if __NetBSD_Version__ >= 499001100
1096 union {
1097 struct sockaddr dst;
1098 struct sockaddr_in dst4;
1099 } u;
1100 #else
1101 struct sockaddr_in *dst4;
1102 #endif
1103 struct sockaddr *dst;
1104 u_short ip_off, ip_len;
1105 struct route iproute;
1106 struct rtentry *rt;
1107 frdest_t node;
1108 frentry_t *fr;
1109
1110 if (fin->fin_v == 6) {
1111 #ifdef USE_INET6
1112 error = ipf_fastroute6(m0, mpp, fin, fdp);
1113 #else
1114 error = EPROTONOSUPPORT;
1115 #endif
1116 if ((error != 0) && (*mpp != NULL))
1117 FREE_MB_T(*mpp);
1118 return error;
1119 }
1120 #ifndef INET
1121 FREE_MB_T(*mpp);
1122 return EPROTONOSUPPORT;
1123 #else
1124
1125 hlen = fin->fin_hlen;
1126 ip = mtod(m0, struct ip *);
1127 softc = fin->fin_main_soft;
1128 rt = NULL;
1129 ifp = NULL;
1130
1131 # if defined(M_CSUM_IPv4)
1132 /*
1133 * Clear any in-bound checksum flags for this packet.
1134 */
1135 m0->m_pkthdr.csuminfo = 0;
1136 # endif /* M_CSUM_IPv4 */
1137
1138 /*
1139 * Route packet.
1140 */
1141 ro = &iproute;
1142 memset(ro, 0, sizeof(*ro));
1143 fr = fin->fin_fr;
1144
1145 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1146 (fdp->fd_type == FRD_DSTLIST)) {
1147 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1148 fdp = &node;
1149 }
1150 if (fdp != NULL)
1151 ifp = fdp->fd_ptr;
1152 else
1153 ifp = fin->fin_ifp;
1154
1155 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1156 error = -2;
1157 goto bad;
1158 }
1159
1160 # if __NetBSD_Version__ >= 499001100
1161 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1162 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1163 else
1164 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1165 dst = &u.dst;
1166 rtcache_setdst(ro, dst);
1167 rt = rtcache_init(ro);
1168 # else
1169 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1170 dst = (struct sockaddr *)dst4;
1171 dst4->sin_family = AF_INET;
1172 dst4->sin_addr = ip->ip_dst;
1173
1174 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1175 dst4->sin_addr = fdp->fd_ip;
1176
1177 dst4->sin_len = sizeof(*dst);
1178 rtalloc(ro);
1179 rt = ro->ro_rt;
1180 # endif
1181 if ((ifp == NULL) && (rt != NULL))
1182 ifp = rt->rt_ifp;
1183 if ((rt == NULL) || (ifp == NULL)) {
1184 #ifdef INET
1185 if (in_localaddr(ip->ip_dst))
1186 error = EHOSTUNREACH;
1187 else
1188 #endif
1189 error = ENETUNREACH;
1190 goto bad;
1191 }
1192
1193
1194 if (rt->rt_flags & RTF_GATEWAY)
1195 dst = rt->rt_gateway;
1196
1197 rt->rt_use++;
1198
1199 /*
1200 * Input packets that are being "fastrouted" won't go back through
1201 * output filtering, so they would otherwise miss their chance to get
1202 * NAT'd and counted. Duplicated packets aren't considered to be
1203 * part of the normal packet stream, so do not NAT them or pass
1204 * them through stateful checking, etc.
1205 */
1206 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1207 sifp = fin->fin_ifp;
1208 fin->fin_ifp = ifp;
1209 fin->fin_out = 1;
1210 (void) ipf_acctpkt(fin, NULL);
1211 fin->fin_fr = NULL;
1212 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1213 u_32_t pass;
1214
1215 (void) ipf_state_check(fin, &pass);
1216 }
1217
1218 switch (ipf_nat_checkout(fin, NULL))
1219 {
1220 case 0 :
1221 break;
1222 case 1 :
1223 ip->ip_sum = 0;
1224 break;
1225 case -1 :
1226 error = -1;
1227 goto bad;
1228 break;
1229 }
1230
1231 fin->fin_ifp = sifp;
1232 fin->fin_out = 0;
1233 } else
1234 ip->ip_sum = 0;
1235 /*
1236 * If small enough for interface, can just send directly.
1237 */
1238 m_set_rcvif(m, ifp);
1239
1240 ip_len = ntohs(ip->ip_len);
1241 if (ip_len <= ifp->if_mtu) {
1242 # if defined(M_CSUM_IPv4)
1243 # if (__NetBSD_Version__ >= 105009999)
1244 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1245 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1246 # else
1247 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1248 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1249 # endif /* (__NetBSD_Version__ >= 105009999) */
1250 else if (ip->ip_sum == 0)
1251 ip->ip_sum = in_cksum(m, hlen);
1252 # else
1253 if (!ip->ip_sum)
1254 ip->ip_sum = in_cksum(m, hlen);
1255 # endif /* M_CSUM_IPv4 */
1256
1257 error = if_output_lock(ifp, ifp, m, dst, rt);
1258 goto done;
1259 }
1260
1261 /*
1262 * Too large for interface; fragment if possible.
1263 * Must be able to put at least 8 bytes per fragment.
1264 */
1265 ip_off = ntohs(ip->ip_off);
1266 if (ip_off & IP_DF) {
1267 error = EMSGSIZE;
1268 goto bad;
1269 }
1270 len = (ifp->if_mtu - hlen) &~ 7;
1271 if (len < 8) {
1272 error = EMSGSIZE;
1273 goto bad;
1274 }
1275
1276 {
1277 int mhlen, firstlen = len;
1278 struct mbuf **mnext = &m->m_act;
1279
1280 /*
1281 * Loop through length of segment after first fragment,
1282 * make new header and copy data of each part and link onto chain.
1283 */
1284 m0 = m;
1285 mhlen = sizeof (struct ip);
1286 for (off = hlen + len; off < ip_len; off += len) {
1287 # ifdef MGETHDR
1288 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1289 # else
1290 MGET(m, M_DONTWAIT, MT_HEADER);
1291 # endif
1292 if (m == 0) {
1293 m = m0;
1294 error = ENOBUFS;
1295 goto bad;
1296 }
1297 m->m_data += max_linkhdr;
1298 mhip = mtod(m, struct ip *);
1299 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1300 #ifdef INET
1301 if (hlen > sizeof (struct ip)) {
1302 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1303 IP_HL_A(mhip, mhlen >> 2);
1304 }
1305 #endif
1306 m->m_len = mhlen;
1307 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1308 if (off + len >= ip_len)
1309 len = ip_len - off;
1310 else
1311 mhip->ip_off |= IP_MF;
1312 mhip->ip_len = htons((u_short)(len + mhlen));
1313 m->m_next = m_copy(m0, off, len);
1314 if (m->m_next == 0) {
1315 error = ENOBUFS; /* ??? */
1316 goto sendorfree;
1317 }
1318 m->m_pkthdr.len = mhlen + len;
1319 m_reset_rcvif(m);
1320 mhip->ip_off = htons((u_short)mhip->ip_off);
1321 mhip->ip_sum = 0;
1322 #ifdef INET
1323 mhip->ip_sum = in_cksum(m, mhlen);
1324 #endif
1325 *mnext = m;
1326 mnext = &m->m_act;
1327 }
1328 /*
1329 * Update first fragment by trimming what's been copied out
1330 * and updating header, then send each fragment (in order).
1331 */
1332 m_adj(m0, hlen + firstlen - ip_len);
1333 ip->ip_len = htons((u_short)(hlen + firstlen));
1334 ip->ip_off = htons((u_short)IP_MF);
1335 ip->ip_sum = 0;
1336 #ifdef INET
1337 ip->ip_sum = in_cksum(m0, hlen);
1338 #endif
1339 sendorfree:
1340 for (m = m0; m; m = m0) {
1341 m0 = m->m_act;
1342 m->m_act = 0;
1343 if (error == 0) {
1344 KERNEL_LOCK(1, NULL);
1345 error = (*ifp->if_output)(ifp, m, dst, rt);
1346 KERNEL_UNLOCK_ONE(NULL);
1347 } else {
1348 FREE_MB_T(m);
1349 }
1350 }
1351 }
1352 done:
1353 if (!error)
1354 softc->ipf_frouteok[0]++;
1355 else
1356 softc->ipf_frouteok[1]++;
1357
1358 # if __NetBSD_Version__ >= 499001100
1359 rtcache_unref(rt, ro);
1360 rtcache_free(ro);
1361 # else
1362 if (rt) {
1363 RTFREE(rt);
1364 }
1365 # endif
1366 return error;
1367 bad:
1368 if (error == EMSGSIZE) {
1369 sifp = fin->fin_ifp;
1370 code = fin->fin_icode;
1371 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1372 fin->fin_ifp = ifp;
1373 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1374 fin->fin_ifp = sifp;
1375 fin->fin_icode = code;
1376 }
1377 FREE_MB_T(m);
1378 goto done;
1379 #endif /* INET */
1380 }
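/*
 * Worked example of the fragmentation arithmetic above (illustrative
 * numbers only): with a 1500 byte MTU and a 20 byte header, len becomes
 * (1500 - 20) & ~7 = 1480, so a 4000 byte packet goes out as fragments
 * carrying 1480 + 1480 + 1020 bytes of payload, each fragment's ip_off
 * being stored in 8-byte units as ((off - hlen) >> 3).
 */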
1381
1382
1383 #if defined(USE_INET6)
1384 /*
1385 * This is the IPv6-specific fastroute code. It doesn't clean up the mbufs
1386 * or ensure that it is an IPv6 packet that is being forwarded; those are
1387 * expected to be done by the caller (ipf_fastroute).
1388 */
1389 static int
1390 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1391 frdest_t *fdp)
1392 {
1393 # if __NetBSD_Version__ >= 499001100
1394 struct route ip6route;
1395 const struct sockaddr *dst;
1396 union {
1397 struct sockaddr dst;
1398 struct sockaddr_in6 dst6;
1399 } u;
1400 struct route *ro;
1401 # else
1402 struct route_in6 ip6route;
1403 struct sockaddr_in6 *dst6;
1404 struct route_in6 *ro;
1405 # endif
1406 struct rtentry *rt;
1407 struct ifnet *ifp;
1408 u_long mtu;
1409 int error;
1410
1411 error = 0;
1412 ro = &ip6route;
1413
1414 if (fdp != NULL)
1415 ifp = fdp->fd_ptr;
1416 else
1417 ifp = fin->fin_ifp;
1418 memset(ro, 0, sizeof(*ro));
1419 # if __NetBSD_Version__ >= 499001100
1420 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1421 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1422 else
1423 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1424 dst = &u.dst;
1425 rtcache_setdst(ro, dst);
1426
1427 rt = rtcache_init(ro);
1428 if ((ifp == NULL) && (rt != NULL))
1429 ifp = rt->rt_ifp;
1430 # else
1431 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1432 dst6->sin6_family = AF_INET6;
1433 dst6->sin6_len = sizeof(struct sockaddr_in6);
1434 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1435
1436 if (fdp != NULL) {
1437 if (IP6_NOTZERO(&fdp->fd_ip6))
1438 dst6->sin6_addr = fdp->fd_ip6.in6;
1439 }
1440
1441 rtalloc((struct route *)ro);
1442
1443 if ((ifp == NULL) && (ro->ro_rt != NULL))
1444 ifp = ro->ro_rt->rt_ifp;
1445 rt = ro->ro_rt;
1446 # endif
1447 if ((rt == NULL) || (ifp == NULL)) {
1448
1449 error = EHOSTUNREACH;
1450 goto bad;
1451 }
1452
1453 /* KAME */
1454 # if __NetBSD_Version__ >= 499001100
1455 if (IN6_IS_ADDR_LINKLOCAL(&u.dst6.sin6_addr))
1456 u.dst6.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1457 # else
1458 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1459 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1460 # endif
1461
1462 {
1463 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
1464 struct in6_ifextra *ife;
1465 # endif
1466 if (rt->rt_flags & RTF_GATEWAY)
1467 # if __NetBSD_Version__ >= 499001100
1468 dst = rt->rt_gateway;
1469 # else
1470 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1471 # endif
1472 rt->rt_use++;
1473
1474 /* Determine path MTU. */
1475 # if (__NetBSD_Version__ <= 106009999)
1476 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1477 # else
1478 # ifdef IN6_LINKMTU
1479 mtu = IN6_LINKMTU(ifp);
1480 # else
1481 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1482 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1483 # endif
1484 # endif
1485 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1486 # if __NetBSD_Version__ >= 499001100
1487 error = nd6_output(ifp, ifp, m0, satocsin6(dst), rt);
1488 # else
1489 error = nd6_output(ifp, ifp, m0, dst6, rt);
1490 # endif
1491 } else {
1492 error = EMSGSIZE;
1493 }
1494 }
1495 bad:
1496 # if __NetBSD_Version__ >= 499001100
1497 rtcache_unref(rt, ro);
1498 rtcache_free(ro);
1499 # else
1500 if (ro->ro_rt != NULL) {
1501 RTFREE(((struct route *)ro)->ro_rt);
1502 }
1503 # endif
1504 return error;
1505 }
1506 #endif /* USE_INET6 */
1507
1508
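/*
 * ipf_verifysrc() is a reverse-path check: look up a route for the
 * packet's source address and report whether that route points back out
 * the interface the packet arrived on.
 */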
1509 int
1510 ipf_verifysrc(fr_info_t *fin)
1511 {
1512 #if __NetBSD_Version__ >= 499001100
1513 union {
1514 struct sockaddr dst;
1515 struct sockaddr_in dst4;
1516 } u;
1517 struct rtentry *rt;
1518 #else
1519 struct sockaddr_in *dst;
1520 #endif
1521 struct route iproute;
1522 int rc;
1523
1524 #if __NetBSD_Version__ >= 499001100
1525 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1526 rtcache_setdst(&iproute, &u.dst);
1527 rt = rtcache_init(&iproute);
1528 if (rt == NULL)
1529 rc = 0;
1530 else
1531 rc = (fin->fin_ifp == rt->rt_ifp);
1532 rtcache_unref(rt, &iproute);
1533 rtcache_free(&iproute);
1534 #else
1535 dst = (struct sockaddr_in *)&iproute.ro_dst;
1536 dst->sin_len = sizeof(*dst);
1537 dst->sin_family = AF_INET;
1538 dst->sin_addr = fin->fin_src;
1539 rtalloc(&iproute);
1540 if (iproute.ro_rt == NULL)
1541 return 0;
1542 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1543 RTFREE(iproute.ro_rt);
1544 #endif
1545 return rc;
1546 }
1547
1548
1549 /*
1550 * return the first IP Address associated with an interface
1551 */
1552 int
1553 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1554 i6addr_t *inp, i6addr_t *inpmask)
1555 {
1556 #ifdef USE_INET6
1557 struct in6_addr *inp6 = NULL;
1558 #endif
1559 struct sockaddr *sock, *mask;
1560 struct sockaddr_in *sin;
1561 struct ifaddr *ifa;
1562 struct ifnet *ifp;
1563
1564 if ((ifptr == NULL) || (ifptr == (void *)-1))
1565 return -1;
1566
1567 ifp = ifptr;
1568 mask = NULL;
1569
1570 if (v == 4)
1571 inp->in4.s_addr = 0;
1572 #ifdef USE_INET6
1573 else if (v == 6)
1574 bzero((char *)inp, sizeof(*inp));
1575 #endif
1576
1577 ifa = IFADDR_READER_FIRST(ifp);
1578 sock = ifa ? ifa->ifa_addr : NULL;
1579 while (sock != NULL && ifa != NULL) {
1580 sin = (struct sockaddr_in *)sock;
1581 if ((v == 4) && (sin->sin_family == AF_INET))
1582 break;
1583 #ifdef USE_INET6
1584 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1585 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1586 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1587 !IN6_IS_ADDR_LOOPBACK(inp6))
1588 break;
1589 }
1590 #endif
1591 ifa = IFADDR_READER_NEXT(ifa);
1592 if (ifa != NULL)
1593 sock = ifa->ifa_addr;
1594 }
1595 if (ifa == NULL || sock == NULL)
1596 return -1;
1597
1598 mask = ifa->ifa_netmask;
1599 if (atype == FRI_BROADCAST)
1600 sock = ifa->ifa_broadaddr;
1601 else if (atype == FRI_PEERADDR)
1602 sock = ifa->ifa_dstaddr;
1603
1604 #ifdef USE_INET6
1605 if (v == 6)
1606 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1607 (struct sockaddr_in6 *)mask,
1608 inp, inpmask);
1609 #endif
1610 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1611 (struct sockaddr_in *)mask,
1612 &inp->in4, &inpmask->in4);
1613 }
1614
1615
1616 u_32_t
1617 ipf_newisn(fr_info_t *fin)
1618 {
1619 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1620 size_t asz;
1621
1622 if (fin->fin_v == 4)
1623 asz = sizeof(struct in_addr);
1624 else if (fin->fin_v == 6)
1625 asz = sizeof(fin->fin_src);
1626 else /* XXX: no way to return error */
1627 return 0;
1628 #ifdef INET
1629 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1630 fin->fin_sport, fin->fin_dport, asz, 0);
1631 #else
1632 return ENOSYS;
1633 #endif
1634 #else
1635 static int iss_seq_off = 0;
1636 u_char hash[16];
1637 u_32_t newiss;
1638 MD5_CTX ctx;
1639
1640 /*
1641 * Compute the base value of the ISS. It is a hash
1642 * of (saddr, sport, daddr, dport, secret).
1643 */
1644 MD5Init(&ctx);
1645
1646 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1647 sizeof(fin->fin_fi.fi_src));
1648 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1649 sizeof(fin->fin_fi.fi_dst));
1650 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1651
1652 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1653
1654 MD5Final(hash, &ctx);
1655
1656 memcpy(&newiss, hash, sizeof(newiss));
1657
1658 /*
1659 * Now increment our "timer", and add it in to
1660 * the computed value.
1661 *
1662 * XXX Use `addin'?
1663 * XXX TCP_ISSINCR too large to use?
1664 */
1665 iss_seq_off += 0x00010000;
1666 newiss += iss_seq_off;
1667 return newiss;
1668 #endif
1669 }
1670
1671
1672 /* ------------------------------------------------------------------------ */
1673 /* Function: ipf_nextipid */
1674 /* Returns: u_short - the next IPv4 ident to use for this packet */
1675 /* Parameters: fin(I) - pointer to packet information */
1676 /* */
1677 /* Returns the next IPv4 ID to use for this packet. */
1678 /* ------------------------------------------------------------------------ */
1679 u_short
1680 ipf_nextipid(fr_info_t *fin)
1681 {
1682 #ifdef USE_MUTEXES
1683 ipf_main_softc_t *softc = fin->fin_main_soft;
1684 #endif
1685 u_short id;
1686
1687 MUTEX_ENTER(&softc->ipf_rw);
1688 id = ipid++;
1689 MUTEX_EXIT(&softc->ipf_rw);
1690
1691 return id;
1692 }
1693
1694
1695 EXTERN_INLINE int
1696 ipf_checkv4sum(fr_info_t *fin)
1697 {
1698 #ifdef M_CSUM_TCP_UDP_BAD
1699 int manual, pflag, cflags, active;
1700 mb_t *m;
1701
1702 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1703 return 0;
1704
1705 if ((fin->fin_flx & FI_SHORT) != 0)
1706 return 1;
1707
1708 if (fin->fin_cksum != FI_CK_NEEDED)
1709 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1710
1711 manual = 0;
1712 m = fin->fin_m;
1713 if (m == NULL) {
1714 manual = 1;
1715 goto skipauto;
1716 }
1717
1718 switch (fin->fin_p)
1719 {
1720 case IPPROTO_UDP :
1721 pflag = M_CSUM_UDPv4;
1722 break;
1723 case IPPROTO_TCP :
1724 pflag = M_CSUM_TCPv4;
1725 break;
1726 default :
1727 pflag = 0;
1728 manual = 1;
1729 break;
1730 }
1731
1732 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1733 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1734 cflags = m->m_pkthdr.csum_flags & active;
1735
1736 if (pflag != 0) {
1737 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1738 fin->fin_flx |= FI_BAD;
1739 fin->fin_cksum = FI_CK_BAD;
1740 } else if (cflags == (pflag | M_CSUM_DATA)) {
1741 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1742 fin->fin_flx |= FI_BAD;
1743 fin->fin_cksum = FI_CK_BAD;
1744 } else {
1745 fin->fin_cksum = FI_CK_SUMOK;
1746 }
1747 } else if (cflags == pflag) {
1748 fin->fin_cksum = FI_CK_SUMOK;
1749 } else {
1750 manual = 1;
1751 }
1752 }
1753 skipauto:
1754 if (manual != 0) {
1755 if (ipf_checkl4sum(fin) == -1) {
1756 fin->fin_flx |= FI_BAD;
1757 return -1;
1758 }
1759 }
1760 #else
1761 if (ipf_checkl4sum(fin) == -1) {
1762 fin->fin_flx |= FI_BAD;
1763 return -1;
1764 }
1765 #endif
1766 return 0;
1767 }
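/*
 * Summary of the csum_flags cases handled above (a reading aid; assumes
 * the usual NetBSD M_CSUM receive semantics):
 *	pflag | M_CSUM_TCP_UDP_BAD -> hardware reported a bad checksum
 *	pflag | M_CSUM_DATA        -> csum_data holds the computed sum and
 *	                              0xffff means the packet checks out
 *	pflag alone                -> hardware verified the checksum
 *	anything else              -> fall back to ipf_checkl4sum()
 */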
1768
1769
1770 #ifdef USE_INET6
1771 EXTERN_INLINE int
1772 ipf_checkv6sum(fr_info_t *fin)
1773 {
1774 # ifdef M_CSUM_TCP_UDP_BAD
1775 int manual, pflag, cflags, active;
1776 mb_t *m;
1777
1778 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1779 return 0;
1780
1781 if ((fin->fin_flx & FI_SHORT) != 0)
1782 return 1;
1783
1784 if (fin->fin_cksum != FI_CK_SUMOK)
1785 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1786
1787
1788 manual = 0;
1789 m = fin->fin_m;
1790
1791 switch (fin->fin_p)
1792 {
1793 case IPPROTO_UDP :
1794 pflag = M_CSUM_UDPv6;
1795 break;
1796 case IPPROTO_TCP :
1797 pflag = M_CSUM_TCPv6;
1798 break;
1799 default :
1800 pflag = 0;
1801 manual = 1;
1802 break;
1803 }
1804
1805 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1806 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1807 cflags = m->m_pkthdr.csum_flags & active;
1808
1809 if (pflag != 0) {
1810 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1811 fin->fin_flx |= FI_BAD;
1812 } else if (cflags == (pflag | M_CSUM_DATA)) {
1813 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1814 fin->fin_flx |= FI_BAD;
1815 } else if (cflags == pflag) {
1816 ;
1817 } else {
1818 manual = 1;
1819 }
1820 }
1821 if (manual != 0) {
1822 if (ipf_checkl4sum(fin) == -1) {
1823 fin->fin_flx |= FI_BAD;
1824 return -1;
1825 }
1826 }
1827 # else
1828 if (ipf_checkl4sum(fin) == -1) {
1829 fin->fin_flx |= FI_BAD;
1830 return -1;
1831 }
1832 # endif
1833 return 0;
1834 }
1835 #endif /* USE_INET6 */
1836
1837
1838 size_t
1839 mbufchainlen(struct mbuf *m0)
1840 {
1841 size_t len;
1842
1843 if ((m0->m_flags & M_PKTHDR) != 0) {
1844 len = m0->m_pkthdr.len;
1845 } else {
1846 struct mbuf *m;
1847
1848 for (m = m0, len = 0; m != NULL; m = m->m_next)
1849 len += m->m_len;
1850 }
1851 return len;
1852 }
1853
1854
1855 /* ------------------------------------------------------------------------ */
1856 /* Function: ipf_pullup */
1857 /* Returns: NULL == pullup failed, else pointer to protocol header */
1858 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1859 /* fin(I) - pointer to packet information */
1860 /* len(I) - number of bytes to pullup */
1861 /* */
1862 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1863 /* single buffer for ease of access. Operating system native functions are */
1864 /* used to manage buffers - if necessary. If the entire packet ends up in */
1865 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1866 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1867 /* and ONLY if the pullup succeeds. */
1868 /* */
1869 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1870 /* of buffers that starts at *fin->fin_mp. */
1871 /* ------------------------------------------------------------------------ */
1872 void *
1873 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1874 {
1875 int dpoff, ipoff;
1876 mb_t *m = xmin;
1877 char *ip;
1878
1879 if (m == NULL)
1880 return NULL;
1881
1882 ip = (char *)fin->fin_ip;
1883 if ((fin->fin_flx & FI_COALESCE) != 0)
1884 return ip;
1885
1886 ipoff = fin->fin_ipoff;
1887 if (fin->fin_dp != NULL)
1888 dpoff = (char *)fin->fin_dp - (char *)ip;
1889 else
1890 dpoff = 0;
1891
1892 if (M_LEN(m) < len) {
1893 mb_t *n = *fin->fin_mp;
1894 /*
1895 * Assume that M_PKTHDR is set and just work with what is left
1896 * rather than check. It should not
1897 * make any real difference, anyway.
1898 */
1899 if (m != n) {
1900 /*
1901 * Record the mbuf that points to the mbuf that we're
1902 * about to go to work on so that we can update the
1903 * m_next appropriately later.
1904 */
1905 for (; n->m_next != m; n = n->m_next)
1906 ;
1907 } else {
1908 n = NULL;
1909 }
1910
1911 #ifdef MHLEN
1912 if (len > MHLEN)
1913 #else
1914 if (len > MLEN)
1915 #endif
1916 {
1917 #ifdef HAVE_M_PULLDOWN
1918 if (m_pulldown(m, 0, len, NULL) == NULL)
1919 m = NULL;
1920 #else
1921 FREE_MB_T(*fin->fin_mp);
1922 m = NULL;
1923 n = NULL;
1924 #endif
1925 } else
1926 {
1927 m = m_pullup(m, len);
1928 }
1929 if (n != NULL)
1930 n->m_next = m;
1931 if (m == NULL) {
1932 /*
1933 * When n is non-NULL, it indicates that m pointed to
1934 * a sub-chain (tail) of the mbuf and that the head
1935 * of this chain has not yet been free'd.
1936 */
1937 if (n != NULL) {
1938 FREE_MB_T(*fin->fin_mp);
1939 }
1940
1941 *fin->fin_mp = NULL;
1942 fin->fin_m = NULL;
1943 return NULL;
1944 }
1945
1946 if (n == NULL)
1947 *fin->fin_mp = m;
1948
1949 while (M_LEN(m) == 0) {
1950 m = m->m_next;
1951 }
1952 fin->fin_m = m;
1953 ip = MTOD(m, char *) + ipoff;
1954
1955 fin->fin_ip = (ip_t *)ip;
1956 if (fin->fin_dp != NULL)
1957 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1958 if (fin->fin_fraghdr != NULL)
1959 fin->fin_fraghdr = (char *)ip +
1960 ((char *)fin->fin_fraghdr -
1961 (char *)fin->fin_ip);
1962 }
1963
1964 if (len == fin->fin_plen)
1965 fin->fin_flx |= FI_COALESCE;
1966 return ip;
1967 }
1968
1969
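/*
 * ipf_inject() re-enters the IP stack on ipf's behalf: inbound packets are
 * queued back onto ip_pktq as if freshly received, outbound packets go
 * straight to ip_output() with IP_FORWARDING set.
 */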
1970 int
1971 ipf_inject(fr_info_t *fin, mb_t *m)
1972 {
1973 int error;
1974
1975 if (fin->fin_out == 0) {
1976 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1977 FREE_MB_T(m);
1978 error = ENOBUFS;
1979 } else {
1980 error = 0;
1981 }
1982 } else {
1983 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1984 }
1985 return error;
1986 }
1987
1988
1989 u_32_t
1990 ipf_random(void)
1991 {
1992 int number;
1993
1994 #ifdef _CPRNG_H
1995 number = cprng_fast32();
1996 #else
1997 number = arc4random();
1998 #endif
1999 return number;
2000 }
2001
2002
2003 /*
2004 * Character device routines below: open/close/read/write/poll for the ipf devices.
2005 */
2006 static int ipfopen(dev_t dev, int flags
2007 #if (NetBSD >= 199511)
2008 , int devtype, PROC_T *p
2009 #endif
2010 )
2011 {
2012 u_int unit = GET_MINOR(dev);
2013 int error;
2014
2015 if (IPL_LOGMAX < unit) {
2016 error = ENXIO;
2017 } else {
2018 switch (unit)
2019 {
2020 case IPL_LOGIPF :
2021 case IPL_LOGNAT :
2022 case IPL_LOGSTATE :
2023 case IPL_LOGAUTH :
2024 case IPL_LOGLOOKUP :
2025 case IPL_LOGSYNC :
2026 #ifdef IPFILTER_SCAN
2027 case IPL_LOGSCAN :
2028 #endif
2029 error = 0;
2030 break;
2031 default :
2032 error = ENXIO;
2033 break;
2034 }
2035 }
2036 #if (__NetBSD_Version__ >= 799003000)
2037 if (error == 0) {
2038 mutex_enter(&ipf_ref_mutex);
2039 ipf_active = 1;
2040 mutex_exit(&ipf_ref_mutex);
2041 }
2042 #endif
2043 return error;
2044 }
2045
2046
2047 static int ipfclose(dev_t dev, int flags
2048 #if (NetBSD >= 199511)
2049 , int devtype, PROC_T *p
2050 #endif
2051 )
2052 {
2053 u_int unit = GET_MINOR(dev);
2054
2055 if (IPL_LOGMAX < unit)
2056 return ENXIO;
2057 else {
2058 #if (__NetBSD_Version__ >= 799003000)
2059 mutex_enter(&ipf_ref_mutex);
2060 ipf_active = 0;
2061 mutex_exit(&ipf_ref_mutex);
2062 #endif
2063 return 0;
2064 }
2065 }
2066
2067 /*
2068 * ipfread/ipflog
2069 * both of these must operate with at least splnet() lest they be
2070 * called during packet processing and cause an inconsistency to appear in
2071 * the filter lists.
2072 */
2073 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2074 {
2075
2076 if (ipfmain.ipf_running < 1) {
2077 ipfmain.ipf_interror = 130006;
2078 return EIO;
2079 }
2080
2081 if (GET_MINOR(dev) == IPL_LOGSYNC)
2082 return ipf_sync_read(&ipfmain, uio);
2083
2084 #ifdef IPFILTER_LOG
2085 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2086 #else
2087 ipfmain.ipf_interror = 130007;
2088 return ENXIO;
2089 #endif
2090 }
2091
2092
2093 /*
2094 * ipfwrite
2095 * both of these must operate with at least splnet() lest they be
2096 * called during packet processing and cause an inconsistency to appear in
2097 * the filter lists.
2098 */
2099 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2100 {
2101
2102 if (ipfmain.ipf_running < 1) {
2103 ipfmain.ipf_interror = 130008;
2104 return EIO;
2105 }
2106
2107 if (GET_MINOR(dev) == IPL_LOGSYNC)
2108 return ipf_sync_write(&ipfmain, uio);
2109 ipfmain.ipf_interror = 130009;
2110 return ENXIO;
2111 }
2112
2113
2114 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2115 {
2116 u_int unit = GET_MINOR(dev);
2117 int revents = 0;
2118
2119 if (IPL_LOGMAX < unit) {
2120 ipfmain.ipf_interror = 130010;
2121 return ENXIO;
2122 }
2123
2124 switch (unit)
2125 {
2126 case IPL_LOGIPF :
2127 case IPL_LOGNAT :
2128 case IPL_LOGSTATE :
2129 #ifdef IPFILTER_LOG
2130 if ((events & (POLLIN | POLLRDNORM)) &&
2131 ipf_log_canread(&ipfmain, unit))
2132 revents |= events & (POLLIN | POLLRDNORM);
2133 #endif
2134 break;
2135 case IPL_LOGAUTH :
2136 if ((events & (POLLIN | POLLRDNORM)) &&
2137 ipf_auth_waiting(&ipfmain))
2138 revents |= events & (POLLIN | POLLRDNORM);
2139 break;
2140 case IPL_LOGSYNC :
2141 if ((events & (POLLIN | POLLRDNORM)) &&
2142 ipf_sync_canread(&ipfmain))
2143 revents |= events & (POLLIN | POLLRDNORM);
2144 if ((events & (POLLOUT | POLLWRNORM)) &&
2145 ipf_sync_canwrite(&ipfmain))
2146 revents |= events & (POLLOUT | POLLWRNORM);
2147 break;
2148 case IPL_LOGSCAN :
2149 case IPL_LOGLOOKUP :
2150 default :
2151 break;
2152 }
2153
2154 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2155 selrecord(p, &ipfmain.ipf_selwait[unit]);
2156 return revents;
2157 }
2158
2159 u_int
2160 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2161 {
2162 struct mbuf *m;
2163 u_int sum2;
2164 int off;
2165
2166 m = fin->fin_m;
2167 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
2168 m->m_data += hlen;
2169 m->m_len -= hlen;
2170 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2171 m->m_len += hlen;
2172 m->m_data -= hlen;
2173
2174 /*
2175 * Both sum and sum2 are partial sums, so combine them together.
2176 */
2177 sum += ~sum2 & 0xffff;
2178 while (sum > 0xffff)
2179 sum = (sum & 0xffff) + (sum >> 16);
2180 sum2 = ~sum & 0xffff;
2181 return sum2;
2182 }
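/*
 * The folding above is plain one's complement arithmetic.  With
 * illustrative values sum = 0xfff0 and sum2 = 0x000f:
 *	sum += ~sum2 & 0xffff   ->  0xfff0 + 0xfff0 = 0x1ffe0
 *	fold the carry          ->  0xffe0 + 0x1    = 0xffe1
 *	return ~sum & 0xffff    ->  0x001e
 */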
2183
2184 #if (__NetBSD_Version__ >= 799003000)
2185
2186 /* NetBSD module interface */
2187
2188 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2189
2190 static int ipl_init(void *);
2191 static int ipl_fini(void *);
2192 static int ipl_modcmd(modcmd_t, void *);
2193
2194 #ifdef _MODULE
2195 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2196 #endif
2197
2198 static int
2199 ipl_modcmd(modcmd_t cmd, void *opaque)
2200 {
2201
2202 switch (cmd) {
2203 case MODULE_CMD_INIT:
2204 return ipl_init(opaque);
2205 case MODULE_CMD_FINI:
2206 return ipl_fini(opaque);
2207 default:
2208 return ENOTTY;
2209 }
2210 }
2211
2212 static int
2213 ipl_init(void *opaque)
2214 {
2215 int error;
2216
2217 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2218 ipf_listener_cb, NULL);
2219
2220 if ((error = ipf_load_all()) != 0)
2221 return error;
2222
2223 if (ipf_create_all(&ipfmain) == NULL) {
2224 ipf_unload_all();
2225 return ENODEV;
2226 }
2227
2228 /* Initialize our mutex and reference count */
2229 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2230 ipf_active = 0;
2231
2232 #ifdef _MODULE
2233 /*
2234 * Insert ourself into the cdevsw list.
2235 */
2236 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2237 if (error)
2238 ipl_fini(opaque);
2239 #endif
2240
2241 return error;
2242 }
2243
2244 static int
2245 ipl_fini(void *opaque)
2246 {
2247
2248 #ifdef _MODULE
2249 (void)devsw_detach(NULL, &ipl_cdevsw);
2250 #endif
2251
2252 /*
2253 * Grab the mutex, verify that there are no references
2254 * and that there are no running filters. If either
2255 * of these exists, reinsert our cdevsw entry and return
2256 * an error.
2257 */
2258 mutex_enter(&ipf_ref_mutex);
2259 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2260 #ifdef _MODULE
2261 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2262 &ipl_cdevsw, &ipl_cmaj);
2263 #endif
2264 mutex_exit(&ipf_ref_mutex);
2265 return EBUSY;
2266 }
2267
2268 /* Clean up the rest of our state before being unloaded */
2269
2270 mutex_exit(&ipf_ref_mutex);
2271 mutex_destroy(&ipf_ref_mutex);
2272 ipf_destroy_all(&ipfmain);
2273 ipf_unload_all();
2274 kauth_unlisten_scope(ipf_listener);
2275
2276 return 0;
2277 }
2278 #endif /* (__NetBSD_Version__ >= 799003000) */
2279