1 /* $NetBSD: ip_fil_netbsd.c,v 1.24 2017/07/20 18:12:51 christos Exp $ */
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.24 2017/07/20 18:12:51 christos Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60
61 #include <net/if.h>
62 #include <net/route.h>
63 #include <netinet/in.h>
64 #include <netinet/in_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip_var.h>
68 #include <netinet/tcp.h>
69 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
70 # include <netinet/tcp_timer.h>
71 # include <netinet/tcp_var.h>
72 #endif
73 #include <netinet/udp.h>
74 #include <netinet/tcpip.h>
75 #include <netinet/ip_icmp.h>
76 #include "netinet/ip_compat.h"
77 #ifdef USE_INET6
78 # include <netinet/icmp6.h>
79 # if (__NetBSD_Version__ >= 106000000)
80 # include <netinet6/nd6.h>
81 # endif
82 #endif
83 #include "netinet/ip_fil.h"
84 #include "netinet/ip_nat.h"
85 #include "netinet/ip_frag.h"
86 #include "netinet/ip_state.h"
87 #include "netinet/ip_proxy.h"
88 #include "netinet/ip_auth.h"
89 #include "netinet/ip_sync.h"
90 #include "netinet/ip_lookup.h"
91 #include "netinet/ip_dstlist.h"
92 #ifdef IPFILTER_SCAN
93 #include "netinet/ip_scan.h"
94 #endif
95 #include <sys/md5.h>
96 #include <sys/kernel.h>
97 #include <sys/conf.h>
98 #ifdef INET
99 extern int ip_optcopy (struct ip *, struct ip *);
100 #endif
101
102 #ifdef IPFILTER_M_IPFILTER
103 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
104 #endif
105
106 #if __NetBSD_Version__ >= 105009999
107 # define csuminfo csum_flags
108 #endif
109
110 #if __NetBSD_Version__ < 200000000
111 extern struct protosw inetsw[];
112 #endif
113
114 #if (__NetBSD_Version__ >= 599002000)
115 static kauth_listener_t ipf_listener;
116 #endif
117
118 #if (__NetBSD_Version__ < 399001400)
119 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
120 struct ifnet *, struct in6_addr *, u_long *,
121 int *);
122 #endif
123 #if (NetBSD >= 199511)
124 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
125 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
126 #else
127 # if (__NetBSD_Version__ >= 399001400)
128 static int ipfopen(dev_t dev, int flags, struct lwp *);
129 static int ipfclose(dev_t dev, int flags, struct lwp *);
130 # else
131 static int ipfopen(dev_t dev, int flags);
132 static int ipfclose(dev_t dev, int flags);
133 # endif /* __NetBSD_Version__ >= 399001400 */
134 #endif
135 static int ipfread(dev_t, struct uio *, int ioflag);
136 static int ipfwrite(dev_t, struct uio *, int ioflag);
137 static int ipfpoll(dev_t, int events, PROC_T *);
138 static void ipf_timer_func(void *ptr);
139
140 const struct cdevsw ipl_cdevsw = {
141 .d_open = ipfopen,
142 .d_close = ipfclose,
143 .d_read = ipfread,
144 .d_write = ipfwrite,
145 .d_ioctl = ipfioctl,
146 .d_stop = nostop,
147 .d_tty = notty,
148 .d_poll = ipfpoll,
149 .d_mmap = nommap,
150 #if (__NetBSD_Version__ >= 200000000)
151 .d_kqfilter = nokqfilter,
152 #endif
153 .d_discard = nodiscard,
154 #ifdef D_OTHER
155 .d_flag = D_OTHER
156 #else
157 .d_flag = 0
158 #endif
159 };
160 #if (__NetBSD_Version__ >= 799003000)
161 kmutex_t ipf_ref_mutex;
162 int ipf_active;
163 #endif
164
165 ipf_main_softc_t ipfmain;
166
167 static u_short ipid = 0;
168 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
169 static int ipf_send_ip(fr_info_t *, mb_t *);
170 #ifdef USE_INET6
171 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
172 fr_info_t *, frdest_t *);
173 #endif
174
175 #if defined(NETBSD_PF)
176 # include <net/pfil.h>
177 /*
178 * We provide the ipf_checkp name just to minimize changes later.
179 */
180 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
181 #endif /* NETBSD_PF */
182
183 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
184 # include <net/pfil.h>
185
186 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
187
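/*
 * pfil(9) hook for IPv4: make the mbuf writable, perform any delayed
 * transmit checksums on outbound packets, and hand the packet to
 * ipf_check() along with the IP header length and the direction.
 */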
188 static int
189 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
190 {
191 struct ip *ip;
192 int rv, hlen;
193
194 #if __NetBSD_Version__ >= 200080000
195 /*
196 * Ensure the mbufs are writable beforehand, as the ipf code
197 * assumes this.
198 * XXX inefficient
199 */
200 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
201
202 if (error) {
203 m_freem(*mp);
204 *mp = NULL;
205 return error;
206 }
207 #endif
208 ip = mtod(*mp, struct ip *);
209 hlen = ip->ip_hl << 2;
210
211 #ifdef INET
212 #if defined(M_CSUM_TCPv4)
213 /*
214 * If the packet is out-bound, we can't delay checksums
215 * here. For in-bound, the checksum has already been
216 * validated.
217 */
218 if (dir == PFIL_OUT) {
219 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
220 in_delayed_cksum(*mp);
221 (*mp)->m_pkthdr.csum_flags &=
222 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
223 }
224 }
225 #endif /* M_CSUM_TCPv4 */
226 #endif /* INET */
227
228 /*
229 * Note, we don't need to update the checksum, because
230 * it has already been verified.
231 */
232 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
233
234 return (rv);
235 }
236
237 # ifdef USE_INET6
238 # include <netinet/ip6.h>
239
240 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
241
242 static int
243 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
244 {
245 #if defined(INET6)
246 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
247 /*
248 * If the packet is out-bound, we can't delay checksums
249 * here. For in-bound, the checksum has already been
250 * validated.
251 */
252 if (dir == PFIL_OUT) {
253 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
254 # if (__NetBSD_Version__ > 399000600)
255 in6_delayed_cksum(*mp);
256 # endif
257 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
258 M_CSUM_UDPv6);
259 }
260 }
261 # endif
262 #endif /* INET6 */
263
264 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
265 ifp, (dir == PFIL_OUT), mp));
266 }
267 # endif
268
269
270 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
271
272 # if (__NetBSD_Version__ >= 799000400)
273
274 static void ipf_pfilsync(void *, unsigned long, void *);
275
276 static void
277 ipf_pfilsync(void *hdr, unsigned long cmd, void *arg2)
278 {
279 /*
280 * The interface pointer is of no use at create time (we have nothing
281 * to compare it against). At detach time the interface name is still
282 * in the list of active NICs (albeit down, which is no real indicator)
283 * and ifunit() on the name will still return the pointer, so it is of
284 * little use then, either.
285 */
286 ipf_sync(&ipfmain, NULL);
287 }
288
289 # else
290
291 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
292
293 static int
294 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
295 {
296 ipf_sync(&ipfmain, NULL);
297 return 0;
298 }
299
300 # endif
301 # endif
302
303 #endif /* __NetBSD_Version__ >= 105110000 */
304
305
306 #if defined(IPFILTER_LKM)
307 int
308 ipf_identify(s)
309 char *s;
310 {
311 if (strcmp(s, "ipl") == 0)
312 return 1;
313 return 0;
314 }
315 #endif /* IPFILTER_LKM */
316
317 #if (__NetBSD_Version__ >= 599002000)
318 static int
319 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
320 void *arg0, void *arg1, void *arg2, void *arg3)
321 {
322 int result;
323 enum kauth_network_req req;
324
325 result = KAUTH_RESULT_DEFER;
326 req = (enum kauth_network_req)arg0;
327
328 if (action != KAUTH_NETWORK_FIREWALL)
329 return result;
330
331 /* These must have come from device context. */
332 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
333 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
334 result = KAUTH_RESULT_ALLOW;
335
336 return result;
337 }
338 #endif
339
340 /*
341 * Attach entry point used when ipfilter is configured as a pseudo-device.
342 */
343 void
344 ipfilterattach(int count)
345 {
346
347 #if (__NetBSD_Version__ >= 799003000)
348 return;
349 #else
350 #if (__NetBSD_Version__ >= 599002000)
351 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
352 ipf_listener_cb, NULL);
353 #endif
354
355 if (ipf_load_all() == 0)
356 (void) ipf_create_all(&ipfmain);
357 #endif
358 }
359
360
361 int
362 ipfattach(ipf_main_softc_t *softc)
363 {
364 SPL_INT(s);
365 #if (__NetBSD_Version__ >= 499005500)
366 int i;
367 #endif
368 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
369 int error = 0;
370 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
371 pfil_head_t *ph_inet;
372 # ifdef USE_INET6
373 pfil_head_t *ph_inet6;
374 # endif
375 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
376 pfil_head_t *ph_ifsync;
377 # endif
378 # endif
379 #endif
380
381 SPL_NET(s);
382 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
383 printf("IP Filter: already initialized\n");
384 SPL_X(s);
385 IPFERROR(130017);
386 return EBUSY;
387 }
388
389 if (ipf_init_all(softc) < 0) {
390 SPL_X(s);
391 IPFERROR(130015);
392 return EIO;
393 }
394
395 #ifdef NETBSD_PF
396 # if (__NetBSD_Version__ >= 104200000)
397 # if __NetBSD_Version__ >= 105110000
398 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
399 # ifdef USE_INET6
400 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
401 # endif
402 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
403 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
404 # endif
405
406 if (ph_inet == NULL
407 # ifdef USE_INET6
408 && ph_inet6 == NULL
409 # endif
410 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
411 && ph_ifsync == NULL
412 # endif
413 ) {
414 SPL_X(s);
415 IPFERROR(130016);
416 return ENODEV;
417 }
418
419 if (ph_inet != NULL)
420 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
421 PFIL_IN|PFIL_OUT, ph_inet);
422 else
423 error = 0;
424 # else
425 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
426 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
427 # endif
428 if (error) {
429 IPFERROR(130013);
430 goto pfil_error;
431 }
432 # else
433 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
434 # endif
435
436 # ifdef USE_INET6
437 # if __NetBSD_Version__ >= 105110000
438 if (ph_inet6 != NULL)
439 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
440 PFIL_IN|PFIL_OUT, ph_inet6);
441 else
442 error = 0;
443 if (error) {
444 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
445 PFIL_IN|PFIL_OUT, ph_inet6);
446 ipfmain.ipf_interror = 130014;
447 goto pfil_error;
448 }
449 # else
450 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
451 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
452 if (error) {
453 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
454 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
455 IPFERROR(130014);
456 goto pfil_error;
457 }
458 # endif
459 # endif
460
461 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
462 if (ph_ifsync != NULL)
463 #if (__NetBSD_Version__ >= 799000400)
464 (void) pfil_add_ihook((void *)ipf_pfilsync, NULL,
465 PFIL_IFNET, ph_ifsync);
466 #else
467 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
468 PFIL_IFNET, ph_ifsync);
469 #endif
470 # endif
471 #endif
472
473 #if (__NetBSD_Version__ >= 499005500)
474 for (i = 0; i < IPL_LOGSIZE; i++)
475 selinit(&ipfmain.ipf_selwait[i]);
476 #else
477 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
478 #endif
479 ipf_savep = ipf_checkp;
480 ipf_checkp = ipf_check;
481
482 #ifdef INET
483 if (softc->ipf_control_forwarding & 1)
484 ipforwarding = 1;
485 #endif
486
487 ipid = 0;
488
489 SPL_X(s);
490
491 #if (__NetBSD_Version__ >= 104010000)
492 # if (__NetBSD_Version__ >= 499002000)
493 callout_init(&softc->ipf_slow_ch, 0);
494 # else
495 callout_init(&softc->ipf_slow_ch);
496 # endif
497 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
498 ipf_timer_func, softc);
499 #else
500 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
501 #endif
502
503 return 0;
504
505 #if __NetBSD_Version__ >= 105110000
506 pfil_error:
507 SPL_X(s);
508 ipf_fini_all(softc);
509 return error;
510 #endif
511 }
512
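/*
 * Periodic housekeeping: run ipf_slowtimer() to expire state, NAT and
 * fragment table entries, then re-arm the callout for as long as the
 * filter is running (ipf_running == 1) or shutting down (-1).
 */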
513 static void
514 ipf_timer_func(void *ptr)
515 {
516 ipf_main_softc_t *softc = ptr;
517 SPL_INT(s);
518
519 SPL_NET(s);
520 READ_ENTER(&softc->ipf_global);
521
522 if (softc->ipf_running > 0)
523 ipf_slowtimer(softc);
524
525 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
526 #if NETBSD_GE_REV(104240000)
527 callout_reset(&softc->ipf_slow_ch, hz / 2,
528 ipf_timer_func, softc);
529 #else
530 timeout(ipf_timer_func, softc,
531 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
532 #endif
533 }
534 RWLOCK_EXIT(&softc->ipf_global);
535 SPL_X(s);
536 }
537
538
539 /*
540 * Disable the filter by removing the hooks from the IP input/output
541 * stream.
542 */
543 int
544 ipfdetach(ipf_main_softc_t *softc)
545 {
546 SPL_INT(s);
547 #if (__NetBSD_Version__ >= 499005500)
548 int i;
549 #endif
550 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
551 int error = 0;
552 # if __NetBSD_Version__ >= 105150000
553 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
554 # ifdef USE_INET6
555 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
556 # endif
557 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
558 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
559 # endif
560 # endif
561 #endif
562
563 SPL_NET(s);
564
565 #if (__NetBSD_Version__ >= 104010000)
566 if (softc->ipf_running > 0)
567 callout_stop(&softc->ipf_slow_ch);
568 #else
569 untimeout(ipf_slowtimer, NULL);
570 #endif /* NetBSD */
571
572 ipf_checkp = ipf_savep;
573 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
574 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
575
576 #ifdef INET
577 if (softc->ipf_control_forwarding & 2)
578 ipforwarding = 0;
579 #endif
580
581 #ifdef NETBSD_PF
582 # if (__NetBSD_Version__ >= 104200000)
583 # if __NetBSD_Version__ >= 105110000
584 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
585 # if __NetBSD_Version__ >= 799000400
586 (void) pfil_remove_ihook((void *)ipf_pfilsync, NULL,
587 PFIL_IFNET, ph_ifsync);
588 # else
589 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
590 PFIL_IFNET, ph_ifsync);
591 # endif
592 # endif
593
594 if (ph_inet != NULL)
595 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
596 PFIL_IN|PFIL_OUT, ph_inet);
597 else
598 error = 0;
599 # else
600 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
601 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
602 # endif
603 if (error) {
604 SPL_X(s);
605 IPFERROR(130011);
606 return error;
607 }
608 # else
609 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
610 # endif
611 # ifdef USE_INET6
612 # if __NetBSD_Version__ >= 105110000
613 if (ph_inet6 != NULL)
614 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
615 PFIL_IN|PFIL_OUT, ph_inet6);
616 else
617 error = 0;
618 # else
619 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
620 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
621 # endif
622 if (error) {
623 SPL_X(s);
624 IPFERROR(130012);
625 return error;
626 }
627 # endif
628 #endif
629 SPL_X(s);
630
631 #if (__NetBSD_Version__ >= 499005500)
632 for (i = 0; i < IPL_LOGSIZE; i++)
633 seldestroy(&ipfmain.ipf_selwait[i]);
634 #endif
635
636 ipf_fini_all(softc);
637
638 return 0;
639 }
640
641
642 /*
643 * Filter ioctl interface.
644 */
645 int
646 ipfioctl(dev_t dev, u_long cmd,
647 #if (__NetBSD_Version__ >= 499001000)
648 void *data,
649 #else
650 caddr_t data,
651 #endif
652 int mode
653 #if (NetBSD >= 199511)
654 # if (__NetBSD_Version__ >= 399001400)
655 , struct lwp *p
656 # if (__NetBSD_Version__ >= 399002000)
657 # define UID(l) kauth_cred_getuid((l)->l_cred)
658 # else
659 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
660 # endif
661 # else
662 , struct proc *p
663 # define UID(p) ((p)->p_cred->p_ruid)
664 # endif
665 #endif
666 )
667 {
668 int error = 0, unit = 0;
669 SPL_INT(s);
670
671 #if (__NetBSD_Version__ >= 399002000)
672 if ((mode & FWRITE) &&
673 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
674 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
675 NULL, NULL)) {
676 ipfmain.ipf_interror = 130005;
677 return EPERM;
678 }
679 #else
680 if ((securelevel >= 2) && (mode & FWRITE)) {
681 ipfmain.ipf_interror = 130001;
682 return EPERM;
683 }
684 #endif
685
686 unit = GET_MINOR(dev);
687 if ((IPL_LOGMAX < unit) || (unit < 0)) {
688 ipfmain.ipf_interror = 130002;
689 return ENXIO;
690 }
691
692 if (ipfmain.ipf_running <= 0) {
693 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
694 ipfmain.ipf_interror = 130003;
695 return EIO;
696 }
697 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
698 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
699 cmd != SIOCGETFS && cmd != SIOCGETFF &&
700 cmd != SIOCIPFINTERROR) {
701 ipfmain.ipf_interror = 130004;
702 return EIO;
703 }
704 }
705
706 SPL_NET(s);
707
708 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
709 if (error != -1) {
710 SPL_X(s);
711 return error;
712 }
713
714 SPL_X(s);
715 return error;
716 }
717
718
719 /*
720 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
721 * requires a large amount of setting up and isn't any more efficient.
722 */
723 int
724 ipf_send_reset(fr_info_t *fin)
725 {
726 struct tcphdr *tcp, *tcp2;
727 int tlen = 0, hlen;
728 struct mbuf *m;
729 #ifdef USE_INET6
730 ip6_t *ip6;
731 #endif
732 ip_t *ip;
733
734 tcp = fin->fin_dp;
735 if (tcp->th_flags & TH_RST)
736 return -1; /* feedback loop */
737
738 if (ipf_checkl4sum(fin) == -1)
739 return -1;
740
741 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
742 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
743 ((tcp->th_flags & TH_FIN) ? 1 : 0);
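	/*
	 * SYN and FIN each occupy one unit of sequence space, so they are
	 * counted into tlen above when computing the ACK value for the RST.
	 */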
744
745 #ifdef USE_INET6
746 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
747 #else
748 hlen = sizeof(ip_t);
749 #endif
750 #ifdef MGETHDR
751 MGETHDR(m, M_DONTWAIT, MT_HEADER);
752 #else
753 MGET(m, M_DONTWAIT, MT_HEADER);
754 #endif
755 if (m == NULL)
756 return -1;
757 if (sizeof(*tcp2) + hlen > MHLEN) {
758 MCLGET(m, M_DONTWAIT);
759 if (m == NULL)
760 return -1;
761 if ((m->m_flags & M_EXT) == 0) {
762 FREE_MB_T(m);
763 return -1;
764 }
765 }
766
767 m->m_len = sizeof(*tcp2) + hlen;
768 m->m_data += max_linkhdr;
769 m->m_pkthdr.len = m->m_len;
770 m_reset_rcvif(m);
771 ip = mtod(m, struct ip *);
772 bzero((char *)ip, hlen);
773 #ifdef USE_INET6
774 ip6 = (ip6_t *)ip;
775 #endif
776 bzero((char *)ip, sizeof(*tcp2) + hlen);
777 tcp2 = (struct tcphdr *)((char *)ip + hlen);
778 tcp2->th_sport = tcp->th_dport;
779 tcp2->th_dport = tcp->th_sport;
780
781 if (tcp->th_flags & TH_ACK) {
782 tcp2->th_seq = tcp->th_ack;
783 tcp2->th_flags = TH_RST;
784 tcp2->th_ack = 0;
785 } else {
786 tcp2->th_seq = 0;
787 tcp2->th_ack = ntohl(tcp->th_seq);
788 tcp2->th_ack += tlen;
789 tcp2->th_ack = htonl(tcp2->th_ack);
790 tcp2->th_flags = TH_RST|TH_ACK;
791 }
792 tcp2->th_x2 = 0;
793 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
794 tcp2->th_win = tcp->th_win;
795 tcp2->th_sum = 0;
796 tcp2->th_urp = 0;
797
798 #ifdef USE_INET6
799 if (fin->fin_v == 6) {
800 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
801 ip6->ip6_plen = htons(sizeof(struct tcphdr));
802 ip6->ip6_nxt = IPPROTO_TCP;
803 ip6->ip6_hlim = 0;
804 ip6->ip6_src = fin->fin_dst6.in6;
805 ip6->ip6_dst = fin->fin_src6.in6;
806 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
807 sizeof(*ip6), sizeof(*tcp2));
808 return ipf_send_ip(fin, m);
809 }
810 #endif
811 #ifdef INET
812 ip->ip_p = IPPROTO_TCP;
813 ip->ip_len = htons(sizeof(struct tcphdr));
814 ip->ip_src.s_addr = fin->fin_daddr;
815 ip->ip_dst.s_addr = fin->fin_saddr;
816 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
817 ip->ip_len = hlen + sizeof(*tcp2);
818 return ipf_send_ip(fin, m);
819 #else
820 return 0;
821 #endif
822 }
823
824
825 /*
826 * Expects ip_len to be in host byte order when called.
827 */
828 static int
829 ipf_send_ip(fr_info_t *fin, mb_t *m)
830 {
831 fr_info_t fnew;
832 #ifdef INET
833 ip_t *oip;
834 #endif
835 ip_t *ip;
836 int hlen;
837
838 ip = mtod(m, ip_t *);
839 bzero((char *)&fnew, sizeof(fnew));
840 fnew.fin_main_soft = fin->fin_main_soft;
841
842 IP_V_A(ip, fin->fin_v);
843 switch (fin->fin_v)
844 {
845 #ifdef INET
846 case 4 :
847 oip = fin->fin_ip;
848 hlen = sizeof(*oip);
849 fnew.fin_v = 4;
850 fnew.fin_p = ip->ip_p;
851 fnew.fin_plen = ntohs(ip->ip_len);
852 HTONS(ip->ip_len);
853 IP_HL_A(ip, sizeof(*oip) >> 2);
854 ip->ip_tos = oip->ip_tos;
855 ip->ip_id = ipf_nextipid(fin);
856 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
857 ip->ip_ttl = ip_defttl;
858 ip->ip_sum = 0;
859 break;
860 #endif
861 #ifdef USE_INET6
862 case 6 :
863 {
864 ip6_t *ip6 = (ip6_t *)ip;
865
866 ip6->ip6_vfc = 0x60;
867 ip6->ip6_hlim = IPDEFTTL;
868
869 hlen = sizeof(*ip6);
870 fnew.fin_p = ip6->ip6_nxt;
871 fnew.fin_v = 6;
872 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
873 break;
874 }
875 #endif
876 default :
877 return EINVAL;
878 }
879 #ifdef KAME_IPSEC
880 m_reset_rcvif(m);
881 #endif
882
883 fnew.fin_ifp = fin->fin_ifp;
884 fnew.fin_flx = FI_NOCKSUM;
885 fnew.fin_m = m;
886 fnew.fin_ip = ip;
887 fnew.fin_mp = &m;
888 fnew.fin_hlen = hlen;
889 fnew.fin_dp = (char *)ip + hlen;
890 (void) ipf_makefrip(hlen, ip, &fnew);
891
892 return ipf_fastroute(m, &m, &fnew, NULL);
893 }
894
895
896 int
897 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
898 {
899 int err, hlen, xtra, iclen, ohlen, avail;
900 struct in_addr dst4;
901 struct icmp *icmp;
902 struct mbuf *m;
903 i6addr_t dst6;
904 void *ifp;
905 #ifdef USE_INET6
906 int code;
907 ip6_t *ip6;
908 #endif
909 ip_t *ip, *ip2;
910
911 if ((type < 0) || (type > ICMP_MAXTYPE))
912 return -1;
913
914 #ifdef USE_INET6
915 code = fin->fin_icode;
916 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
917 return -1;
918 #endif
919
920 if (ipf_checkl4sum(fin) == -1)
921 return -1;
922 #ifdef MGETHDR
923 MGETHDR(m, M_DONTWAIT, MT_HEADER);
924 #else
925 MGET(m, M_DONTWAIT, MT_HEADER);
926 #endif
927 if (m == NULL)
928 return -1;
929 avail = MHLEN;
930
931 xtra = 0;
932 hlen = 0;
933 ohlen = 0;
934 dst4.s_addr = 0;
935 ifp = fin->fin_ifp;
936 if (fin->fin_v == 4) {
937 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
938 switch (ntohs(fin->fin_data[0]) >> 8)
939 {
940 case ICMP_ECHO :
941 case ICMP_TSTAMP :
942 case ICMP_IREQ :
943 case ICMP_MASKREQ :
944 break;
945 default :
946 FREE_MB_T(m);
947 return 0;
948 }
949
950 if (dst == 0) {
951 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
952 &dst6, NULL) == -1) {
953 FREE_MB_T(m);
954 return -1;
955 }
956 dst4 = dst6.in4;
957 } else
958 dst4.s_addr = fin->fin_daddr;
959
960 hlen = sizeof(ip_t);
961 ohlen = fin->fin_hlen;
962 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
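		/*
		 * An ICMP error carries the offending packet's IP header plus
		 * at most the first 8 bytes of its payload (RFC 792), hence
		 * xtra below is capped at 8 for IPv4.
		 */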
963 if (fin->fin_hlen < fin->fin_plen)
964 xtra = MIN(fin->fin_dlen, 8);
965 else
966 xtra = 0;
967 }
968
969 #ifdef USE_INET6
970 else if (fin->fin_v == 6) {
971 hlen = sizeof(ip6_t);
972 ohlen = sizeof(ip6_t);
973 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
974 type = icmptoicmp6types[type];
975 if (type == ICMP6_DST_UNREACH)
976 code = icmptoicmp6unreach[code];
977
978 if (iclen + max_linkhdr + fin->fin_plen > avail) {
979 MCLGET(m, M_DONTWAIT);
980 if (m == NULL)
981 return -1;
982 if ((m->m_flags & M_EXT) == 0) {
983 FREE_MB_T(m);
984 return -1;
985 }
986 avail = MCLBYTES;
987 }
988 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
989 xtra = MIN(xtra, IPV6_MMTU - iclen);
990 if (dst == 0) {
991 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
992 &dst6, NULL) == -1) {
993 FREE_MB_T(m);
994 return -1;
995 }
996 } else
997 dst6 = fin->fin_dst6;
998 }
999 #endif
1000 else {
1001 FREE_MB_T(m);
1002 return -1;
1003 }
1004
1005 avail -= (max_linkhdr + iclen);
1006 if (avail < 0) {
1007 FREE_MB_T(m);
1008 return -1;
1009 }
1010 if (xtra > avail)
1011 xtra = avail;
1012 iclen += xtra;
1013 m->m_data += max_linkhdr;
1014 m_reset_rcvif(m);
1015 m->m_pkthdr.len = iclen;
1016 m->m_len = iclen;
1017 ip = mtod(m, ip_t *);
1018 icmp = (struct icmp *)((char *)ip + hlen);
1019 ip2 = (ip_t *)&icmp->icmp_ip;
1020
1021 icmp->icmp_type = type;
1022 icmp->icmp_code = fin->fin_icode;
1023 icmp->icmp_cksum = 0;
1024 #ifdef icmp_nextmtu
1025 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1026 if (fin->fin_mtu != 0) {
1027 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1028
1029 } else if (ifp != NULL) {
1030 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1031
1032 } else { /* make up a number... */
1033 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1034 }
1035 }
1036 #endif
1037
1038 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1039
1040 #if defined(M_CSUM_IPv4)
1041 /*
1042 * Clear any in-bound checksum flags for this packet.
1043 */
1044 m->m_pkthdr.csuminfo = 0;
1045 #endif /* M_CSUM_IPv4 */
1046
1047 #ifdef USE_INET6
1048 ip6 = (ip6_t *)ip;
1049 if (fin->fin_v == 6) {
1050 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1051 ip6->ip6_plen = htons(iclen - hlen);
1052 ip6->ip6_nxt = IPPROTO_ICMPV6;
1053 ip6->ip6_hlim = 0;
1054 ip6->ip6_src = dst6.in6;
1055 ip6->ip6_dst = fin->fin_src6.in6;
1056 if (xtra > 0)
1057 bcopy((char *)fin->fin_ip + ohlen,
1058 (char *)&icmp->icmp_ip + ohlen, xtra);
1059 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1060 sizeof(*ip6), iclen - hlen);
1061 } else
1062 #endif
1063 {
1064 ip->ip_p = IPPROTO_ICMP;
1065 ip->ip_src.s_addr = dst4.s_addr;
1066 ip->ip_dst.s_addr = fin->fin_saddr;
1067
1068 if (xtra > 0)
1069 bcopy((char *)fin->fin_ip + ohlen,
1070 (char *)&icmp->icmp_ip + ohlen, xtra);
1071 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1072 sizeof(*icmp) + 8);
1073 ip->ip_len = iclen;
1074 ip->ip_p = IPPROTO_ICMP;
1075 }
1076 err = ipf_send_ip(fin, m);
1077 return err;
1078 }
1079
1080
1081 /*
1082 * m0 - pointer to mbuf where the IP packet starts
1083 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1084 */
1085 int
1086 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1087 {
1088 register struct ip *ip, *mhip;
1089 register struct mbuf *m = *mpp;
1090 register struct route *ro;
1091 int len, off, error = 0, hlen, code;
1092 struct ifnet *ifp, *sifp;
1093 ipf_main_softc_t *softc;
1094 #if __NetBSD_Version__ >= 499001100
1095 union {
1096 struct sockaddr dst;
1097 struct sockaddr_in dst4;
1098 } u;
1099 #else
1100 struct sockaddr_in *dst4;
1101 #endif
1102 struct sockaddr *dst;
1103 u_short ip_off, ip_len;
1104 struct route iproute;
1105 struct rtentry *rt;
1106 frdest_t node;
1107 frentry_t *fr;
1108
1109 if (fin->fin_v == 6) {
1110 #ifdef USE_INET6
1111 error = ipf_fastroute6(m0, mpp, fin, fdp);
1112 #else
1113 error = EPROTONOSUPPORT;
1114 #endif
1115 if ((error != 0) && (*mpp != NULL))
1116 FREE_MB_T(*mpp);
1117 return error;
1118 }
1119 #ifndef INET
1120 FREE_MB_T(*mpp);
1121 return EPROTONOSUPPORT;
1122 #else
1123
1124 hlen = fin->fin_hlen;
1125 ip = mtod(m0, struct ip *);
1126 softc = fin->fin_main_soft;
1127 rt = NULL;
1128 ifp = NULL;
1129
1130 # if defined(M_CSUM_IPv4)
1131 /*
1132 * Clear any in-bound checksum flags for this packet.
1133 */
1134 m0->m_pkthdr.csuminfo = 0;
1135 # endif /* M_CSUM_IPv4 */
1136
1137 /*
1138 * Route packet.
1139 */
1140 ro = &iproute;
1141 memset(ro, 0, sizeof(*ro));
1142 fr = fin->fin_fr;
1143
1144 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1145 (fdp->fd_type == FRD_DSTLIST)) {
1146 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1147 fdp = &node;
1148 }
1149 if (fdp != NULL)
1150 ifp = fdp->fd_ptr;
1151 else
1152 ifp = fin->fin_ifp;
1153
1154 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1155 error = -2;
1156 goto bad;
1157 }
1158
1159 # if __NetBSD_Version__ >= 499001100
1160 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1161 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1162 else
1163 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1164 dst = &u.dst;
1165 rtcache_setdst(ro, dst);
1166 rt = rtcache_init(ro);
1167 # else
1168 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1169 dst = (struct sockaddr *)dst4;
1170 dst4->sin_family = AF_INET;
1171 dst4->sin_addr = ip->ip_dst;
1172
1173 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1174 dst4->sin_addr = fdp->fd_ip;
1175
1176 dst4->sin_len = sizeof(*dst);
1177 rtalloc(ro);
1178 rt = ro->ro_rt;
1179 # endif
1180 if ((ifp == NULL) && (rt != NULL))
1181 ifp = rt->rt_ifp;
1182 if ((rt == NULL) || (ifp == NULL)) {
1183 #ifdef INET
1184 if (in_localaddr(ip->ip_dst))
1185 error = EHOSTUNREACH;
1186 else
1187 #endif
1188 error = ENETUNREACH;
1189 goto bad;
1190 }
1191
1192
1193 if (rt->rt_flags & RTF_GATEWAY)
1194 dst = rt->rt_gateway;
1195
1196 rt->rt_use++;
1197
1198 /*
1199 * Input packets that are being "fastrouted" will not go back through
1200 * output filtering, so they would otherwise miss their chance to be
1201 * NAT'd and counted. Duplicated packets are not considered part of
1202 * the normal packet stream, so do not NAT them or pass them through
1203 * stateful checking, etc.
1204 */
1205 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1206 sifp = fin->fin_ifp;
1207 fin->fin_ifp = ifp;
1208 fin->fin_out = 1;
1209 (void) ipf_acctpkt(fin, NULL);
1210 fin->fin_fr = NULL;
1211 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1212 u_32_t pass;
1213
1214 (void) ipf_state_check(fin, &pass);
1215 }
1216
1217 switch (ipf_nat_checkout(fin, NULL))
1218 {
1219 case 0 :
1220 break;
1221 case 1 :
1222 ip->ip_sum = 0;
1223 break;
1224 case -1 :
1225 error = -1;
1226 goto bad;
1227 break;
1228 }
1229
1230 fin->fin_ifp = sifp;
1231 fin->fin_out = 0;
1232 } else
1233 ip->ip_sum = 0;
1234 /*
1235 * If small enough for interface, can just send directly.
1236 */
1237 m_set_rcvif(m, ifp);
1238
1239 ip_len = ntohs(ip->ip_len);
1240 if (ip_len <= ifp->if_mtu) {
1241 # if defined(M_CSUM_IPv4)
1242 # if (__NetBSD_Version__ >= 105009999)
1243 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1244 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1245 # else
1246 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1247 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1248 # endif /* (__NetBSD_Version__ >= 105009999) */
1249 else if (ip->ip_sum == 0)
1250 ip->ip_sum = in_cksum(m, hlen);
1251 # else
1252 if (!ip->ip_sum)
1253 ip->ip_sum = in_cksum(m, hlen);
1254 # endif /* M_CSUM_IPv4 */
1255
1256 error = if_output_lock(ifp, ifp, m, dst, rt);
1257 goto done;
1258 }
1259
1260 /*
1261 * Too large for interface; fragment if possible.
1262 * Must be able to put at least 8 bytes per fragment.
1263 */
1264 ip_off = ntohs(ip->ip_off);
1265 if (ip_off & IP_DF) {
1266 error = EMSGSIZE;
1267 goto bad;
1268 }
1269 len = (ifp->if_mtu - hlen) &~ 7;
1270 if (len < 8) {
1271 error = EMSGSIZE;
1272 goto bad;
1273 }
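	/*
	 * Fragment offsets are expressed in units of 8 bytes, so the per-
	 * fragment payload computed above must be a multiple of 8; e.g.
	 * with an MTU of 1500 and a 20 byte header, len becomes 1480.
	 */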
1274
1275 {
1276 int mhlen, firstlen = len;
1277 struct mbuf **mnext = &m->m_act;
1278
1279 /*
1280 * Loop through the remainder of the segment after the first fragment;
1281 * for each piece, make a new header, copy the data and link it onto the chain.
1282 */
1283 m0 = m;
1284 mhlen = sizeof (struct ip);
1285 for (off = hlen + len; off < ip_len; off += len) {
1286 # ifdef MGETHDR
1287 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1288 # else
1289 MGET(m, M_DONTWAIT, MT_HEADER);
1290 # endif
1291 if (m == 0) {
1292 m = m0;
1293 error = ENOBUFS;
1294 goto bad;
1295 }
1296 m->m_data += max_linkhdr;
1297 mhip = mtod(m, struct ip *);
1298 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1299 #ifdef INET
1300 if (hlen > sizeof (struct ip)) {
1301 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1302 IP_HL_A(mhip, mhlen >> 2);
1303 }
1304 #endif
1305 m->m_len = mhlen;
1306 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1307 if (off + len >= ip_len)
1308 len = ip_len - off;
1309 else
1310 mhip->ip_off |= IP_MF;
1311 mhip->ip_len = htons((u_short)(len + mhlen));
1312 m->m_next = m_copy(m0, off, len);
1313 if (m->m_next == 0) {
1314 error = ENOBUFS; /* ??? */
1315 goto sendorfree;
1316 }
1317 m->m_pkthdr.len = mhlen + len;
1318 m_reset_rcvif(m);
1319 mhip->ip_off = htons((u_short)mhip->ip_off);
1320 mhip->ip_sum = 0;
1321 #ifdef INET
1322 mhip->ip_sum = in_cksum(m, mhlen);
1323 #endif
1324 *mnext = m;
1325 mnext = &m->m_act;
1326 }
1327 /*
1328 * Update first fragment by trimming what's been copied out
1329 * and updating header, then send each fragment (in order).
1330 */
1331 m_adj(m0, hlen + firstlen - ip_len);
1332 ip->ip_len = htons((u_short)(hlen + firstlen));
1333 ip->ip_off = htons((u_short)IP_MF);
1334 ip->ip_sum = 0;
1335 #ifdef INET
1336 ip->ip_sum = in_cksum(m0, hlen);
1337 #endif
1338 sendorfree:
1339 for (m = m0; m; m = m0) {
1340 m0 = m->m_act;
1341 m->m_act = 0;
1342 if (error == 0) {
1343 KERNEL_LOCK(1, NULL);
1344 error = (*ifp->if_output)(ifp, m, dst, rt);
1345 KERNEL_UNLOCK_ONE(NULL);
1346 } else {
1347 FREE_MB_T(m);
1348 }
1349 }
1350 }
1351 done:
1352 if (!error)
1353 softc->ipf_frouteok[0]++;
1354 else
1355 softc->ipf_frouteok[1]++;
1356
1357 # if __NetBSD_Version__ >= 499001100
1358 rtcache_unref(rt, ro);
1359 rtcache_free(ro);
1360 # else
1361 if (rt) {
1362 RTFREE(rt);
1363 }
1364 # endif
1365 return error;
1366 bad:
1367 if (error == EMSGSIZE) {
1368 sifp = fin->fin_ifp;
1369 code = fin->fin_icode;
1370 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1371 fin->fin_ifp = ifp;
1372 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1373 fin->fin_ifp = sifp;
1374 fin->fin_icode = code;
1375 }
1376 FREE_MB_T(m);
1377 goto done;
1378 #endif /* INET */
1379 }
1380
1381
1382 #if defined(USE_INET6)
1383 /*
1384 * This is the IPv6-specific fastroute code. It does not clean up the mbufs
1385 * or ensure that the packet being forwarded is IPv6; both are expected to
1386 * be done by the caller (ipf_fastroute).
1387 */
1388 static int
1389 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1390 frdest_t *fdp)
1391 {
1392 # if __NetBSD_Version__ >= 499001100
1393 struct route ip6route;
1394 const struct sockaddr *dst;
1395 union {
1396 struct sockaddr dst;
1397 struct sockaddr_in6 dst6;
1398 } u;
1399 struct route *ro;
1400 # else
1401 struct route_in6 ip6route;
1402 struct sockaddr_in6 *dst6;
1403 struct route_in6 *ro;
1404 # endif
1405 struct rtentry *rt;
1406 struct ifnet *ifp;
1407 u_long mtu;
1408 int error;
1409
1410 error = 0;
1411 ro = &ip6route;
1412
1413 if (fdp != NULL)
1414 ifp = fdp->fd_ptr;
1415 else
1416 ifp = fin->fin_ifp;
1417 memset(ro, 0, sizeof(*ro));
1418 # if __NetBSD_Version__ >= 499001100
1419 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1420 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1421 else
1422 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1423 /* KAME */
1424 if (IN6_IS_ADDR_LINKLOCAL(&u.dst6.sin6_addr))
1425 u.dst6.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1426 dst = &u.dst;
1427 rtcache_setdst(ro, dst);
1428
1429 rt = rtcache_init(ro);
1430 if ((ifp == NULL) && (rt != NULL))
1431 ifp = rt->rt_ifp;
1432 # else
1433 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1434 dst6->sin6_family = AF_INET6;
1435 dst6->sin6_len = sizeof(struct sockaddr_in6);
1436 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1437 /* KAME */
1438 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1439 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1440
1441 if (fdp != NULL) {
1442 if (IP6_NOTZERO(&fdp->fd_ip6))
1443 dst6->sin6_addr = fdp->fd_ip6.in6;
1444 }
1445
1446 rtalloc((struct route *)ro);
1447
1448 if ((ifp == NULL) && (ro->ro_rt != NULL))
1449 ifp = ro->ro_rt->rt_ifp;
1450 rt = ro->ro_rt;
1451 # endif
1452 if ((rt == NULL) || (ifp == NULL)) {
1453
1454 error = EHOSTUNREACH;
1455 goto bad;
1456 }
1457
1458 {
1459 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
1460 struct in6_ifextra *ife;
1461 # endif
1462 if (rt->rt_flags & RTF_GATEWAY)
1463 # if __NetBSD_Version__ >= 499001100
1464 dst = rt->rt_gateway;
1465 # else
1466 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1467 # endif
1468 rt->rt_use++;
1469
1470 /* Determine path MTU. */
1471 # if (__NetBSD_Version__ <= 106009999)
1472 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1473 # else
1474 # ifdef IN6_LINKMTU
1475 mtu = IN6_LINKMTU(ifp);
1476 # else
1477 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1478 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1479 # endif
1480 # endif
1481 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1482 # if __NetBSD_Version__ >= 499001100
1483 error = ip6_if_output(ifp, ifp, m0, satocsin6(dst), rt);
1484 # else
1485 error = nd6_output(ifp, ifp, m0, dst6, rt);
1486 # endif
1487 } else {
1488 error = EMSGSIZE;
1489 }
1490 }
1491 bad:
1492 # if __NetBSD_Version__ >= 499001100
1493 rtcache_unref(rt, ro);
1494 rtcache_free(ro);
1495 # else
1496 if (ro->ro_rt != NULL) {
1497 RTFREE(((struct route *)ro)->ro_rt);
1498 }
1499 # endif
1500 return error;
1501 }
1502 #endif /* USE_INET6 */
1503
1504
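/*
 * Reverse-path check: look up the route back to the packet's source
 * address and report whether it points out the interface the packet
 * arrived on.
 */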
1505 int
1506 ipf_verifysrc(fr_info_t *fin)
1507 {
1508 #if __NetBSD_Version__ >= 499001100
1509 union {
1510 struct sockaddr dst;
1511 struct sockaddr_in dst4;
1512 } u;
1513 struct rtentry *rt;
1514 #else
1515 struct sockaddr_in *dst;
1516 #endif
1517 struct route iproute;
1518 int rc;
1519
1520 #if __NetBSD_Version__ >= 499001100
1521 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1522 rtcache_setdst(&iproute, &u.dst);
1523 rt = rtcache_init(&iproute);
1524 if (rt == NULL)
1525 rc = 0;
1526 else
1527 rc = (fin->fin_ifp == rt->rt_ifp);
1528 rtcache_unref(rt, &iproute);
1529 rtcache_free(&iproute);
1530 #else
1531 dst = (struct sockaddr_in *)&iproute.ro_dst;
1532 dst->sin_len = sizeof(*dst);
1533 dst->sin_family = AF_INET;
1534 dst->sin_addr = fin->fin_src;
1535 rtalloc(&iproute);
1536 if (iproute.ro_rt == NULL)
1537 return 0;
1538 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1539 RTFREE(iproute.ro_rt);
1540 #endif
1541 return rc;
1542 }
1543
1544
1545 /*
1546 * Return the first IP address associated with an interface.
1547 */
1548 int
1549 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1550 i6addr_t *inp, i6addr_t *inpmask)
1551 {
1552 #ifdef USE_INET6
1553 struct in6_addr *inp6 = NULL;
1554 #endif
1555 struct sockaddr *sock, *mask;
1556 struct sockaddr_in *sin;
1557 struct ifaddr *ifa;
1558 struct ifnet *ifp;
1559
1560 if ((ifptr == NULL) || (ifptr == (void *)-1))
1561 return -1;
1562
1563 ifp = ifptr;
1564 mask = NULL;
1565
1566 if (v == 4)
1567 inp->in4.s_addr = 0;
1568 #ifdef USE_INET6
1569 else if (v == 6)
1570 bzero((char *)inp, sizeof(*inp));
1571 #endif
1572
1573 ifa = IFADDR_READER_FIRST(ifp);
1574 sock = ifa ? ifa->ifa_addr : NULL;
1575 while (sock != NULL && ifa != NULL) {
1576 sin = (struct sockaddr_in *)sock;
1577 if ((v == 4) && (sin->sin_family == AF_INET))
1578 break;
1579 #ifdef USE_INET6
1580 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1581 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1582 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1583 !IN6_IS_ADDR_LOOPBACK(inp6))
1584 break;
1585 }
1586 #endif
1587 ifa = IFADDR_READER_NEXT(ifa);
1588 if (ifa != NULL)
1589 sock = ifa->ifa_addr;
1590 }
1591 if (ifa == NULL || sock == NULL)
1592 return -1;
1593
1594 mask = ifa->ifa_netmask;
1595 if (atype == FRI_BROADCAST)
1596 sock = ifa->ifa_broadaddr;
1597 else if (atype == FRI_PEERADDR)
1598 sock = ifa->ifa_dstaddr;
1599
1600 #ifdef USE_INET6
1601 if (v == 6)
1602 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1603 (struct sockaddr_in6 *)mask,
1604 inp, inpmask);
1605 #endif
1606 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1607 (struct sockaddr_in *)mask,
1608 &inp->in4, &inpmask->in4);
1609 }
1610
1611
1612 u_32_t
1613 ipf_newisn(fr_info_t *fin)
1614 {
1615 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1616 size_t asz;
1617
1618 if (fin->fin_v == 4)
1619 asz = sizeof(struct in_addr);
1620 else if (fin->fin_v == 6)
1621 asz = sizeof(fin->fin_src);
1622 else /* XXX: no way to return error */
1623 return 0;
1624 #ifdef INET
1625 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1626 fin->fin_sport, fin->fin_dport, asz, 0);
1627 #else
1628 return ENOSYS;
1629 #endif
1630 #else
1631 static int iss_seq_off = 0;
1632 u_char hash[16];
1633 u_32_t newiss;
1634 MD5_CTX ctx;
1635
1636 /*
1637 * Compute the base value of the ISS. It is a hash
1638 * of (saddr, sport, daddr, dport, secret).
1639 */
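	/*
	 * This is broadly in the spirit of RFC 1948: ISN = hash of the
	 * connection identifiers plus a secret, with a locally incremented
	 * offset added below.
	 */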
1640 MD5Init(&ctx);
1641
1642 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1643 sizeof(fin->fin_fi.fi_src));
1644 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1645 sizeof(fin->fin_fi.fi_dst));
1646 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1647
1648 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1649
1650 MD5Final(hash, &ctx);
1651
1652 memcpy(&newiss, hash, sizeof(newiss));
1653
1654 /*
1655 * Now increment our "timer", and add it in to
1656 * the computed value.
1657 *
1658 * XXX Use `addin'?
1659 * XXX TCP_ISSINCR too large to use?
1660 */
1661 iss_seq_off += 0x00010000;
1662 newiss += iss_seq_off;
1663 return newiss;
1664 #endif
1665 }
1666
1667
1668 /* ------------------------------------------------------------------------ */
1669 /* Function: ipf_nextipid */
1670 /* Returns: u_short - the next IPv4 ident to use for this packet */
1671 /* Parameters: fin(I) - pointer to packet information */
1672 /* */
1673 /* Returns the next IPv4 ID to use for this packet. */
1674 /* ------------------------------------------------------------------------ */
1675 u_short
1676 ipf_nextipid(fr_info_t *fin)
1677 {
1678 #ifdef USE_MUTEXES
1679 ipf_main_softc_t *softc = fin->fin_main_soft;
1680 #endif
1681 u_short id;
1682
1683 MUTEX_ENTER(&softc->ipf_rw);
1684 id = ipid++;
1685 MUTEX_EXIT(&softc->ipf_rw);
1686
1687 return id;
1688 }
1689
1690
1691 EXTERN_INLINE int
1692 ipf_checkv4sum(fr_info_t *fin)
1693 {
1694 #ifdef M_CSUM_TCP_UDP_BAD
1695 int manual, pflag, cflags, active;
1696 mb_t *m;
1697
1698 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1699 return 0;
1700
1701 if ((fin->fin_flx & FI_SHORT) != 0)
1702 return 1;
1703
1704 if (fin->fin_cksum != FI_CK_NEEDED)
1705 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1706
1707 manual = 0;
1708 m = fin->fin_m;
1709 if (m == NULL) {
1710 manual = 1;
1711 goto skipauto;
1712 }
1713
1714 switch (fin->fin_p)
1715 {
1716 case IPPROTO_UDP :
1717 pflag = M_CSUM_UDPv4;
1718 break;
1719 case IPPROTO_TCP :
1720 pflag = M_CSUM_TCPv4;
1721 break;
1722 default :
1723 pflag = 0;
1724 manual = 1;
1725 break;
1726 }
1727
1728 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1729 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1730 cflags = m->m_pkthdr.csum_flags & active;
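	/*
	 * The combinations tested below, as interpreted here: pflag alone
	 * means the hardware verified the checksum, pflag|M_CSUM_DATA means
	 * csum_data carries the hardware-computed checksum (0xffff when the
	 * packet is good), and pflag|M_CSUM_TCP_UDP_BAD means the hardware
	 * already flagged the checksum as bad.
	 */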
1731
1732 if (pflag != 0) {
1733 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1734 fin->fin_flx |= FI_BAD;
1735 fin->fin_cksum = FI_CK_BAD;
1736 } else if (cflags == (pflag | M_CSUM_DATA)) {
1737 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1738 fin->fin_flx |= FI_BAD;
1739 fin->fin_cksum = FI_CK_BAD;
1740 } else {
1741 fin->fin_cksum = FI_CK_SUMOK;
1742 }
1743 } else if (cflags == pflag) {
1744 fin->fin_cksum = FI_CK_SUMOK;
1745 } else {
1746 manual = 1;
1747 }
1748 }
1749 skipauto:
1750 if (manual != 0) {
1751 if (ipf_checkl4sum(fin) == -1) {
1752 fin->fin_flx |= FI_BAD;
1753 return -1;
1754 }
1755 }
1756 #else
1757 if (ipf_checkl4sum(fin) == -1) {
1758 fin->fin_flx |= FI_BAD;
1759 return -1;
1760 }
1761 #endif
1762 return 0;
1763 }
1764
1765
1766 #ifdef USE_INET6
1767 EXTERN_INLINE int
1768 ipf_checkv6sum(fr_info_t *fin)
1769 {
1770 # ifdef M_CSUM_TCP_UDP_BAD
1771 int manual, pflag, cflags, active;
1772 mb_t *m;
1773
1774 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1775 return 0;
1776
1777 if ((fin->fin_flx & FI_SHORT) != 0)
1778 return 1;
1779
1780 if (fin->fin_cksum != FI_CK_SUMOK)
1781 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1782
1783
1784 manual = 0;
1785 m = fin->fin_m;
1786
1787 switch (fin->fin_p)
1788 {
1789 case IPPROTO_UDP :
1790 pflag = M_CSUM_UDPv6;
1791 break;
1792 case IPPROTO_TCP :
1793 pflag = M_CSUM_TCPv6;
1794 break;
1795 default :
1796 pflag = 0;
1797 manual = 1;
1798 break;
1799 }
1800
1801 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1802 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1803 cflags = m->m_pkthdr.csum_flags & active;
1804
1805 if (pflag != 0) {
1806 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1807 fin->fin_flx |= FI_BAD;
1808 } else if (cflags == (pflag | M_CSUM_DATA)) {
1809 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1810 fin->fin_flx |= FI_BAD;
1811 } else if (cflags == pflag) {
1812 ;
1813 } else {
1814 manual = 1;
1815 }
1816 }
1817 if (manual != 0) {
1818 if (ipf_checkl4sum(fin) == -1) {
1819 fin->fin_flx |= FI_BAD;
1820 return -1;
1821 }
1822 }
1823 # else
1824 if (ipf_checkl4sum(fin) == -1) {
1825 fin->fin_flx |= FI_BAD;
1826 return -1;
1827 }
1828 # endif
1829 return 0;
1830 }
1831 #endif /* USE_INET6 */
1832
1833
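/*
 * Return the total number of bytes in an mbuf chain: the cached
 * pkthdr.len if the first mbuf has a packet header, otherwise the sum
 * of the individual m_len values.
 */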
1834 size_t
1835 mbufchainlen(struct mbuf *m0)
1836 {
1837 size_t len;
1838
1839 if ((m0->m_flags & M_PKTHDR) != 0) {
1840 len = m0->m_pkthdr.len;
1841 } else {
1842 struct mbuf *m;
1843
1844 for (m = m0, len = 0; m != NULL; m = m->m_next)
1845 len += m->m_len;
1846 }
1847 return len;
1848 }
1849
1850
1851 /* ------------------------------------------------------------------------ */
1852 /* Function: ipf_pullup */
1853 /* Returns: NULL == pullup failed, else pointer to protocol header */
1854 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1855 /* fin(I) - pointer to packet information */
1856 /* len(I) - number of bytes to pullup */
1857 /* */
1858 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1859 /* single buffer for ease of access. Operating system native functions are */
1860 /* used to manage buffers - if necessary. If the entire packet ends up in */
1861 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1862 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1863 /* and ONLY if the pullup succeeds. */
1864 /* */
1865 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1866 /* of buffers that starts at *fin->fin_mp. */
1867 /* ------------------------------------------------------------------------ */
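/*
 * Illustrative only (real call sites vary): a caller that needs a
 * contiguous TCP header might use
 *	ipf_pullup(fin->fin_m, fin, fin->fin_hlen + sizeof(tcphdr_t))
 * and treat a NULL return as "packet already freed, fin_m cleared".
 */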
1868 void *
1869 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1870 {
1871 int dpoff, ipoff;
1872 mb_t *m = xmin;
1873 char *ip;
1874
1875 if (m == NULL)
1876 return NULL;
1877
1878 ip = (char *)fin->fin_ip;
1879 if ((fin->fin_flx & FI_COALESCE) != 0)
1880 return ip;
1881
1882 ipoff = fin->fin_ipoff;
1883 if (fin->fin_dp != NULL)
1884 dpoff = (char *)fin->fin_dp - (char *)ip;
1885 else
1886 dpoff = 0;
1887
1888 if (M_LEN(m) < len) {
1889 mb_t *n = *fin->fin_mp;
1890 /*
1891 * Assume that M_PKTHDR is set and just work with what is left
1892 * rather than checking; it should not make any real difference
1893 * anyway.
1894 */
1895 if (m != n) {
1896 /*
1897 * Record the mbuf that points to the mbuf that we're
1898 * about to go to work on so that we can update the
1899 * m_next appropriately later.
1900 */
1901 for (; n->m_next != m; n = n->m_next)
1902 ;
1903 } else {
1904 n = NULL;
1905 }
1906
1907 #ifdef MHLEN
1908 if (len > MHLEN)
1909 #else
1910 if (len > MLEN)
1911 #endif
1912 {
1913 #ifdef HAVE_M_PULLDOWN
1914 if (m_pulldown(m, 0, len, NULL) == NULL)
1915 m = NULL;
1916 #else
1917 FREE_MB_T(*fin->fin_mp);
1918 m = NULL;
1919 n = NULL;
1920 #endif
1921 } else
1922 {
1923 m = m_pullup(m, len);
1924 }
1925 if (n != NULL)
1926 n->m_next = m;
1927 if (m == NULL) {
1928 /*
1929 * When n is non-NULL, it indicates that m pointed to
1930 * a sub-chain (tail) of the mbuf and that the head
1931 * of this chain has not yet been freed.
1932 */
1933 if (n != NULL) {
1934 FREE_MB_T(*fin->fin_mp);
1935 }
1936
1937 *fin->fin_mp = NULL;
1938 fin->fin_m = NULL;
1939 return NULL;
1940 }
1941
1942 if (n == NULL)
1943 *fin->fin_mp = m;
1944
1945 while (M_LEN(m) == 0) {
1946 m = m->m_next;
1947 }
1948 fin->fin_m = m;
1949 ip = MTOD(m, char *) + ipoff;
1950
1951 fin->fin_ip = (ip_t *)ip;
1952 if (fin->fin_dp != NULL)
1953 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1954 if (fin->fin_fraghdr != NULL)
1955 fin->fin_fraghdr = (char *)ip +
1956 ((char *)fin->fin_fraghdr -
1957 (char *)fin->fin_ip);
1958 }
1959
1960 if (len == fin->fin_plen)
1961 fin->fin_flx |= FI_COALESCE;
1962 return ip;
1963 }
1964
1965
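/*
 * Re-inject a packet into the stack: inbound packets are queued onto
 * the IP input packet queue, outbound packets are transmitted directly
 * via ip_output() with IP_FORWARDING set.
 */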
1966 int
1967 ipf_inject(fr_info_t *fin, mb_t *m)
1968 {
1969 int error;
1970
1971 if (fin->fin_out == 0) {
1972 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1973 FREE_MB_T(m);
1974 error = ENOBUFS;
1975 } else {
1976 error = 0;
1977 }
1978 } else {
1979 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1980 }
1981 return error;
1982 }
1983
1984
1985 u_32_t
1986 ipf_random(void)
1987 {
1988 int number;
1989
1990 #ifdef _CPRNG_H
1991 number = cprng_fast32();
1992 #else
1993 number = arc4random();
1994 #endif
1995 return number;
1996 }
1997
1998
1999 /*
2000 * The routines below implement the ipf character device interface
2001 * (open, close, read, write and poll).
2002 static int ipfopen(dev_t dev, int flags
2003 #if (NetBSD >= 199511)
2004 , int devtype, PROC_T *p
2005 #endif
2006 )
2007 {
2008 u_int unit = GET_MINOR(dev);
2009 int error;
2010
2011 if (IPL_LOGMAX < unit) {
2012 error = ENXIO;
2013 } else {
2014 switch (unit)
2015 {
2016 case IPL_LOGIPF :
2017 case IPL_LOGNAT :
2018 case IPL_LOGSTATE :
2019 case IPL_LOGAUTH :
2020 case IPL_LOGLOOKUP :
2021 case IPL_LOGSYNC :
2022 #ifdef IPFILTER_SCAN
2023 case IPL_LOGSCAN :
2024 #endif
2025 error = 0;
2026 break;
2027 default :
2028 error = ENXIO;
2029 break;
2030 }
2031 }
2032 #if (__NetBSD_Version__ >= 799003000)
2033 if (error == 0) {
2034 mutex_enter(&ipf_ref_mutex);
2035 ipf_active = 1;
2036 mutex_exit(&ipf_ref_mutex);
2037 }
2038 #endif
2039 return error;
2040 }
2041
2042
2043 static int ipfclose(dev_t dev, int flags
2044 #if (NetBSD >= 199511)
2045 , int devtype, PROC_T *p
2046 #endif
2047 )
2048 {
2049 u_int unit = GET_MINOR(dev);
2050
2051 if (IPL_LOGMAX < unit)
2052 return ENXIO;
2053 else {
2054 #if (__NetBSD_Version__ >= 799003000)
2055 mutex_enter(&ipf_ref_mutex);
2056 ipf_active = 0;
2057 mutex_exit(&ipf_ref_mutex);
2058 #endif
2059 return 0;
2060 }
2061 }
2062
2063 /*
2064 * ipfread/ipflog
2065 * both of these must operate with at least splnet() lest they be
2066 * called during packet processing and cause an inconsistency to appear
2067 * in the filter lists.
2068 */
2069 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2070 {
2071
2072 if (ipfmain.ipf_running < 1) {
2073 ipfmain.ipf_interror = 130006;
2074 return EIO;
2075 }
2076
2077 if (GET_MINOR(dev) == IPL_LOGSYNC)
2078 return ipf_sync_read(&ipfmain, uio);
2079
2080 #ifdef IPFILTER_LOG
2081 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2082 #else
2083 ipfmain.ipf_interror = 130007;
2084 return ENXIO;
2085 #endif
2086 }
2087
2088
2089 /*
2090 * ipfwrite
2091 * must operate with at least splnet() lest it be
2092 * called during packet processing and cause an inconsistency to appear
2093 * in the filter lists.
2094 */
2095 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2096 {
2097
2098 if (ipfmain.ipf_running < 1) {
2099 ipfmain.ipf_interror = 130008;
2100 return EIO;
2101 }
2102
2103 if (GET_MINOR(dev) == IPL_LOGSYNC)
2104 return ipf_sync_write(&ipfmain, uio);
2105 ipfmain.ipf_interror = 130009;
2106 return ENXIO;
2107 }
2108
2109
2110 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2111 {
2112 u_int unit = GET_MINOR(dev);
2113 int revents = 0;
2114
2115 if (IPL_LOGMAX < unit) {
2116 ipfmain.ipf_interror = 130010;
2117 return ENXIO;
2118 }
2119
2120 switch (unit)
2121 {
2122 case IPL_LOGIPF :
2123 case IPL_LOGNAT :
2124 case IPL_LOGSTATE :
2125 #ifdef IPFILTER_LOG
2126 if ((events & (POLLIN | POLLRDNORM)) &&
2127 ipf_log_canread(&ipfmain, unit))
2128 revents |= events & (POLLIN | POLLRDNORM);
2129 #endif
2130 break;
2131 case IPL_LOGAUTH :
2132 if ((events & (POLLIN | POLLRDNORM)) &&
2133 ipf_auth_waiting(&ipfmain))
2134 revents |= events & (POLLIN | POLLRDNORM);
2135 break;
2136 case IPL_LOGSYNC :
2137 if ((events & (POLLIN | POLLRDNORM)) &&
2138 ipf_sync_canread(&ipfmain))
2139 revents |= events & (POLLIN | POLLRDNORM);
2140 if ((events & (POLLOUT | POLLWRNORM)) &&
2141 ipf_sync_canwrite(&ipfmain))
2142 revents |= events & (POLLOUT | POLLWRNORM);
2143 break;
2144 case IPL_LOGSCAN :
2145 case IPL_LOGLOOKUP :
2146 default :
2147 break;
2148 }
2149
2150 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2151 selrecord(p, &ipfmain.ipf_selwait[unit]);
2152 return revents;
2153 }
2154
2155 u_int
2156 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2157 {
2158 struct mbuf *m;
2159 u_int sum2;
2160 int off;
2161
2162 m = fin->fin_m;
2163 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
2164 m->m_data += hlen;
2165 m->m_len -= hlen;
2166 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2167 m->m_len += hlen;
2168 m->m_data -= hlen;
2169
2170 /*
2171 * Both sum and sum2 are partial sums, so combine them together.
2172 */
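	/*
	 * in_cksum() returns the one's complement of the partial sum, so
	 * ~sum2 recovers the raw sum; the loop below folds any carries back
	 * into the low 16 bits (end-around carry) before complementing the
	 * final result.
	 */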
2173 sum += ~sum2 & 0xffff;
2174 while (sum > 0xffff)
2175 sum = (sum & 0xffff) + (sum >> 16);
2176 sum2 = ~sum & 0xffff;
2177 return sum2;
2178 }
2179
2180 #if (__NetBSD_Version__ >= 799003000)
2181
2182 /* NetBSD module interface */
2183
2184 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2185
2186 static int ipl_init(void *);
2187 static int ipl_fini(void *);
2188 static int ipl_modcmd(modcmd_t, void *);
2189
2190 #ifdef _MODULE
2191 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2192 #endif
2193
2194 static int
2195 ipl_modcmd(modcmd_t cmd, void *opaque)
2196 {
2197
2198 switch (cmd) {
2199 case MODULE_CMD_INIT:
2200 return ipl_init(opaque);
2201 case MODULE_CMD_FINI:
2202 return ipl_fini(opaque);
2203 default:
2204 return ENOTTY;
2205 }
2206 }
2207
2208 static int
2209 ipl_init(void *opaque)
2210 {
2211 int error;
2212
2213 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2214 ipf_listener_cb, NULL);
2215
2216 if ((error = ipf_load_all()) != 0)
2217 return error;
2218
2219 if (ipf_create_all(&ipfmain) == NULL) {
2220 ipf_unload_all();
2221 return ENODEV;
2222 }
2223
2224 /* Initialize our mutex and reference count */
2225 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2226 ipf_active = 0;
2227
2228 #ifdef _MODULE
2229 /*
2230 * Insert ourself into the cdevsw list.
2231 */
2232 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2233 if (error)
2234 ipl_fini(opaque);
2235 #endif
2236
2237 return error;
2238 }
2239
2240 static int
2241 ipl_fini(void *opaque)
2242 {
2243
2244 #ifdef _MODULE
2245 (void)devsw_detach(NULL, &ipl_cdevsw);
2246 #endif
2247
2248 /*
2249 * Grab the mutex, verify that there are no references
2250 * and that there are no running filters. If either
2251 * of these exists, reinsert our cdevsw entry and return
2252 * an error.
2253 */
2254 mutex_enter(&ipf_ref_mutex);
2255 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2256 #ifdef _MODULE
2257 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2258 &ipl_cdevsw, &ipl_cmaj);
2259 #endif
2260 mutex_exit(&ipf_ref_mutex);
2261 return EBUSY;
2262 }
2263
2264 /* Clean up the rest of our state before being unloaded */
2265
2266 mutex_exit(&ipf_ref_mutex);
2267 mutex_destroy(&ipf_ref_mutex);
2268 ipf_destroy_all(&ipfmain);
2269 ipf_unload_all();
2270 kauth_unlisten_scope(ipf_listener);
2271
2272 return 0;
2273 }
2274 #endif /* (__NetBSD_Version__ >= 799003000) */
2275