1 /* $NetBSD: ip_fil_netbsd.c,v 1.23 2017/05/12 08:03:26 christos Exp $ */
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.23 2017/05/12 08:03:26 christos Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60
61 #include <net/if.h>
62 #include <net/route.h>
63 #include <netinet/in.h>
64 #include <netinet/in_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip_var.h>
68 #include <netinet/tcp.h>
69 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
70 # include <netinet/tcp_timer.h>
71 # include <netinet/tcp_var.h>
72 #endif
73 #include <netinet/udp.h>
74 #include <netinet/tcpip.h>
75 #include <netinet/ip_icmp.h>
76 #include "netinet/ip_compat.h"
77 #ifdef USE_INET6
78 # include <netinet/icmp6.h>
79 # if (__NetBSD_Version__ >= 106000000)
80 # include <netinet6/nd6.h>
81 # endif
82 #endif
83 #include "netinet/ip_fil.h"
84 #include "netinet/ip_nat.h"
85 #include "netinet/ip_frag.h"
86 #include "netinet/ip_state.h"
87 #include "netinet/ip_proxy.h"
88 #include "netinet/ip_auth.h"
89 #include "netinet/ip_sync.h"
90 #include "netinet/ip_lookup.h"
91 #include "netinet/ip_dstlist.h"
92 #ifdef IPFILTER_SCAN
93 #include "netinet/ip_scan.h"
94 #endif
95 #include <sys/md5.h>
96 #include <sys/kernel.h>
97 #include <sys/conf.h>
98 #ifdef INET
99 extern int ip_optcopy (struct ip *, struct ip *);
100 #endif
101
102 #ifdef IPFILTER_M_IPFILTER
103 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
104 #endif
105
106 #if __NetBSD_Version__ >= 105009999
107 # define csuminfo csum_flags
108 #endif
109
110 #if __NetBSD_Version__ < 200000000
111 extern struct protosw inetsw[];
112 #endif
113
114 #if (__NetBSD_Version__ >= 599002000)
115 static kauth_listener_t ipf_listener;
116 #endif
117
118 #if (__NetBSD_Version__ < 399001400)
119 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
120 struct ifnet *, struct in6_addr *, u_long *,
121 int *);
122 #endif
123 #if (NetBSD >= 199511)
124 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
125 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
126 #else
127 # if (__NetBSD_Version__ >= 399001400)
128 static int ipfopen(dev_t dev, int flags, struct lwp *);
129 static int ipfclose(dev_t dev, int flags, struct lwp *);
130 # else
131 static int ipfopen(dev_t dev, int flags);
132 static int ipfclose(dev_t dev, int flags);
133 # endif /* __NetBSD_Version__ >= 399001400 */
134 #endif
135 static int ipfread(dev_t, struct uio *, int ioflag);
136 static int ipfwrite(dev_t, struct uio *, int ioflag);
137 static int ipfpoll(dev_t, int events, PROC_T *);
138 static void ipf_timer_func(void *ptr);
139
140 const struct cdevsw ipl_cdevsw = {
141 .d_open = ipfopen,
142 .d_close = ipfclose,
143 .d_read = ipfread,
144 .d_write = ipfwrite,
145 .d_ioctl = ipfioctl,
146 .d_stop = nostop,
147 .d_tty = notty,
148 .d_poll = ipfpoll,
149 .d_mmap = nommap,
150 #if (__NetBSD_Version__ >= 200000000)
151 .d_kqfilter = nokqfilter,
152 #endif
153 .d_discard = nodiscard,
154 #ifdef D_OTHER
155 .d_flag = D_OTHER
156 #else
157 .d_flag = 0
158 #endif
159 };
160 #if (__NetBSD_Version__ >= 799003000)
161 kmutex_t ipf_ref_mutex;
162 int ipf_active;
163 #endif
164
165 ipf_main_softc_t ipfmain;
166
167 static u_short ipid = 0;
168 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
169 static int ipf_send_ip(fr_info_t *, mb_t *);
170 #ifdef USE_INET6
171 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
172 fr_info_t *, frdest_t *);
173 #endif
174
175 #if defined(NETBSD_PF)
176 # include <net/pfil.h>
177 /*
178 * We provide the ipf_checkp name just to minimize changes later.
179 */
180 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
181 #endif /* NETBSD_PF */
182
183 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
184 # include <net/pfil.h>
185
186 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
187
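/*
 * pfil(9) hook for AF_INET packets: make the mbuf chain writable,
 * complete any delayed (offloaded) TCP/UDP checksums on outbound
 * packets, and hand the packet to ipf_check() for filtering.
 */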
188 static int
189 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
190 {
191 struct ip *ip;
192 int rv, hlen;
193
194 #if __NetBSD_Version__ >= 200080000
195 	/*
196 	 * Ensure that the mbufs are writable beforehand,
197 	 * as the ipf code assumes they are.
198 	 * XXX inefficient
199 	 */
200 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
201
202 if (error) {
203 m_freem(*mp);
204 *mp = NULL;
205 return error;
206 }
207 #endif
208 ip = mtod(*mp, struct ip *);
209 hlen = ip->ip_hl << 2;
210
211 #ifdef INET
212 #if defined(M_CSUM_TCPv4)
213 /*
214 * If the packet is out-bound, we can't delay checksums
215 * here. For in-bound, the checksum has already been
216 * validated.
217 */
218 if (dir == PFIL_OUT) {
219 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
220 in_delayed_cksum(*mp);
221 (*mp)->m_pkthdr.csum_flags &=
222 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
223 }
224 }
225 #endif /* M_CSUM_TCPv4 */
226 #endif /* INET */
227
228 /*
229 * Note, we don't need to update the checksum, because
230 * it has already been verified.
231 */
232 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
233
234 return (rv);
235 }
236
237 # ifdef USE_INET6
238 # include <netinet/ip6.h>
239
240 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
241
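/*
 * pfil(9) hook for AF_INET6 packets: the IPv6 counterpart of
 * ipf_check_wrapper(), completing any delayed TCPv6/UDPv6 checksums
 * on outbound packets before calling ipf_check().
 */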
242 static int
243 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
244 {
245 #if defined(INET6)
246 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
247 /*
248 * If the packet is out-bound, we can't delay checksums
249 * here. For in-bound, the checksum has already been
250 * validated.
251 */
252 if (dir == PFIL_OUT) {
253 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
254 # if (__NetBSD_Version__ > 399000600)
255 in6_delayed_cksum(*mp);
256 # endif
257 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
258 M_CSUM_UDPv6);
259 }
260 }
261 # endif
262 #endif /* INET6 */
263
264 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
265 ifp, (dir == PFIL_OUT), mp));
266 }
267 # endif
268
269
270 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
271
272 # if (__NetBSD_Version__ >= 799000400)
273
274 static void ipf_pfilsync(void *, unsigned long, void *);
275
276 static void
277 ipf_pfilsync(void *hdr, unsigned long cmd, void *arg2)
278 {
279 /*
280 * The interface pointer is useless for create (we have nothing to
281 * compare it to) and at detach, the interface name is still in the
282 * list of active NICs (albeit, down, but that's not any real
283 * indicator) and doing ifunit() on the name will still return the
284 * pointer, so it's not much use then, either.
285 */
286 ipf_sync(&ipfmain, NULL);
287 }
288
289 # else
290
291 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
292
293 static int
294 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
295 {
296 ipf_sync(&ipfmain, NULL);
297 return 0;
298 }
299
300 # endif
301 # endif
302
303 #endif /* __NetBSD_Version__ >= 105110000 */
304
305
306 #if defined(IPFILTER_LKM)
307 int
308 ipf_identify(s)
309 char *s;
310 {
311 if (strcmp(s, "ipl") == 0)
312 return 1;
313 return 0;
314 }
315 #endif /* IPFILTER_LKM */
316
317 #if (__NetBSD_Version__ >= 599002000)
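/*
 * kauth(9) listener for the network scope: allow firewall and NAT
 * configuration requests (these arrive via the ipf devices) and
 * defer every other decision to other listeners.
 */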
318 static int
319 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
320 void *arg0, void *arg1, void *arg2, void *arg3)
321 {
322 int result;
323 enum kauth_network_req req;
324
325 result = KAUTH_RESULT_DEFER;
326 req = (enum kauth_network_req)arg0;
327
328 if (action != KAUTH_NETWORK_FIREWALL)
329 return result;
330
331 	/* These must have come from device context. */
332 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
333 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
334 result = KAUTH_RESULT_ALLOW;
335
336 return result;
337 }
338 #endif
339
340 /*
341 * Try to detect the case when compiling for NetBSD with pseudo-device
342 */
343 void
344 ipfilterattach(int count)
345 {
346
347 #if (__NetBSD_Version__ >= 799003000)
348 return;
349 #else
350 #if (__NetBSD_Version__ >= 599002000)
351 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
352 ipf_listener_cb, NULL);
353 #endif
354
355 if (ipf_load_all() == 0)
356 (void) ipf_create_all(&ipfmain);
357 #endif
358 }
359
360
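/*
 * Enable the filter: initialise the ipf subsystems, register the
 * packet filter hooks with pfil(9) for AF_INET and AF_INET6 (and the
 * interface-event head, where available), set up the select wait
 * structures and start the slow timer.
 */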
361 int
362 ipfattach(ipf_main_softc_t *softc)
363 {
364 SPL_INT(s);
365 #if (__NetBSD_Version__ >= 499005500)
366 int i;
367 #endif
368 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
369 int error = 0;
370 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
371 pfil_head_t *ph_inet;
372 # ifdef USE_INET6
373 pfil_head_t *ph_inet6;
374 # endif
375 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
376 pfil_head_t *ph_ifsync;
377 # endif
378 # endif
379 #endif
380
381 SPL_NET(s);
382 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
383 printf("IP Filter: already initialized\n");
384 SPL_X(s);
385 IPFERROR(130017);
386 return EBUSY;
387 }
388
389 if (ipf_init_all(softc) < 0) {
390 SPL_X(s);
391 IPFERROR(130015);
392 return EIO;
393 }
394
395 #ifdef NETBSD_PF
396 # if (__NetBSD_Version__ >= 104200000)
397 # if __NetBSD_Version__ >= 105110000
398 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
399 # ifdef USE_INET6
400 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
401 # endif
402 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
403 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
404 # endif
405
406 if (ph_inet == NULL
407 # ifdef USE_INET6
408 && ph_inet6 == NULL
409 # endif
410 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
411 && ph_ifsync == NULL
412 # endif
413 ) {
414 SPL_X(s);
415 IPFERROR(130016);
416 return ENODEV;
417 }
418
419 if (ph_inet != NULL)
420 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
421 PFIL_IN|PFIL_OUT, ph_inet);
422 else
423 error = 0;
424 # else
425 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
426 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
427 # endif
428 if (error) {
429 IPFERROR(130013);
430 goto pfil_error;
431 }
432 # else
433 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
434 # endif
435
436 # ifdef USE_INET6
437 # if __NetBSD_Version__ >= 105110000
438 if (ph_inet6 != NULL)
439 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
440 PFIL_IN|PFIL_OUT, ph_inet6);
441 else
442 error = 0;
443 if (error) {
444 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
445 PFIL_IN|PFIL_OUT, ph_inet6);
446 ipfmain.ipf_interror = 130014;
447 goto pfil_error;
448 }
449 # else
450 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
451 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
452 if (error) {
453 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
454 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
455 IPFERROR(130014);
456 goto pfil_error;
457 }
458 # endif
459 # endif
460
461 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
462 if (ph_ifsync != NULL)
463 #if (__NetBSD_Version__ >= 799000400)
464 (void) pfil_add_ihook((void *)ipf_pfilsync, NULL,
465 PFIL_IFNET, ph_ifsync);
466 #else
467 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
468 PFIL_IFNET, ph_ifsync);
469 #endif
470 # endif
471 #endif
472
473 #if (__NetBSD_Version__ >= 499005500)
474 for (i = 0; i < IPL_LOGSIZE; i++)
475 selinit(&ipfmain.ipf_selwait[i]);
476 #else
477 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
478 #endif
479 ipf_savep = ipf_checkp;
480 ipf_checkp = ipf_check;
481
482 #ifdef INET
483 if (softc->ipf_control_forwarding & 1)
484 ipforwarding = 1;
485 #endif
486
487 ipid = 0;
488
489 SPL_X(s);
490
491 #if (__NetBSD_Version__ >= 104010000)
492 # if (__NetBSD_Version__ >= 499002000)
493 callout_init(&softc->ipf_slow_ch, 0);
494 # else
495 callout_init(&softc->ipf_slow_ch);
496 # endif
497 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
498 ipf_timer_func, softc);
499 #else
500 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
501 #endif
502
503 return 0;
504
505 #if __NetBSD_Version__ >= 105110000
506 pfil_error:
507 SPL_X(s);
508 ipf_fini_all(softc);
509 return error;
510 #endif
511 }
512
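/*
 * Periodic housekeeping: run ipf_slowtimer() to expire timed-out
 * entries (state, NAT, fragments, ...), then re-arm the callout
 * while ipf_running is 1 or -1.
 */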
513 static void
514 ipf_timer_func(void *ptr)
515 {
516 ipf_main_softc_t *softc = ptr;
517 SPL_INT(s);
518
519 SPL_NET(s);
520 READ_ENTER(&softc->ipf_global);
521
522 if (softc->ipf_running > 0)
523 ipf_slowtimer(softc);
524
525 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
526 #if NETBSD_GE_REV(104240000)
527 callout_reset(&softc->ipf_slow_ch, hz / 2,
528 ipf_timer_func, softc);
529 #else
530 timeout(ipf_timer_func, softc,
531 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
532 #endif
533 }
534 RWLOCK_EXIT(&softc->ipf_global);
535 SPL_X(s);
536 }
537
538
539 /*
540 * Disable the filter by removing the hooks from the IP input/output
541 * stream.
542 */
543 int
544 ipfdetach(ipf_main_softc_t *softc)
545 {
546 SPL_INT(s);
547 #if (__NetBSD_Version__ >= 499005500)
548 int i;
549 #endif
550 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
551 int error = 0;
552 # if __NetBSD_Version__ >= 105150000
553 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
554 # ifdef USE_INET6
555 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
556 # endif
557 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
558 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
559 # endif
560 # endif
561 #endif
562
563 SPL_NET(s);
564
565 #if (__NetBSD_Version__ >= 104010000)
566 if (softc->ipf_running > 0)
567 callout_stop(&softc->ipf_slow_ch);
568 #else
569 untimeout(ipf_slowtimer, NULL);
570 #endif /* NetBSD */
571
572 ipf_checkp = ipf_savep;
573 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
574 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
575
576 #ifdef INET
577 if (softc->ipf_control_forwarding & 2)
578 ipforwarding = 0;
579 #endif
580
581 #ifdef NETBSD_PF
582 # if (__NetBSD_Version__ >= 104200000)
583 # if __NetBSD_Version__ >= 105110000
584 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
585 # if __NetBSD_Version__ >= 799000400
586 (void) pfil_remove_ihook((void *)ipf_pfilsync, NULL,
587 PFIL_IFNET, ph_ifsync);
588 # else
589 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
590 PFIL_IFNET, ph_ifsync);
591 # endif
592 # endif
593
594 if (ph_inet != NULL)
595 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
596 PFIL_IN|PFIL_OUT, ph_inet);
597 else
598 error = 0;
599 # else
600 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
601 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
602 # endif
603 if (error) {
604 SPL_X(s);
605 IPFERROR(130011);
606 return error;
607 }
608 # else
609 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
610 # endif
611 # ifdef USE_INET6
612 # if __NetBSD_Version__ >= 105110000
613 if (ph_inet6 != NULL)
614 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
615 PFIL_IN|PFIL_OUT, ph_inet6);
616 else
617 error = 0;
618 # else
619 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
620 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
621 # endif
622 if (error) {
623 SPL_X(s);
624 IPFERROR(130012);
625 return error;
626 }
627 # endif
628 #endif
629 SPL_X(s);
630
631 #if (__NetBSD_Version__ >= 499005500)
632 for (i = 0; i < IPL_LOGSIZE; i++)
633 seldestroy(&ipfmain.ipf_selwait[i]);
634 #endif
635
636 ipf_fini_all(softc);
637
638 return 0;
639 }
640
641
642 /*
643 * Filter ioctl interface.
644 */
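/*
 * Writes require kauth approval (KAUTH_REQ_NETWORK_FIREWALL_FW) or,
 * on older kernels, securelevel < 2; valid requests are then handed
 * to ipf_ioctlswitch() for the selected unit.
 */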
645 int
646 ipfioctl(dev_t dev, u_long cmd,
647 #if (__NetBSD_Version__ >= 499001000)
648 void *data,
649 #else
650 caddr_t data,
651 #endif
652 int mode
653 #if (NetBSD >= 199511)
654 # if (__NetBSD_Version__ >= 399001400)
655 , struct lwp *p
656 # if (__NetBSD_Version__ >= 399002000)
657 # define UID(l) kauth_cred_getuid((l)->l_cred)
658 # else
659 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
660 # endif
661 # else
662 , struct proc *p
663 # define UID(p) ((p)->p_cred->p_ruid)
664 # endif
665 #endif
666 )
667 {
668 int error = 0, unit = 0;
669 SPL_INT(s);
670
671 #if (__NetBSD_Version__ >= 399002000)
672 if ((mode & FWRITE) &&
673 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
674 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
675 NULL, NULL)) {
676 ipfmain.ipf_interror = 130005;
677 return EPERM;
678 }
679 #else
680 if ((securelevel >= 2) && (mode & FWRITE)) {
681 ipfmain.ipf_interror = 130001;
682 return EPERM;
683 }
684 #endif
685
686 unit = GET_MINOR(dev);
687 if ((IPL_LOGMAX < unit) || (unit < 0)) {
688 ipfmain.ipf_interror = 130002;
689 return ENXIO;
690 }
691
692 if (ipfmain.ipf_running <= 0) {
693 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
694 ipfmain.ipf_interror = 130003;
695 return EIO;
696 }
697 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
698 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
699 cmd != SIOCGETFS && cmd != SIOCGETFF &&
700 cmd != SIOCIPFINTERROR) {
701 ipfmain.ipf_interror = 130004;
702 return EIO;
703 }
704 }
705
706 SPL_NET(s);
707
708 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
709 if (error != -1) {
710 SPL_X(s);
711 return error;
712 }
713
714 SPL_X(s);
715 return error;
716 }
717
718
719 /*
720 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
721 * requires a large amount of setting up and isn't any more efficient.
722 */
723 int
724 ipf_send_reset(fr_info_t *fin)
725 {
726 struct tcphdr *tcp, *tcp2;
727 int tlen = 0, hlen;
728 struct mbuf *m;
729 #ifdef USE_INET6
730 ip6_t *ip6;
731 #endif
732 ip_t *ip;
733
734 tcp = fin->fin_dp;
735 if (tcp->th_flags & TH_RST)
736 return -1; /* feedback loop */
737
738 if (ipf_checkl4sum(fin) == -1)
739 return -1;
740
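	/*
	 * The RST must acknowledge everything in the offending segment:
	 * its payload length plus one for a SYN and one for a FIN
	 * (e.g. a bare SYN yields tlen == 1).
	 */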
741 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
742 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
743 ((tcp->th_flags & TH_FIN) ? 1 : 0);
744
745 #ifdef USE_INET6
746 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
747 #else
748 hlen = sizeof(ip_t);
749 #endif
750 #ifdef MGETHDR
751 MGETHDR(m, M_DONTWAIT, MT_HEADER);
752 #else
753 MGET(m, M_DONTWAIT, MT_HEADER);
754 #endif
755 if (m == NULL)
756 return -1;
757 if (sizeof(*tcp2) + hlen > MHLEN) {
758 MCLGET(m, M_DONTWAIT);
759 if (m == NULL)
760 return -1;
761 if ((m->m_flags & M_EXT) == 0) {
762 FREE_MB_T(m);
763 return -1;
764 }
765 }
766
767 m->m_len = sizeof(*tcp2) + hlen;
768 m->m_data += max_linkhdr;
769 m->m_pkthdr.len = m->m_len;
770 m_reset_rcvif(m);
771 ip = mtod(m, struct ip *);
772 bzero((char *)ip, hlen);
773 #ifdef USE_INET6
774 ip6 = (ip6_t *)ip;
775 #endif
776 bzero((char *)ip, sizeof(*tcp2) + hlen);
777 tcp2 = (struct tcphdr *)((char *)ip + hlen);
778 tcp2->th_sport = tcp->th_dport;
779 tcp2->th_dport = tcp->th_sport;
780
781 if (tcp->th_flags & TH_ACK) {
782 tcp2->th_seq = tcp->th_ack;
783 tcp2->th_flags = TH_RST;
784 tcp2->th_ack = 0;
785 } else {
786 tcp2->th_seq = 0;
787 tcp2->th_ack = ntohl(tcp->th_seq);
788 tcp2->th_ack += tlen;
789 tcp2->th_ack = htonl(tcp2->th_ack);
790 tcp2->th_flags = TH_RST|TH_ACK;
791 }
792 tcp2->th_x2 = 0;
793 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
794 tcp2->th_win = tcp->th_win;
795 tcp2->th_sum = 0;
796 tcp2->th_urp = 0;
797
798 #ifdef USE_INET6
799 if (fin->fin_v == 6) {
800 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
801 ip6->ip6_plen = htons(sizeof(struct tcphdr));
802 ip6->ip6_nxt = IPPROTO_TCP;
803 ip6->ip6_hlim = 0;
804 ip6->ip6_src = fin->fin_dst6.in6;
805 ip6->ip6_dst = fin->fin_src6.in6;
806 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
807 sizeof(*ip6), sizeof(*tcp2));
808 return ipf_send_ip(fin, m);
809 }
810 #endif
811 #ifdef INET
812 ip->ip_p = IPPROTO_TCP;
813 ip->ip_len = htons(sizeof(struct tcphdr));
814 ip->ip_src.s_addr = fin->fin_daddr;
815 ip->ip_dst.s_addr = fin->fin_saddr;
816 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
817 ip->ip_len = hlen + sizeof(*tcp2);
818 return ipf_send_ip(fin, m);
819 #else
820 return 0;
821 #endif
822 }
823
824
825 /*
826 * Expects ip_len to be in host byte order when called.
827 */
828 static int
829 ipf_send_ip(fr_info_t *fin, mb_t *m)
830 {
831 fr_info_t fnew;
832 #ifdef INET
833 ip_t *oip;
834 #endif
835 ip_t *ip;
836 int hlen;
837
838 ip = mtod(m, ip_t *);
839 bzero((char *)&fnew, sizeof(fnew));
840 fnew.fin_main_soft = fin->fin_main_soft;
841
842 IP_V_A(ip, fin->fin_v);
843 switch (fin->fin_v)
844 {
845 #ifdef INET
846 case 4 :
847 oip = fin->fin_ip;
848 hlen = sizeof(*oip);
849 fnew.fin_v = 4;
850 fnew.fin_p = ip->ip_p;
851 fnew.fin_plen = ntohs(ip->ip_len);
852 HTONS(ip->ip_len);
853 IP_HL_A(ip, sizeof(*oip) >> 2);
854 ip->ip_tos = oip->ip_tos;
855 ip->ip_id = ipf_nextipid(fin);
856 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
857 ip->ip_ttl = ip_defttl;
858 ip->ip_sum = 0;
859 break;
860 #endif
861 #ifdef USE_INET6
862 case 6 :
863 {
864 ip6_t *ip6 = (ip6_t *)ip;
865
866 ip6->ip6_vfc = 0x60;
867 ip6->ip6_hlim = IPDEFTTL;
868
869 hlen = sizeof(*ip6);
870 fnew.fin_p = ip6->ip6_nxt;
871 fnew.fin_v = 6;
872 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
873 break;
874 }
875 #endif
876 default :
877 return EINVAL;
878 }
879 #ifdef KAME_IPSEC
880 m_reset_rcvif(m);
881 #endif
882
883 fnew.fin_ifp = fin->fin_ifp;
884 fnew.fin_flx = FI_NOCKSUM;
885 fnew.fin_m = m;
886 fnew.fin_ip = ip;
887 fnew.fin_mp = &m;
888 fnew.fin_hlen = hlen;
889 fnew.fin_dp = (char *)ip + hlen;
890 (void) ipf_makefrip(hlen, ip, &fnew);
891
892 return ipf_fastroute(m, &m, &fnew, NULL);
893 }
894
895
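/*
 * Build and send an ICMP (or ICMPv6) error of the given type in
 * response to the packet described by fin.  The offending packet's
 * IP header plus a small amount of its payload (up to 8 bytes for
 * IPv4, limited by IPV6_MMTU for IPv6) is included, and the result
 * is transmitted via ipf_send_ip().
 */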
896 int
897 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
898 {
899 int err, hlen, xtra, iclen, ohlen, avail;
900 struct in_addr dst4;
901 struct icmp *icmp;
902 struct mbuf *m;
903 i6addr_t dst6;
904 void *ifp;
905 #ifdef USE_INET6
906 int code;
907 ip6_t *ip6;
908 #endif
909 ip_t *ip, *ip2;
910
911 if ((type < 0) || (type > ICMP_MAXTYPE))
912 return -1;
913
914 #ifdef USE_INET6
915 code = fin->fin_icode;
916 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
917 return -1;
918 #endif
919
920 if (ipf_checkl4sum(fin) == -1)
921 return -1;
922 #ifdef MGETHDR
923 MGETHDR(m, M_DONTWAIT, MT_HEADER);
924 #else
925 MGET(m, M_DONTWAIT, MT_HEADER);
926 #endif
927 if (m == NULL)
928 return -1;
929 avail = MHLEN;
930
931 xtra = 0;
932 hlen = 0;
933 ohlen = 0;
934 dst4.s_addr = 0;
935 ifp = fin->fin_ifp;
936 if (fin->fin_v == 4) {
937 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
938 switch (ntohs(fin->fin_data[0]) >> 8)
939 {
940 case ICMP_ECHO :
941 case ICMP_TSTAMP :
942 case ICMP_IREQ :
943 case ICMP_MASKREQ :
944 break;
945 default :
946 FREE_MB_T(m);
947 return 0;
948 }
949
950 if (dst == 0) {
951 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
952 &dst6, NULL) == -1) {
953 FREE_MB_T(m);
954 return -1;
955 }
956 dst4 = dst6.in4;
957 } else
958 dst4.s_addr = fin->fin_daddr;
959
960 hlen = sizeof(ip_t);
961 ohlen = fin->fin_hlen;
962 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
963 if (fin->fin_hlen < fin->fin_plen)
964 xtra = MIN(fin->fin_dlen, 8);
965 else
966 xtra = 0;
967 }
968
969 #ifdef USE_INET6
970 else if (fin->fin_v == 6) {
971 hlen = sizeof(ip6_t);
972 ohlen = sizeof(ip6_t);
973 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
974 type = icmptoicmp6types[type];
975 if (type == ICMP6_DST_UNREACH)
976 code = icmptoicmp6unreach[code];
977
978 if (iclen + max_linkhdr + fin->fin_plen > avail) {
979 MCLGET(m, M_DONTWAIT);
980 if (m == NULL)
981 return -1;
982 if ((m->m_flags & M_EXT) == 0) {
983 FREE_MB_T(m);
984 return -1;
985 }
986 avail = MCLBYTES;
987 }
988 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
989 xtra = MIN(xtra, IPV6_MMTU - iclen);
990 if (dst == 0) {
991 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
992 &dst6, NULL) == -1) {
993 FREE_MB_T(m);
994 return -1;
995 }
996 } else
997 dst6 = fin->fin_dst6;
998 }
999 #endif
1000 else {
1001 FREE_MB_T(m);
1002 return -1;
1003 }
1004
1005 avail -= (max_linkhdr + iclen);
1006 if (avail < 0) {
1007 FREE_MB_T(m);
1008 return -1;
1009 }
1010 if (xtra > avail)
1011 xtra = avail;
1012 iclen += xtra;
1013 m->m_data += max_linkhdr;
1014 m_reset_rcvif(m);
1015 m->m_pkthdr.len = iclen;
1016 m->m_len = iclen;
1017 ip = mtod(m, ip_t *);
1018 icmp = (struct icmp *)((char *)ip + hlen);
1019 ip2 = (ip_t *)&icmp->icmp_ip;
1020
1021 icmp->icmp_type = type;
1022 icmp->icmp_code = fin->fin_icode;
1023 icmp->icmp_cksum = 0;
1024 #ifdef icmp_nextmtu
1025 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1026 if (fin->fin_mtu != 0) {
1027 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1028
1029 } else if (ifp != NULL) {
1030 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1031
1032 } else { /* make up a number... */
1033 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1034 }
1035 }
1036 #endif
1037
1038 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1039
1040 #if defined(M_CSUM_IPv4)
1041 /*
1042 * Clear any in-bound checksum flags for this packet.
1043 */
1044 m->m_pkthdr.csuminfo = 0;
1045 #endif /* M_CSUM_IPv4 */
1046
1047 #ifdef USE_INET6
1048 ip6 = (ip6_t *)ip;
1049 if (fin->fin_v == 6) {
1050 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1051 ip6->ip6_plen = htons(iclen - hlen);
1052 ip6->ip6_nxt = IPPROTO_ICMPV6;
1053 ip6->ip6_hlim = 0;
1054 ip6->ip6_src = dst6.in6;
1055 ip6->ip6_dst = fin->fin_src6.in6;
1056 if (xtra > 0)
1057 bcopy((char *)fin->fin_ip + ohlen,
1058 (char *)&icmp->icmp_ip + ohlen, xtra);
1059 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1060 sizeof(*ip6), iclen - hlen);
1061 } else
1062 #endif
1063 {
1064 ip->ip_p = IPPROTO_ICMP;
1065 ip->ip_src.s_addr = dst4.s_addr;
1066 ip->ip_dst.s_addr = fin->fin_saddr;
1067
1068 if (xtra > 0)
1069 bcopy((char *)fin->fin_ip + ohlen,
1070 (char *)&icmp->icmp_ip + ohlen, xtra);
1071 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1072 sizeof(*icmp) + 8);
1073 ip->ip_len = iclen;
1074 ip->ip_p = IPPROTO_ICMP;
1075 }
1076 err = ipf_send_ip(fin, m);
1077 return err;
1078 }
1079
1080
1081 /*
1082 * m0 - pointer to mbuf where the IP packet starts
1083 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1084 */
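/*
 * Send the packet out the chosen interface, bypassing the normal IP
 * output path: pick the next hop from fdp (or the packet's own
 * destination), look up a route, run accounting/state/NAT for input
 * packets being turned around here, and fragment the packet if it
 * exceeds the interface MTU and IP_DF is not set.
 */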
1085 int
1086 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1087 {
1088 register struct ip *ip, *mhip;
1089 register struct mbuf *m = *mpp;
1090 register struct route *ro;
1091 int len, off, error = 0, hlen, code;
1092 struct ifnet *ifp, *sifp;
1093 ipf_main_softc_t *softc;
1094 #if __NetBSD_Version__ >= 499001100
1095 union {
1096 struct sockaddr dst;
1097 struct sockaddr_in dst4;
1098 } u;
1099 #else
1100 struct sockaddr_in *dst4;
1101 #endif
1102 struct sockaddr *dst;
1103 u_short ip_off, ip_len;
1104 struct route iproute;
1105 struct rtentry *rt;
1106 frdest_t node;
1107 frentry_t *fr;
1108
1109 if (fin->fin_v == 6) {
1110 #ifdef USE_INET6
1111 error = ipf_fastroute6(m0, mpp, fin, fdp);
1112 #else
1113 error = EPROTONOSUPPORT;
1114 #endif
1115 if ((error != 0) && (*mpp != NULL))
1116 FREE_MB_T(*mpp);
1117 return error;
1118 }
1119 #ifndef INET
1120 FREE_MB_T(*mpp);
1121 return EPROTONOSUPPORT;
1122 #else
1123
1124 hlen = fin->fin_hlen;
1125 ip = mtod(m0, struct ip *);
1126 softc = fin->fin_main_soft;
1127 rt = NULL;
1128 ifp = NULL;
1129
1130 # if defined(M_CSUM_IPv4)
1131 /*
1132 * Clear any in-bound checksum flags for this packet.
1133 */
1134 m0->m_pkthdr.csuminfo = 0;
1135 # endif /* M_CSUM_IPv4 */
1136
1137 /*
1138 * Route packet.
1139 */
1140 ro = &iproute;
1141 memset(ro, 0, sizeof(*ro));
1142 fr = fin->fin_fr;
1143
1144 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1145 (fdp->fd_type == FRD_DSTLIST)) {
1146 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1147 fdp = &node;
1148 }
1149 if (fdp != NULL)
1150 ifp = fdp->fd_ptr;
1151 else
1152 ifp = fin->fin_ifp;
1153
1154 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1155 error = -2;
1156 goto bad;
1157 }
1158
1159 # if __NetBSD_Version__ >= 499001100
1160 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1161 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1162 else
1163 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1164 dst = &u.dst;
1165 rtcache_setdst(ro, dst);
1166 rt = rtcache_init(ro);
1167 # else
1168 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1169 dst = (struct sockaddr *)dst4;
1170 dst4->sin_family = AF_INET;
1171 dst4->sin_addr = ip->ip_dst;
1172
1173 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1174 dst4->sin_addr = fdp->fd_ip;
1175
1176 dst4->sin_len = sizeof(*dst);
1177 rtalloc(ro);
1178 rt = ro->ro_rt;
1179 # endif
1180 if ((ifp == NULL) && (rt != NULL))
1181 ifp = rt->rt_ifp;
1182 if ((rt == NULL) || (ifp == NULL)) {
1183 #ifdef INET
1184 if (in_localaddr(ip->ip_dst))
1185 error = EHOSTUNREACH;
1186 else
1187 #endif
1188 error = ENETUNREACH;
1189 goto bad;
1190 }
1191
1192
1193 if (rt->rt_flags & RTF_GATEWAY)
1194 dst = rt->rt_gateway;
1195
1196 rt->rt_use++;
1197
1198 	/*
1199 	 * Input packets which are being "fastrouted" won't go back
1200 	 * through output filtering and would miss their chance to get
1201 	 * NAT'd and counted, so do it here. Duplicated packets aren't
1202 	 * considered to be part of the normal packet stream, so do not
1203 	 * NAT them or pass them through stateful checking, etc.
1204 	 */
1205 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1206 sifp = fin->fin_ifp;
1207 fin->fin_ifp = ifp;
1208 fin->fin_out = 1;
1209 (void) ipf_acctpkt(fin, NULL);
1210 fin->fin_fr = NULL;
1211 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1212 u_32_t pass;
1213
1214 (void) ipf_state_check(fin, &pass);
1215 }
1216
1217 switch (ipf_nat_checkout(fin, NULL))
1218 {
1219 case 0 :
1220 break;
1221 case 1 :
1222 ip->ip_sum = 0;
1223 break;
1224 case -1 :
1225 error = -1;
1226 goto bad;
1227 break;
1228 }
1229
1230 fin->fin_ifp = sifp;
1231 fin->fin_out = 0;
1232 } else
1233 ip->ip_sum = 0;
1234 /*
1235 * If small enough for interface, can just send directly.
1236 */
1237 m_set_rcvif(m, ifp);
1238
1239 ip_len = ntohs(ip->ip_len);
1240 if (ip_len <= ifp->if_mtu) {
1241 # if defined(M_CSUM_IPv4)
1242 # if (__NetBSD_Version__ >= 105009999)
1243 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1244 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1245 # else
1246 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1247 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1248 # endif /* (__NetBSD_Version__ >= 105009999) */
1249 else if (ip->ip_sum == 0)
1250 ip->ip_sum = in_cksum(m, hlen);
1251 # else
1252 if (!ip->ip_sum)
1253 ip->ip_sum = in_cksum(m, hlen);
1254 # endif /* M_CSUM_IPv4 */
1255
1256 error = if_output_lock(ifp, ifp, m, dst, rt);
1257 goto done;
1258 }
1259
1260 /*
1261 * Too large for interface; fragment if possible.
1262 * Must be able to put at least 8 bytes per fragment.
1263 */
1264 ip_off = ntohs(ip->ip_off);
1265 if (ip_off & IP_DF) {
1266 error = EMSGSIZE;
1267 goto bad;
1268 }
1269 len = (ifp->if_mtu - hlen) &~ 7;
1270 if (len < 8) {
1271 error = EMSGSIZE;
1272 goto bad;
1273 }
1274
1275 {
1276 int mhlen, firstlen = len;
1277 struct mbuf **mnext = &m->m_act;
1278
1279 /*
1280 * Loop through length of segment after first fragment,
1281 * make new header and copy data of each part and link onto chain.
1282 */
1283 m0 = m;
1284 mhlen = sizeof (struct ip);
1285 for (off = hlen + len; off < ip_len; off += len) {
1286 # ifdef MGETHDR
1287 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1288 # else
1289 MGET(m, M_DONTWAIT, MT_HEADER);
1290 # endif
1291 if (m == 0) {
1292 m = m0;
1293 error = ENOBUFS;
1294 goto bad;
1295 }
1296 m->m_data += max_linkhdr;
1297 mhip = mtod(m, struct ip *);
1298 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1299 #ifdef INET
1300 if (hlen > sizeof (struct ip)) {
1301 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1302 IP_HL_A(mhip, mhlen >> 2);
1303 }
1304 #endif
1305 m->m_len = mhlen;
1306 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1307 if (off + len >= ip_len)
1308 len = ip_len - off;
1309 else
1310 mhip->ip_off |= IP_MF;
1311 mhip->ip_len = htons((u_short)(len + mhlen));
1312 m->m_next = m_copy(m0, off, len);
1313 if (m->m_next == 0) {
1314 error = ENOBUFS; /* ??? */
1315 goto sendorfree;
1316 }
1317 m->m_pkthdr.len = mhlen + len;
1318 m_reset_rcvif(m);
1319 mhip->ip_off = htons((u_short)mhip->ip_off);
1320 mhip->ip_sum = 0;
1321 #ifdef INET
1322 mhip->ip_sum = in_cksum(m, mhlen);
1323 #endif
1324 *mnext = m;
1325 mnext = &m->m_act;
1326 }
1327 /*
1328 * Update first fragment by trimming what's been copied out
1329 * and updating header, then send each fragment (in order).
1330 */
1331 m_adj(m0, hlen + firstlen - ip_len);
1332 ip->ip_len = htons((u_short)(hlen + firstlen));
1333 ip->ip_off = htons((u_short)IP_MF);
1334 ip->ip_sum = 0;
1335 #ifdef INET
1336 ip->ip_sum = in_cksum(m0, hlen);
1337 #endif
1338 sendorfree:
1339 for (m = m0; m; m = m0) {
1340 m0 = m->m_act;
1341 m->m_act = 0;
1342 if (error == 0) {
1343 KERNEL_LOCK(1, NULL);
1344 error = (*ifp->if_output)(ifp, m, dst, rt);
1345 KERNEL_UNLOCK_ONE(NULL);
1346 } else {
1347 FREE_MB_T(m);
1348 }
1349 }
1350 }
1351 done:
1352 if (!error)
1353 softc->ipf_frouteok[0]++;
1354 else
1355 softc->ipf_frouteok[1]++;
1356
1357 # if __NetBSD_Version__ >= 499001100
1358 rtcache_unref(rt, ro);
1359 rtcache_free(ro);
1360 # else
1361 if (rt) {
1362 RTFREE(rt);
1363 }
1364 # endif
1365 return error;
1366 bad:
1367 if (error == EMSGSIZE) {
1368 sifp = fin->fin_ifp;
1369 code = fin->fin_icode;
1370 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1371 fin->fin_ifp = ifp;
1372 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1373 fin->fin_ifp = sifp;
1374 fin->fin_icode = code;
1375 }
1376 FREE_MB_T(m);
1377 goto done;
1378 #endif /* INET */
1379 }
1380
1381
1382 #if defined(USE_INET6)
1383 /*
1384 * This is the IPv6 specific fastroute code. It doesn't clean up the mbufs
1385 * or ensure that it is an IPv6 packet that is being forwarded; those are
1386 * expected to be done by the caller (ipf_fastroute).
1387 */
1388 static int
1389 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1390 frdest_t *fdp)
1391 {
1392 # if __NetBSD_Version__ >= 499001100
1393 struct route ip6route;
1394 const struct sockaddr *dst;
1395 union {
1396 struct sockaddr dst;
1397 struct sockaddr_in6 dst6;
1398 } u;
1399 struct route *ro;
1400 # else
1401 struct route_in6 ip6route;
1402 struct sockaddr_in6 *dst6;
1403 struct route_in6 *ro;
1404 # endif
1405 struct rtentry *rt;
1406 struct ifnet *ifp;
1407 u_long mtu;
1408 int error;
1409
1410 error = 0;
1411 ro = &ip6route;
1412
1413 if (fdp != NULL)
1414 ifp = fdp->fd_ptr;
1415 else
1416 ifp = fin->fin_ifp;
1417 memset(ro, 0, sizeof(*ro));
1418 # if __NetBSD_Version__ >= 499001100
1419 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1420 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1421 else
1422 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1423 dst = &u.dst;
1424 rtcache_setdst(ro, dst);
1425
1426 rt = rtcache_init(ro);
1427 if ((ifp == NULL) && (rt != NULL))
1428 ifp = rt->rt_ifp;
1429 # else
1430 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1431 dst6->sin6_family = AF_INET6;
1432 dst6->sin6_len = sizeof(struct sockaddr_in6);
1433 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1434
1435 if (fdp != NULL) {
1436 if (IP6_NOTZERO(&fdp->fd_ip6))
1437 dst6->sin6_addr = fdp->fd_ip6.in6;
1438 }
1439
1440 rtalloc((struct route *)ro);
1441
1442 if ((ifp == NULL) && (ro->ro_rt != NULL))
1443 ifp = ro->ro_rt->rt_ifp;
1444 rt = ro->ro_rt;
1445 # endif
1446 if ((rt == NULL) || (ifp == NULL)) {
1447
1448 error = EHOSTUNREACH;
1449 goto bad;
1450 }
1451
1452 /* KAME */
1453 # if __NetBSD_Version__ >= 499001100
1454 if (IN6_IS_ADDR_LINKLOCAL(&u.dst6.sin6_addr))
1455 u.dst6.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1456 # else
1457 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1458 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1459 # endif
1460
1461 {
1462 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
1463 struct in6_ifextra *ife;
1464 # endif
1465 if (rt->rt_flags & RTF_GATEWAY)
1466 # if __NetBSD_Version__ >= 499001100
1467 dst = rt->rt_gateway;
1468 # else
1469 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1470 # endif
1471 rt->rt_use++;
1472
1473 /* Determine path MTU. */
1474 # if (__NetBSD_Version__ <= 106009999)
1475 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1476 # else
1477 # ifdef IN6_LINKMTU
1478 mtu = IN6_LINKMTU(ifp);
1479 # else
1480 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1481 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1482 # endif
1483 # endif
1484 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1485 # if __NetBSD_Version__ >= 499001100
1486 error = ip6_if_output(ifp, ifp, m0, satocsin6(dst), rt);
1487 # else
1488 error = nd6_output(ifp, ifp, m0, dst6, rt);
1489 # endif
1490 } else {
1491 error = EMSGSIZE;
1492 }
1493 }
1494 bad:
1495 # if __NetBSD_Version__ >= 499001100
1496 rtcache_unref(rt, ro);
1497 rtcache_free(ro);
1498 # else
1499 if (ro->ro_rt != NULL) {
1500 RTFREE(((struct route *)ro)->ro_rt);
1501 }
1502 # endif
1503 return error;
1504 }
1505 #endif /* INET6 */
1506
1507
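/*
 * Reverse-path check: look up the route back to the packet's source
 * address and return non-zero if it points out the interface the
 * packet arrived on.
 */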
1508 int
1509 ipf_verifysrc(fr_info_t *fin)
1510 {
1511 #if __NetBSD_Version__ >= 499001100
1512 union {
1513 struct sockaddr dst;
1514 struct sockaddr_in dst4;
1515 } u;
1516 struct rtentry *rt;
1517 #else
1518 struct sockaddr_in *dst;
1519 #endif
1520 struct route iproute;
1521 int rc;
1522
1523 #if __NetBSD_Version__ >= 499001100
1524 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1525 rtcache_setdst(&iproute, &u.dst);
1526 rt = rtcache_init(&iproute);
1527 if (rt == NULL)
1528 rc = 0;
1529 else
1530 rc = (fin->fin_ifp == rt->rt_ifp);
1531 rtcache_unref(rt, &iproute);
1532 rtcache_free(&iproute);
1533 #else
1534 dst = (struct sockaddr_in *)&iproute.ro_dst;
1535 dst->sin_len = sizeof(*dst);
1536 dst->sin_family = AF_INET;
1537 dst->sin_addr = fin->fin_src;
1538 rtalloc(&iproute);
1539 if (iproute.ro_rt == NULL)
1540 return 0;
1541 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1542 RTFREE(iproute.ro_rt);
1543 #endif
1544 return rc;
1545 }
1546
1547
1548 /*
1549 * return the first IP Address associated with an interface
1550 */
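/*
 * For IPv6 the first address that is neither link-local nor loopback
 * is used; FRI_BROADCAST and FRI_PEERADDR select the broadcast or
 * point-to-point peer address instead of the local address.
 */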
1551 int
1552 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1553 i6addr_t *inp, i6addr_t *inpmask)
1554 {
1555 #ifdef USE_INET6
1556 struct in6_addr *inp6 = NULL;
1557 #endif
1558 struct sockaddr *sock, *mask;
1559 struct sockaddr_in *sin;
1560 struct ifaddr *ifa;
1561 struct ifnet *ifp;
1562
1563 if ((ifptr == NULL) || (ifptr == (void *)-1))
1564 return -1;
1565
1566 ifp = ifptr;
1567 mask = NULL;
1568
1569 if (v == 4)
1570 inp->in4.s_addr = 0;
1571 #ifdef USE_INET6
1572 else if (v == 6)
1573 bzero((char *)inp, sizeof(*inp));
1574 #endif
1575
1576 ifa = IFADDR_READER_FIRST(ifp);
1577 sock = ifa ? ifa->ifa_addr : NULL;
1578 while (sock != NULL && ifa != NULL) {
1579 sin = (struct sockaddr_in *)sock;
1580 if ((v == 4) && (sin->sin_family == AF_INET))
1581 break;
1582 #ifdef USE_INET6
1583 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1584 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1585 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1586 !IN6_IS_ADDR_LOOPBACK(inp6))
1587 break;
1588 }
1589 #endif
1590 ifa = IFADDR_READER_NEXT(ifa);
1591 if (ifa != NULL)
1592 sock = ifa->ifa_addr;
1593 }
1594 if (ifa == NULL || sock == NULL)
1595 return -1;
1596
1597 mask = ifa->ifa_netmask;
1598 if (atype == FRI_BROADCAST)
1599 sock = ifa->ifa_broadaddr;
1600 else if (atype == FRI_PEERADDR)
1601 sock = ifa->ifa_dstaddr;
1602
1603 #ifdef USE_INET6
1604 if (v == 6)
1605 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1606 (struct sockaddr_in6 *)mask,
1607 inp, inpmask);
1608 #endif
1609 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1610 (struct sockaddr_in *)mask,
1611 &inp->in4, &inpmask->in4);
1612 }
1613
1614
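/*
 * Return a new TCP initial sequence number for the connection in
 * fin.  Newer kernels use tcp_new_iss1(); the fallback hashes the
 * addresses and ports together with a secret (MD5) and adds a
 * monotonically increasing offset, in the spirit of RFC 1948.
 */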
1615 u_32_t
1616 ipf_newisn(fr_info_t *fin)
1617 {
1618 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1619 size_t asz;
1620
1621 if (fin->fin_v == 4)
1622 asz = sizeof(struct in_addr);
1623 else if (fin->fin_v == 6)
1624 asz = sizeof(fin->fin_src);
1625 else /* XXX: no way to return error */
1626 return 0;
1627 #ifdef INET
1628 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1629 fin->fin_sport, fin->fin_dport, asz, 0);
1630 #else
1631 return ENOSYS;
1632 #endif
1633 #else
1634 static int iss_seq_off = 0;
1635 u_char hash[16];
1636 u_32_t newiss;
1637 MD5_CTX ctx;
1638
1639 /*
1640 * Compute the base value of the ISS. It is a hash
1641 * of (saddr, sport, daddr, dport, secret).
1642 */
1643 MD5Init(&ctx);
1644
1645 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1646 sizeof(fin->fin_fi.fi_src));
1647 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1648 sizeof(fin->fin_fi.fi_dst));
1649 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1650
1651 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1652
1653 MD5Final(hash, &ctx);
1654
1655 memcpy(&newiss, hash, sizeof(newiss));
1656
1657 /*
1658 * Now increment our "timer", and add it in to
1659 * the computed value.
1660 *
1661 * XXX Use `addin'?
1662 * XXX TCP_ISSINCR too large to use?
1663 */
1664 iss_seq_off += 0x00010000;
1665 newiss += iss_seq_off;
1666 return newiss;
1667 #endif
1668 }
1669
1670
1671 /* ------------------------------------------------------------------------ */
1672 /* Function: ipf_nextipid */
1673 /* Returns: int - 0 == success, -1 == error (packet should be dropped) */
1674 /* Parameters: fin(I) - pointer to packet information */
1675 /* */
1676 /* Returns the next IPv4 ID to use for this packet. */
1677 /* ------------------------------------------------------------------------ */
1678 u_short
1679 ipf_nextipid(fr_info_t *fin)
1680 {
1681 #ifdef USE_MUTEXES
1682 ipf_main_softc_t *softc = fin->fin_main_soft;
1683 #endif
1684 u_short id;
1685
1686 MUTEX_ENTER(&softc->ipf_rw);
1687 id = ipid++;
1688 MUTEX_EXIT(&softc->ipf_rw);
1689
1690 return id;
1691 }
1692
1693
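/*
 * Check the layer 4 (TCP/UDP) checksum of an IPv4 packet.  Where the
 * interface reports receive checksum offload, the mbuf's M_CSUM_*
 * flags are used to decide whether the checksum is already known to
 * be good or bad; otherwise it is verified in software via
 * ipf_checkl4sum().  Packets with a bad checksum are flagged FI_BAD.
 */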
1694 EXTERN_INLINE int
1695 ipf_checkv4sum(fr_info_t *fin)
1696 {
1697 #ifdef M_CSUM_TCP_UDP_BAD
1698 int manual, pflag, cflags, active;
1699 mb_t *m;
1700
1701 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1702 return 0;
1703
1704 if ((fin->fin_flx & FI_SHORT) != 0)
1705 return 1;
1706
1707 if (fin->fin_cksum != FI_CK_NEEDED)
1708 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1709
1710 manual = 0;
1711 m = fin->fin_m;
1712 if (m == NULL) {
1713 manual = 1;
1714 goto skipauto;
1715 }
1716
1717 switch (fin->fin_p)
1718 {
1719 case IPPROTO_UDP :
1720 pflag = M_CSUM_UDPv4;
1721 break;
1722 case IPPROTO_TCP :
1723 pflag = M_CSUM_TCPv4;
1724 break;
1725 default :
1726 pflag = 0;
1727 manual = 1;
1728 break;
1729 }
1730
1731 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1732 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1733 cflags = m->m_pkthdr.csum_flags & active;
1734
1735 if (pflag != 0) {
1736 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1737 fin->fin_flx |= FI_BAD;
1738 fin->fin_cksum = FI_CK_BAD;
1739 } else if (cflags == (pflag | M_CSUM_DATA)) {
1740 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1741 fin->fin_flx |= FI_BAD;
1742 fin->fin_cksum = FI_CK_BAD;
1743 } else {
1744 fin->fin_cksum = FI_CK_SUMOK;
1745 }
1746 } else if (cflags == pflag) {
1747 fin->fin_cksum = FI_CK_SUMOK;
1748 } else {
1749 manual = 1;
1750 }
1751 }
1752 skipauto:
1753 if (manual != 0) {
1754 if (ipf_checkl4sum(fin) == -1) {
1755 fin->fin_flx |= FI_BAD;
1756 return -1;
1757 }
1758 }
1759 #else
1760 if (ipf_checkl4sum(fin) == -1) {
1761 fin->fin_flx |= FI_BAD;
1762 return -1;
1763 }
1764 #endif
1765 return 0;
1766 }
1767
1768
1769 #ifdef USE_INET6
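/*
 * IPv6 counterpart of ipf_checkv4sum(), using the M_CSUM_TCPv6 and
 * M_CSUM_UDPv6 offload flags where available.
 */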
1770 EXTERN_INLINE int
1771 ipf_checkv6sum(fr_info_t *fin)
1772 {
1773 # ifdef M_CSUM_TCP_UDP_BAD
1774 int manual, pflag, cflags, active;
1775 mb_t *m;
1776
1777 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1778 return 0;
1779
1780 if ((fin->fin_flx & FI_SHORT) != 0)
1781 return 1;
1782
1783 if (fin->fin_cksum != FI_CK_SUMOK)
1784 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1785
1786
1787 manual = 0;
1788 m = fin->fin_m;
1789
1790 switch (fin->fin_p)
1791 {
1792 case IPPROTO_UDP :
1793 pflag = M_CSUM_UDPv6;
1794 break;
1795 case IPPROTO_TCP :
1796 pflag = M_CSUM_TCPv6;
1797 break;
1798 default :
1799 pflag = 0;
1800 manual = 1;
1801 break;
1802 }
1803
1804 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1805 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1806 cflags = m->m_pkthdr.csum_flags & active;
1807
1808 if (pflag != 0) {
1809 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1810 fin->fin_flx |= FI_BAD;
1811 } else if (cflags == (pflag | M_CSUM_DATA)) {
1812 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1813 fin->fin_flx |= FI_BAD;
1814 } else if (cflags == pflag) {
1815 ;
1816 } else {
1817 manual = 1;
1818 }
1819 }
1820 if (manual != 0) {
1821 if (ipf_checkl4sum(fin) == -1) {
1822 fin->fin_flx |= FI_BAD;
1823 return -1;
1824 }
1825 }
1826 # else
1827 if (ipf_checkl4sum(fin) == -1) {
1828 fin->fin_flx |= FI_BAD;
1829 return -1;
1830 }
1831 # endif
1832 return 0;
1833 }
1834 #endif /* USE_INET6 */
1835
1836
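/*
 * Return the total number of data bytes in the mbuf chain, using the
 * packet header length when one is present.
 */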
1837 size_t
1838 mbufchainlen(struct mbuf *m0)
1839 {
1840 size_t len;
1841
1842 if ((m0->m_flags & M_PKTHDR) != 0) {
1843 len = m0->m_pkthdr.len;
1844 } else {
1845 struct mbuf *m;
1846
1847 for (m = m0, len = 0; m != NULL; m = m->m_next)
1848 len += m->m_len;
1849 }
1850 return len;
1851 }
1852
1853
1854 /* ------------------------------------------------------------------------ */
1855 /* Function: ipf_pullup */
1856 /* Returns: NULL == pullup failed, else pointer to protocol header */
1857 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1858 /* fin(I) - pointer to packet information */
1859 /* len(I) - number of bytes to pullup */
1860 /* */
1861 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1862 /* single buffer for ease of access. Operating system native functions are */
1863 /* used to manage buffers - if necessary. If the entire packet ends up in */
1864 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1865 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1866 /* and ONLY if the pullup succeeds. */
1867 /* */
1868 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1869 /* of buffers that starts at *fin->fin_mp. */
1870 /* ------------------------------------------------------------------------ */
1871 void *
1872 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1873 {
1874 int dpoff, ipoff;
1875 mb_t *m = xmin;
1876 char *ip;
1877
1878 if (m == NULL)
1879 return NULL;
1880
1881 ip = (char *)fin->fin_ip;
1882 if ((fin->fin_flx & FI_COALESCE) != 0)
1883 return ip;
1884
1885 ipoff = fin->fin_ipoff;
1886 if (fin->fin_dp != NULL)
1887 dpoff = (char *)fin->fin_dp - (char *)ip;
1888 else
1889 dpoff = 0;
1890
1891 if (M_LEN(m) < len) {
1892 mb_t *n = *fin->fin_mp;
1893 /*
1894 * Assume that M_PKTHDR is set and just work with what is left
1895 * rather than check..
1896 * Should not make any real difference, anyway.
1897 */
1898 if (m != n) {
1899 /*
1900 * Record the mbuf that points to the mbuf that we're
1901 * about to go to work on so that we can update the
1902 * m_next appropriately later.
1903 */
1904 for (; n->m_next != m; n = n->m_next)
1905 ;
1906 } else {
1907 n = NULL;
1908 }
1909
1910 #ifdef MHLEN
1911 if (len > MHLEN)
1912 #else
1913 if (len > MLEN)
1914 #endif
1915 {
1916 #ifdef HAVE_M_PULLDOWN
1917 if (m_pulldown(m, 0, len, NULL) == NULL)
1918 m = NULL;
1919 #else
1920 FREE_MB_T(*fin->fin_mp);
1921 m = NULL;
1922 n = NULL;
1923 #endif
1924 } else
1925 {
1926 m = m_pullup(m, len);
1927 }
1928 if (n != NULL)
1929 n->m_next = m;
1930 if (m == NULL) {
1931 /*
1932 * When n is non-NULL, it indicates that m pointed to
1933 * a sub-chain (tail) of the mbuf and that the head
1934 * of this chain has not yet been free'd.
1935 */
1936 if (n != NULL) {
1937 FREE_MB_T(*fin->fin_mp);
1938 }
1939
1940 *fin->fin_mp = NULL;
1941 fin->fin_m = NULL;
1942 return NULL;
1943 }
1944
1945 if (n == NULL)
1946 *fin->fin_mp = m;
1947
1948 while (M_LEN(m) == 0) {
1949 m = m->m_next;
1950 }
1951 fin->fin_m = m;
1952 ip = MTOD(m, char *) + ipoff;
1953
1954 fin->fin_ip = (ip_t *)ip;
1955 if (fin->fin_dp != NULL)
1956 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1957 if (fin->fin_fraghdr != NULL)
1958 fin->fin_fraghdr = (char *)ip +
1959 ((char *)fin->fin_fraghdr -
1960 (char *)fin->fin_ip);
1961 }
1962
1963 if (len == fin->fin_plen)
1964 fin->fin_flx |= FI_COALESCE;
1965 return ip;
1966 }
1967
1968
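/*
 * Re-inject a packet that ipf has generated or rewritten back into
 * the stack: inbound packets are queued for ip_input() via the IP
 * packet queue, outbound packets are transmitted with ip_output().
 */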
1969 int
1970 ipf_inject(fr_info_t *fin, mb_t *m)
1971 {
1972 int error;
1973
1974 if (fin->fin_out == 0) {
1975 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1976 FREE_MB_T(m);
1977 error = ENOBUFS;
1978 } else {
1979 error = 0;
1980 }
1981 } else {
1982 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1983 }
1984 return error;
1985 }
1986
1987
1988 u_32_t
1989 ipf_random(void)
1990 {
1991 int number;
1992
1993 #ifdef _CPRNG_H
1994 number = cprng_fast32();
1995 #else
1996 number = arc4random();
1997 #endif
1998 return number;
1999 }
2000
2001
2002 /*
2003 * Routines below implement the ipf device entry points (open/close/read/write/poll).
2004 */
2005 static int ipfopen(dev_t dev, int flags
2006 #if (NetBSD >= 199511)
2007 , int devtype, PROC_T *p
2008 #endif
2009 )
2010 {
2011 u_int unit = GET_MINOR(dev);
2012 int error;
2013
2014 if (IPL_LOGMAX < unit) {
2015 error = ENXIO;
2016 } else {
2017 switch (unit)
2018 {
2019 case IPL_LOGIPF :
2020 case IPL_LOGNAT :
2021 case IPL_LOGSTATE :
2022 case IPL_LOGAUTH :
2023 case IPL_LOGLOOKUP :
2024 case IPL_LOGSYNC :
2025 #ifdef IPFILTER_SCAN
2026 case IPL_LOGSCAN :
2027 #endif
2028 error = 0;
2029 break;
2030 default :
2031 error = ENXIO;
2032 break;
2033 }
2034 }
2035 #if (__NetBSD_Version__ >= 799003000)
2036 if (error == 0) {
2037 mutex_enter(&ipf_ref_mutex);
2038 ipf_active = 1;
2039 mutex_exit(&ipf_ref_mutex);
2040 }
2041 #endif
2042 return error;
2043 }
2044
2045
2046 static int ipfclose(dev_t dev, int flags
2047 #if (NetBSD >= 199511)
2048 , int devtype, PROC_T *p
2049 #endif
2050 )
2051 {
2052 u_int unit = GET_MINOR(dev);
2053
2054 if (IPL_LOGMAX < unit)
2055 return ENXIO;
2056 else {
2057 #if (__NetBSD_Version__ >= 799003000)
2058 mutex_enter(&ipf_ref_mutex);
2059 ipf_active = 0;
2060 mutex_exit(&ipf_ref_mutex);
2061 #endif
2062 return 0;
2063 }
2064 }
2065
2066 /*
2067 * ipfread/ipflog
2068 * both of these must operate with at least splnet() lest they be
2069  * called during packet processing and cause an inconsistency to appear in
2070 * the filter lists.
2071 */
2072 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2073 {
2074
2075 if (ipfmain.ipf_running < 1) {
2076 ipfmain.ipf_interror = 130006;
2077 return EIO;
2078 }
2079
2080 if (GET_MINOR(dev) == IPL_LOGSYNC)
2081 return ipf_sync_read(&ipfmain, uio);
2082
2083 #ifdef IPFILTER_LOG
2084 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2085 #else
2086 ipfmain.ipf_interror = 130007;
2087 return ENXIO;
2088 #endif
2089 }
2090
2091
2092 /*
2093 * ipfwrite
2094 * both of these must operate with at least splnet() lest they be
2095  * called during packet processing and cause an inconsistency to appear in
2096 * the filter lists.
2097 */
2098 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2099 {
2100
2101 if (ipfmain.ipf_running < 1) {
2102 ipfmain.ipf_interror = 130008;
2103 return EIO;
2104 }
2105
2106 if (GET_MINOR(dev) == IPL_LOGSYNC)
2107 return ipf_sync_write(&ipfmain, uio);
2108 ipfmain.ipf_interror = 130009;
2109 return ENXIO;
2110 }
2111
2112
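/*
 * Poll handler for the ipf devices: report readability for the log,
 * auth and sync units (and writability for sync), otherwise record
 * the caller so it can be woken via the unit's selwait structure.
 */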
2113 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2114 {
2115 u_int unit = GET_MINOR(dev);
2116 int revents = 0;
2117
2118 if (IPL_LOGMAX < unit) {
2119 ipfmain.ipf_interror = 130010;
2120 return ENXIO;
2121 }
2122
2123 switch (unit)
2124 {
2125 case IPL_LOGIPF :
2126 case IPL_LOGNAT :
2127 case IPL_LOGSTATE :
2128 #ifdef IPFILTER_LOG
2129 if ((events & (POLLIN | POLLRDNORM)) &&
2130 ipf_log_canread(&ipfmain, unit))
2131 revents |= events & (POLLIN | POLLRDNORM);
2132 #endif
2133 break;
2134 case IPL_LOGAUTH :
2135 if ((events & (POLLIN | POLLRDNORM)) &&
2136 ipf_auth_waiting(&ipfmain))
2137 revents |= events & (POLLIN | POLLRDNORM);
2138 break;
2139 case IPL_LOGSYNC :
2140 if ((events & (POLLIN | POLLRDNORM)) &&
2141 ipf_sync_canread(&ipfmain))
2142 revents |= events & (POLLIN | POLLRDNORM);
2143 if ((events & (POLLOUT | POLLWRNORM)) &&
2144 ipf_sync_canwrite(&ipfmain))
2145 revents |= events & (POLLOUT | POLLWRNORM);
2146 break;
2147 case IPL_LOGSCAN :
2148 case IPL_LOGLOOKUP :
2149 default :
2150 break;
2151 }
2152
2153 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2154 selrecord(p, &ipfmain.ipf_selwait[unit]);
2155 return revents;
2156 }
2157
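/*
 * Finish computing a protocol checksum: temporarily advance the mbuf
 * past the IP header, sum the payload, and fold that into the
 * caller-supplied partial sum (typically the pseudo-header) using
 * one's-complement arithmetic, e.g. a partial sum of 0x1fffe folds
 * to 0xffff.
 */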
2158 u_int
2159 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2160 {
2161 struct mbuf *m;
2162 u_int sum2;
2163 int off;
2164
2165 m = fin->fin_m;
2166 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
2167 m->m_data += hlen;
2168 m->m_len -= hlen;
2169 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2170 m->m_len += hlen;
2171 m->m_data -= hlen;
2172
2173 /*
2174 * Both sum and sum2 are partial sums, so combine them together.
2175 */
2176 sum += ~sum2 & 0xffff;
2177 while (sum > 0xffff)
2178 sum = (sum & 0xffff) + (sum >> 16);
2179 sum2 = ~sum & 0xffff;
2180 return sum2;
2181 }
2182
2183 #if (__NetBSD_Version__ >= 799003000)
2184
2185 /* NetBSD module interface */
2186
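/*
 * Driver-class module "ipl"; the third argument to MODULE() names
 * the modules this one depends on (here bpf_filter).
 */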
2187 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2188
2189 static int ipl_init(void *);
2190 static int ipl_fini(void *);
2191 static int ipl_modcmd(modcmd_t, void *);
2192
2193 #ifdef _MODULE
2194 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2195 #endif
2196
2197 static int
2198 ipl_modcmd(modcmd_t cmd, void *opaque)
2199 {
2200
2201 switch (cmd) {
2202 case MODULE_CMD_INIT:
2203 return ipl_init(opaque);
2204 case MODULE_CMD_FINI:
2205 return ipl_fini(opaque);
2206 default:
2207 return ENOTTY;
2208 }
2209 }
2210
2211 static int
2212 ipl_init(void *opaque)
2213 {
2214 int error;
2215
2216 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2217 ipf_listener_cb, NULL);
2218
2219 if ((error = ipf_load_all()) != 0)
2220 return error;
2221
2222 if (ipf_create_all(&ipfmain) == NULL) {
2223 ipf_unload_all();
2224 return ENODEV;
2225 }
2226
2227 /* Initialize our mutex and reference count */
2228 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2229 ipf_active = 0;
2230
2231 #ifdef _MODULE
2232 /*
2233 * Insert ourself into the cdevsw list.
2234 */
2235 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2236 if (error)
2237 ipl_fini(opaque);
2238 #endif
2239
2240 return error;
2241 }
2242
2243 static int
2244 ipl_fini(void *opaque)
2245 {
2246
2247 #ifdef _MODULE
2248 (void)devsw_detach(NULL, &ipl_cdevsw);
2249 #endif
2250
2251 /*
2252 * Grab the mutex, verify that there are no references
2253 * and that there are no running filters. If either
2254 * of these exists, reinsert our cdevsw entry and return
2255 * an error.
2256 */
2257 mutex_enter(&ipf_ref_mutex);
2258 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2259 #ifdef _MODULE
2260 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2261 &ipl_cdevsw, &ipl_cmaj);
2262 #endif
2263 mutex_exit(&ipf_ref_mutex);
2264 return EBUSY;
2265 }
2266
2267 /* Clean up the rest of our state before being unloaded */
2268
2269 mutex_exit(&ipf_ref_mutex);
2270 mutex_destroy(&ipf_ref_mutex);
2271 ipf_destroy_all(&ipfmain);
2272 ipf_unload_all();
2273 kauth_unlisten_scope(ipf_listener);
2274
2275 return 0;
2276 }
2277 #endif /* (__NetBSD_Version__ >= 799003000) */
2278