1 /*	$NetBSD: ip_fil_netbsd.c,v 1.29 2018/07/11 05:25:46 maxv Exp $	*/
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.29 2018/07/11 05:25:46 maxv Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60 #if defined(__NetBSD__)
61 #include <netinet/in_offload.h>
62 #endif
63
64 #include <net/if.h>
65 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/tcp.h>
72 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
73 # include <netinet/tcp_timer.h>
74 # include <netinet/tcp_var.h>
75 #endif
76 #include <netinet/udp.h>
77 #include <netinet/ip_icmp.h>
78 #include "netinet/ip_compat.h"
79 #ifdef USE_INET6
80 # include <netinet/icmp6.h>
81 # if (__NetBSD_Version__ >= 106000000)
82 # include <netinet6/nd6.h>
83 # endif
84 # if __NetBSD_Version__ >= 499001100
85 # include <netinet6/scope6_var.h>
86 # endif
87 #endif
88 #include "netinet/ip_fil.h"
89 #include "netinet/ip_nat.h"
90 #include "netinet/ip_frag.h"
91 #include "netinet/ip_state.h"
92 #include "netinet/ip_proxy.h"
93 #include "netinet/ip_auth.h"
94 #include "netinet/ip_sync.h"
95 #include "netinet/ip_lookup.h"
96 #include "netinet/ip_dstlist.h"
97 #ifdef IPFILTER_SCAN
98 #include "netinet/ip_scan.h"
99 #endif
100 #include <sys/md5.h>
101 #include <sys/kernel.h>
102 #include <sys/conf.h>
103 #ifdef INET
104 extern int ip_optcopy (struct ip *, struct ip *);
105 #endif
106
107 #ifdef IPFILTER_M_IPFILTER
108 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
109 #endif
110
111 #if __NetBSD_Version__ >= 105009999
112 # define csuminfo csum_flags
113 #endif
114
115 #if __NetBSD_Version__ < 200000000
116 extern struct protosw inetsw[];
117 #endif
118
119 #if (__NetBSD_Version__ >= 599002000)
120 static kauth_listener_t ipf_listener;
121 #endif
122
123 #if (__NetBSD_Version__ < 399001400)
124 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
125 struct ifnet *, struct in6_addr *, u_long *,
126 int *);
127 #endif
128 #if (NetBSD >= 199511)
129 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
130 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
131 #else
132 # if (__NetBSD_Version__ >= 399001400)
133 static int ipfopen(dev_t dev, int flags, struct lwp *);
134 static int ipfclose(dev_t dev, int flags, struct lwp *);
135 # else
136 static int ipfopen(dev_t dev, int flags);
137 static int ipfclose(dev_t dev, int flags);
138 # endif /* __NetBSD_Version__ >= 399001400 */
139 #endif
140 static int ipfread(dev_t, struct uio *, int ioflag);
141 static int ipfwrite(dev_t, struct uio *, int ioflag);
142 static int ipfpoll(dev_t, int events, PROC_T *);
143 static void ipf_timer_func(void *ptr);
144
145 const struct cdevsw ipl_cdevsw = {
146 .d_open = ipfopen,
147 .d_close = ipfclose,
148 .d_read = ipfread,
149 .d_write = ipfwrite,
150 .d_ioctl = ipfioctl,
151 .d_stop = nostop,
152 .d_tty = notty,
153 .d_poll = ipfpoll,
154 .d_mmap = nommap,
155 #if (__NetBSD_Version__ >= 200000000)
156 .d_kqfilter = nokqfilter,
157 #endif
158 .d_discard = nodiscard,
159 #ifdef D_OTHER
160 .d_flag = D_OTHER
161 #else
162 .d_flag = 0
163 #endif
164 };
165 #if (__NetBSD_Version__ >= 799003000)
166 kmutex_t ipf_ref_mutex;
167 int ipf_active;
168 #endif
169
170 ipf_main_softc_t ipfmain;
171
172 static u_short ipid = 0;
173 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
174 static int ipf_send_ip(fr_info_t *, mb_t *);
175 #ifdef USE_INET6
176 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
177 fr_info_t *, frdest_t *);
178 #endif
179
180 #if defined(NETBSD_PF)
181 # include <net/pfil.h>
182 /*
183 * We provide the ipf_checkp name just to minimize changes later.
184 */
185 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
186 #endif /* NETBSD_PF */
187
188 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
189 # include <net/pfil.h>
190
191 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
192
193 static int
194 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
195 {
196 struct ip *ip;
197 int rv, hlen;
198
199 #if __NetBSD_Version__ >= 200080000
200 	/*
201 	 * Ensure that the mbufs are writable beforehand,
202 	 * as the ipf code assumes this.
203 	 * XXX inefficient
204 	 */
205 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
206
207 if (error) {
208 m_freem(*mp);
209 *mp = NULL;
210 return error;
211 }
212 #endif
213 ip = mtod(*mp, struct ip *);
214 hlen = ip->ip_hl << 2;
215
216 #ifdef INET
217 #if defined(M_CSUM_TCPv4)
218 /*
219 * If the packet is out-bound, we can't delay checksums
220 * here. For in-bound, the checksum has already been
221 * validated.
222 */
223 if (dir == PFIL_OUT) {
224 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
225 in_undefer_cksum_tcpudp(*mp);
226 (*mp)->m_pkthdr.csum_flags &=
227 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
228 }
229 }
230 #endif /* M_CSUM_TCPv4 */
231 #endif /* INET */
232
233 /*
234 * Note, we don't need to update the checksum, because
235 * it has already been verified.
236 */
237 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
238
239 return (rv);
240 }
241
242 # ifdef USE_INET6
243 # include <netinet/ip6.h>
244
245 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
246
247 static int
248 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
249 {
250 #if defined(INET6)
251 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
252 /*
253 * If the packet is out-bound, we can't delay checksums
254 * here. For in-bound, the checksum has already been
255 * validated.
256 */
257 if (dir == PFIL_OUT) {
258 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
259 # if (__NetBSD_Version__ > 399000600)
260 in6_delayed_cksum(*mp);
261 # endif
262 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
263 M_CSUM_UDPv6);
264 }
265 }
266 # endif
267 #endif /* INET6 */
268
269 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
270 ifp, (dir == PFIL_OUT), mp));
271 }
272 # endif
273
274
275 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
276
277 # if (__NetBSD_Version__ >= 799000400)
278
279 static void ipf_pfilsync(void *, unsigned long, void *);
280
281 static void
282 ipf_pfilsync(void *hdr, unsigned long cmd, void *arg2)
283 {
284 /*
285 * The interface pointer is useless for create (we have nothing to
286 * compare it to) and at detach, the interface name is still in the
287 * list of active NICs (albeit, down, but that's not any real
288 * indicator) and doing ifunit() on the name will still return the
289 * pointer, so it's not much use then, either.
290 */
291 ipf_sync(&ipfmain, NULL);
292 }
293
294 # else
295
296 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
297
298 static int
299 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
300 {
301 ipf_sync(&ipfmain, NULL);
302 return 0;
303 }
304
305 # endif
306 # endif
307
308 #endif /* __NetBSD_Version__ >= 105110000 */
309
310
311 #if defined(IPFILTER_LKM)
312 int
313 ipf_identify(s)
314 char *s;
315 {
316 if (strcmp(s, "ipl") == 0)
317 return 1;
318 return 0;
319 }
320 #endif /* IPFILTER_LKM */
321
322 #if (__NetBSD_Version__ >= 599002000)
323 static int
324 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
325 void *arg0, void *arg1, void *arg2, void *arg3)
326 {
327 int result;
328 enum kauth_network_req req;
329
330 result = KAUTH_RESULT_DEFER;
331 req = (enum kauth_network_req)arg0;
332
333 if (action != KAUTH_NETWORK_FIREWALL)
334 return result;
335
336 	/* These must have come from device context. */
337 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
338 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
339 result = KAUTH_RESULT_ALLOW;
340
341 return result;
342 }
343 #endif
344
345 /*
346  * Pseudo-device attach routine for NetBSD; when ipfilter is built as a
347  * kernel module, attachment is handled by ipl_modcmd() and this is a no-op.
348  */
348 void
349 ipfilterattach(int count)
350 {
351
352 #if (__NetBSD_Version__ >= 799003000)
353 return;
354 #else
355 #if (__NetBSD_Version__ >= 599002000)
356 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
357 ipf_listener_cb, NULL);
358 #endif
359
360 if (ipf_load_all() == 0)
361 (void) ipf_create_all(&ipfmain);
362 #endif
363 }
364
365
366 int
367 ipfattach(ipf_main_softc_t *softc)
368 {
369 SPL_INT(s);
370 #if (__NetBSD_Version__ >= 499005500)
371 int i;
372 #endif
373 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
374 int error = 0;
375 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
376 pfil_head_t *ph_inet;
377 # ifdef USE_INET6
378 pfil_head_t *ph_inet6;
379 # endif
380 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
381 pfil_head_t *ph_ifsync;
382 # endif
383 # endif
384 #endif
385
386 SPL_NET(s);
387 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
388 printf("IP Filter: already initialized\n");
389 SPL_X(s);
390 IPFERROR(130017);
391 return EBUSY;
392 }
393
394 if (ipf_init_all(softc) < 0) {
395 SPL_X(s);
396 IPFERROR(130015);
397 return EIO;
398 }
399
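	/*
	 * Hook the filter into the pfil(9) heads for IPv4, IPv6 and (where
	 * available) interface events, so packets are seen on both the
	 * input and output paths.
	 */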
400 #ifdef NETBSD_PF
401 # if (__NetBSD_Version__ >= 104200000)
402 # if __NetBSD_Version__ >= 105110000
403 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
404 # ifdef USE_INET6
405 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
406 # endif
407 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
408 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
409 # endif
410
411 if (ph_inet == NULL
412 # ifdef USE_INET6
413 && ph_inet6 == NULL
414 # endif
415 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
416 && ph_ifsync == NULL
417 # endif
418 ) {
419 SPL_X(s);
420 IPFERROR(130016);
421 return ENODEV;
422 }
423
424 if (ph_inet != NULL)
425 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
426 PFIL_IN|PFIL_OUT, ph_inet);
427 else
428 error = 0;
429 # else
430 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
431 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
432 # endif
433 if (error) {
434 IPFERROR(130013);
435 goto pfil_error;
436 }
437 # else
438 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
439 # endif
440
441 # ifdef USE_INET6
442 # if __NetBSD_Version__ >= 105110000
443 if (ph_inet6 != NULL)
444 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
445 PFIL_IN|PFIL_OUT, ph_inet6);
446 else
447 error = 0;
448 if (error) {
449 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
450 PFIL_IN|PFIL_OUT, ph_inet6);
451 ipfmain.ipf_interror = 130014;
452 goto pfil_error;
453 }
454 # else
455 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
456 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
457 if (error) {
458 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
459 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
460 IPFERROR(130014);
461 goto pfil_error;
462 }
463 # endif
464 # endif
465
466 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
467 if (ph_ifsync != NULL)
468 #if (__NetBSD_Version__ >= 799000400)
469 (void) pfil_add_ihook((void *)ipf_pfilsync, NULL,
470 PFIL_IFNET, ph_ifsync);
471 #else
472 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
473 PFIL_IFNET, ph_ifsync);
474 #endif
475 # endif
476 #endif
477
478 #if (__NetBSD_Version__ >= 499005500)
479 for (i = 0; i < IPL_LOGSIZE; i++)
480 selinit(&ipfmain.ipf_selwait[i]);
481 #else
482 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
483 #endif
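	/* Remember any previously installed check routine and make ipf_check the active one. */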
484 ipf_savep = ipf_checkp;
485 ipf_checkp = ipf_check;
486
487 #ifdef INET
488 if (softc->ipf_control_forwarding & 1)
489 ipforwarding = 1;
490 #endif
491
492 ipid = 0;
493
494 SPL_X(s);
495
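	/*
	 * Arm the periodic timer that drives ipf_timer_func() (and hence
	 * ipf_slowtimer()) while the filter is running.
	 */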
496 #if (__NetBSD_Version__ >= 104010000)
497 # if (__NetBSD_Version__ >= 499002000)
498 callout_init(&softc->ipf_slow_ch, 0);
499 # else
500 callout_init(&softc->ipf_slow_ch);
501 # endif
502 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
503 ipf_timer_func, softc);
504 #else
505 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
506 #endif
507
508 return 0;
509
510 #if __NetBSD_Version__ >= 105110000
511 pfil_error:
512 SPL_X(s);
513 ipf_fini_all(softc);
514 return error;
515 #endif
516 }
517
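/*
 * Periodic housekeeping: run the ipf slow timer (which expires state, NAT
 * and fragment table entries) and re-arm the callout.
 */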
518 static void
519 ipf_timer_func(void *ptr)
520 {
521 ipf_main_softc_t *softc = ptr;
522 SPL_INT(s);
523
524 SPL_NET(s);
525 READ_ENTER(&softc->ipf_global);
526
527 if (softc->ipf_running > 0)
528 ipf_slowtimer(softc);
529
530 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
531 #if NETBSD_GE_REV(104240000)
532 callout_reset(&softc->ipf_slow_ch, hz / 2,
533 ipf_timer_func, softc);
534 #else
535 timeout(ipf_timer_func, softc,
536 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
537 #endif
538 }
539 RWLOCK_EXIT(&softc->ipf_global);
540 SPL_X(s);
541 }
542
543
544 /*
545 * Disable the filter by removing the hooks from the IP input/output
546 * stream.
547 */
548 int
549 ipfdetach(ipf_main_softc_t *softc)
550 {
551 SPL_INT(s);
552 #if (__NetBSD_Version__ >= 499005500)
553 int i;
554 #endif
555 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
556 int error = 0;
557 # if __NetBSD_Version__ >= 105150000
558 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
559 # ifdef USE_INET6
560 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
561 # endif
562 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
563 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
564 # endif
565 # endif
566 #endif
567
568 SPL_NET(s);
569
570 #if (__NetBSD_Version__ >= 104010000)
571 if (softc->ipf_running > 0)
572 callout_stop(&softc->ipf_slow_ch);
573 #else
574 untimeout(ipf_slowtimer, NULL);
575 #endif /* NetBSD */
576
577 ipf_checkp = ipf_savep;
578 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
579 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
580
581 #ifdef INET
582 if (softc->ipf_control_forwarding & 2)
583 ipforwarding = 0;
584 #endif
585
586 #ifdef NETBSD_PF
587 # if (__NetBSD_Version__ >= 104200000)
588 # if __NetBSD_Version__ >= 105110000
589 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
590 # if __NetBSD_Version__ >= 799000400
591 (void) pfil_remove_ihook((void *)ipf_pfilsync, NULL,
592 PFIL_IFNET, ph_ifsync);
593 # else
594 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
595 PFIL_IFNET, ph_ifsync);
596 # endif
597 # endif
598
599 if (ph_inet != NULL)
600 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
601 PFIL_IN|PFIL_OUT, ph_inet);
602 else
603 error = 0;
604 # else
605 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
606 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
607 # endif
608 if (error) {
609 SPL_X(s);
610 IPFERROR(130011);
611 return error;
612 }
613 # else
614 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
615 # endif
616 # ifdef USE_INET6
617 # if __NetBSD_Version__ >= 105110000
618 if (ph_inet6 != NULL)
619 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
620 PFIL_IN|PFIL_OUT, ph_inet6);
621 else
622 error = 0;
623 # else
624 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
625 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
626 # endif
627 if (error) {
628 SPL_X(s);
629 IPFERROR(130012);
630 return error;
631 }
632 # endif
633 #endif
634 SPL_X(s);
635
636 #if (__NetBSD_Version__ >= 499005500)
637 for (i = 0; i < IPL_LOGSIZE; i++)
638 seldestroy(&ipfmain.ipf_selwait[i]);
639 #endif
640
641 ipf_fini_all(softc);
642
643 return 0;
644 }
645
646
647 /*
648 * Filter ioctl interface.
649 */
650 int
651 ipfioctl(dev_t dev, u_long cmd,
652 #if (__NetBSD_Version__ >= 499001000)
653 void *data,
654 #else
655 caddr_t data,
656 #endif
657 int mode
658 #if (NetBSD >= 199511)
659 # if (__NetBSD_Version__ >= 399001400)
660 , struct lwp *p
661 # if (__NetBSD_Version__ >= 399002000)
662 # define UID(l) kauth_cred_getuid((l)->l_cred)
663 # else
664 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
665 # endif
666 # else
667 , struct proc *p
668 # define UID(p) ((p)->p_cred->p_ruid)
669 # endif
670 #endif
671 )
672 {
673 int error = 0, unit = 0;
674 SPL_INT(s);
675
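	/*
	 * Modifying ioctls (FWRITE) require firewall privilege: kauth(9) on
	 * newer kernels, or securelevel < 2 on older ones.
	 */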
676 #if (__NetBSD_Version__ >= 399002000)
677 if ((mode & FWRITE) &&
678 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
679 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
680 NULL, NULL)) {
681 ipfmain.ipf_interror = 130005;
682 return EPERM;
683 }
684 #else
685 if ((securelevel >= 2) && (mode & FWRITE)) {
686 ipfmain.ipf_interror = 130001;
687 return EPERM;
688 }
689 #endif
690
691 unit = GET_MINOR(dev);
692 if ((IPL_LOGMAX < unit) || (unit < 0)) {
693 ipfmain.ipf_interror = 130002;
694 return ENXIO;
695 }
696
697 if (ipfmain.ipf_running <= 0) {
698 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
699 ipfmain.ipf_interror = 130003;
700 return EIO;
701 }
702 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
703 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
704 cmd != SIOCGETFS && cmd != SIOCGETFF &&
705 cmd != SIOCIPFINTERROR) {
706 ipfmain.ipf_interror = 130004;
707 return EIO;
708 }
709 }
710
711 SPL_NET(s);
712
713 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
714 if (error != -1) {
715 SPL_X(s);
716 return error;
717 }
718
719 SPL_X(s);
720 return error;
721 }
722
723
724 /*
725 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
726 * requires a large amount of setting up and isn't any more efficient.
727 */
728 int
729 ipf_send_reset(fr_info_t *fin)
730 {
731 struct tcphdr *tcp, *tcp2;
732 int tlen = 0, hlen;
733 struct mbuf *m;
734 #ifdef USE_INET6
735 ip6_t *ip6;
736 #endif
737 ip_t *ip;
738
739 tcp = fin->fin_dp;
740 if (tcp->th_flags & TH_RST)
741 return -1; /* feedback loop */
742
743 if (ipf_checkl4sum(fin) == -1)
744 return -1;
745
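	/*
	 * The RST must acknowledge everything in the offending segment;
	 * SYN and FIN each count as one sequence number.
	 */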
746 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
747 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
748 ((tcp->th_flags & TH_FIN) ? 1 : 0);
749
750 #ifdef USE_INET6
751 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
752 #else
753 hlen = sizeof(ip_t);
754 #endif
755 #ifdef MGETHDR
756 MGETHDR(m, M_DONTWAIT, MT_HEADER);
757 #else
758 MGET(m, M_DONTWAIT, MT_HEADER);
759 #endif
760 if (m == NULL)
761 return -1;
762 if (sizeof(*tcp2) + hlen > MHLEN) {
763 MCLGET(m, M_DONTWAIT);
764 if (m == NULL)
765 return -1;
766 if ((m->m_flags & M_EXT) == 0) {
767 FREE_MB_T(m);
768 return -1;
769 }
770 }
771
772 m->m_len = sizeof(*tcp2) + hlen;
773 m->m_data += max_linkhdr;
774 m->m_pkthdr.len = m->m_len;
775 m_reset_rcvif(m);
776 ip = mtod(m, struct ip *);
777 bzero((char *)ip, hlen);
778 #ifdef USE_INET6
779 ip6 = (ip6_t *)ip;
780 #endif
781 bzero((char *)ip, sizeof(*tcp2) + hlen);
782 tcp2 = (struct tcphdr *)((char *)ip + hlen);
783 tcp2->th_sport = tcp->th_dport;
784 tcp2->th_dport = tcp->th_sport;
785
786 if (tcp->th_flags & TH_ACK) {
787 tcp2->th_seq = tcp->th_ack;
788 tcp2->th_flags = TH_RST;
789 tcp2->th_ack = 0;
790 } else {
791 tcp2->th_seq = 0;
792 tcp2->th_ack = ntohl(tcp->th_seq);
793 tcp2->th_ack += tlen;
794 tcp2->th_ack = htonl(tcp2->th_ack);
795 tcp2->th_flags = TH_RST|TH_ACK;
796 }
797 tcp2->th_x2 = 0;
798 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
799 tcp2->th_win = tcp->th_win;
800 tcp2->th_sum = 0;
801 tcp2->th_urp = 0;
802
803 #ifdef USE_INET6
804 if (fin->fin_v == 6) {
805 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
806 ip6->ip6_plen = htons(sizeof(struct tcphdr));
807 ip6->ip6_nxt = IPPROTO_TCP;
808 ip6->ip6_hlim = 0;
809 ip6->ip6_src = fin->fin_dst6.in6;
810 ip6->ip6_dst = fin->fin_src6.in6;
811 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
812 sizeof(*ip6), sizeof(*tcp2));
813 return ipf_send_ip(fin, m);
814 }
815 #endif
816 #ifdef INET
817 ip->ip_p = IPPROTO_TCP;
818 ip->ip_len = htons(sizeof(struct tcphdr));
819 ip->ip_src.s_addr = fin->fin_daddr;
820 ip->ip_dst.s_addr = fin->fin_saddr;
821 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
822 ip->ip_len = hlen + sizeof(*tcp2);
823 return ipf_send_ip(fin, m);
824 #else
825 return 0;
826 #endif
827 }
828
829
830 /*
831 * Expects ip_len to be in host byte order when called.
832 */
833 static int
834 ipf_send_ip(fr_info_t *fin, mb_t *m)
835 {
836 fr_info_t fnew;
837 #ifdef INET
838 ip_t *oip;
839 #endif
840 ip_t *ip;
841 int hlen;
842
843 ip = mtod(m, ip_t *);
844 bzero((char *)&fnew, sizeof(fnew));
845 fnew.fin_main_soft = fin->fin_main_soft;
846
847 IP_V_A(ip, fin->fin_v);
848 switch (fin->fin_v)
849 {
850 #ifdef INET
851 case 4 :
852 oip = fin->fin_ip;
853 hlen = sizeof(*oip);
854 fnew.fin_v = 4;
855 fnew.fin_p = ip->ip_p;
856 fnew.fin_plen = ntohs(ip->ip_len);
857 HTONS(ip->ip_len);
858 IP_HL_A(ip, sizeof(*oip) >> 2);
859 ip->ip_tos = oip->ip_tos;
860 ip->ip_id = ipf_nextipid(fin);
861 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
862 ip->ip_ttl = ip_defttl;
863 ip->ip_sum = 0;
864 break;
865 #endif
866 #ifdef USE_INET6
867 case 6 :
868 {
869 ip6_t *ip6 = (ip6_t *)ip;
870
871 ip6->ip6_vfc = 0x60;
872 ip6->ip6_hlim = IPDEFTTL;
873
874 hlen = sizeof(*ip6);
875 fnew.fin_p = ip6->ip6_nxt;
876 fnew.fin_v = 6;
877 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
878 break;
879 }
880 #endif
881 default :
882 return EINVAL;
883 }
884 #ifdef KAME_IPSEC
885 m_reset_rcvif(m);
886 #endif
887
888 fnew.fin_ifp = fin->fin_ifp;
889 fnew.fin_flx = FI_NOCKSUM;
890 fnew.fin_m = m;
891 fnew.fin_ip = ip;
892 fnew.fin_mp = &m;
893 fnew.fin_hlen = hlen;
894 fnew.fin_dp = (char *)ip + hlen;
895 (void) ipf_makefrip(hlen, ip, &fnew);
896
897 return ipf_fastroute(m, &m, &fnew, NULL);
898 }
899
900
901 int
902 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
903 {
904 int err, hlen, xtra, iclen, ohlen, avail;
905 struct in_addr dst4;
906 struct icmp *icmp;
907 struct mbuf *m;
908 i6addr_t dst6;
909 void *ifp;
910 #ifdef USE_INET6
911 int code;
912 ip6_t *ip6;
913 #endif
914 ip_t *ip, *ip2;
915
916 if ((type < 0) || (type > ICMP_MAXTYPE))
917 return -1;
918
919 #ifdef USE_INET6
920 code = fin->fin_icode;
921 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
922 return -1;
923 #endif
924
925 if (ipf_checkl4sum(fin) == -1)
926 return -1;
927 #ifdef MGETHDR
928 MGETHDR(m, M_DONTWAIT, MT_HEADER);
929 #else
930 MGET(m, M_DONTWAIT, MT_HEADER);
931 #endif
932 if (m == NULL)
933 return -1;
934 avail = MHLEN;
935
936 xtra = 0;
937 hlen = 0;
938 ohlen = 0;
939 dst4.s_addr = 0;
940 ifp = fin->fin_ifp;
941 if (fin->fin_v == 4) {
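		/*
		 * Never answer an ICMP error with another ICMP error; only
		 * informational requests (echo, timestamp, info, mask) may
		 * trigger one.
		 */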
942 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
943 switch (ntohs(fin->fin_data[0]) >> 8)
944 {
945 case ICMP_ECHO :
946 case ICMP_TSTAMP :
947 case ICMP_IREQ :
948 case ICMP_MASKREQ :
949 break;
950 default :
951 FREE_MB_T(m);
952 return 0;
953 }
954
955 if (dst == 0) {
956 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
957 &dst6, NULL) == -1) {
958 FREE_MB_T(m);
959 return -1;
960 }
961 dst4 = dst6.in4;
962 } else
963 dst4.s_addr = fin->fin_daddr;
964
965 hlen = sizeof(ip_t);
966 ohlen = fin->fin_hlen;
967 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
968 if (fin->fin_hlen < fin->fin_plen)
969 xtra = MIN(fin->fin_dlen, 8);
970 else
971 xtra = 0;
972 }
973
974 #ifdef USE_INET6
975 else if (fin->fin_v == 6) {
976 hlen = sizeof(ip6_t);
977 ohlen = sizeof(ip6_t);
978 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
979 type = icmptoicmp6types[type];
980 if (type == ICMP6_DST_UNREACH)
981 code = icmptoicmp6unreach[code];
982
983 if (iclen + max_linkhdr + fin->fin_plen > avail) {
984 MCLGET(m, M_DONTWAIT);
985 if (m == NULL)
986 return -1;
987 if ((m->m_flags & M_EXT) == 0) {
988 FREE_MB_T(m);
989 return -1;
990 }
991 avail = MCLBYTES;
992 }
993 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
994 xtra = MIN(xtra, IPV6_MMTU - iclen);
995 if (dst == 0 && !IN6_IS_ADDR_LINKLOCAL(&fin->fin_dst6.in6)) {
996 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
997 &dst6, NULL) == -1) {
998 FREE_MB_T(m);
999 return -1;
1000 }
1001 } else
1002 dst6 = fin->fin_dst6;
1003 }
1004 #endif
1005 else {
1006 FREE_MB_T(m);
1007 return -1;
1008 }
1009
1010 avail -= (max_linkhdr + iclen);
1011 if (avail < 0) {
1012 FREE_MB_T(m);
1013 return -1;
1014 }
1015 if (xtra > avail)
1016 xtra = avail;
1017 iclen += xtra;
1018 m->m_data += max_linkhdr;
1019 m_reset_rcvif(m);
1020 m->m_pkthdr.len = iclen;
1021 m->m_len = iclen;
1022 ip = mtod(m, ip_t *);
1023 icmp = (struct icmp *)((char *)ip + hlen);
1024 ip2 = (ip_t *)&icmp->icmp_ip;
1025
1026 icmp->icmp_type = type;
1027 icmp->icmp_code = fin->fin_icode;
1028 icmp->icmp_cksum = 0;
1029 #ifdef icmp_nextmtu
1030 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1031 if (fin->fin_mtu != 0) {
1032 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1033
1034 } else if (ifp != NULL) {
1035 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1036
1037 } else { /* make up a number... */
1038 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1039 }
1040 }
1041 #endif
1042
1043 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1044
1045 #if defined(M_CSUM_IPv4)
1046 /*
1047 * Clear any in-bound checksum flags for this packet.
1048 */
1049 m->m_pkthdr.csuminfo = 0;
1050 #endif /* M_CSUM_IPv4 */
1051
1052 #ifdef USE_INET6
1053 ip6 = (ip6_t *)ip;
1054 if (fin->fin_v == 6) {
1055 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1056 ip6->ip6_plen = htons(iclen - hlen);
1057 ip6->ip6_nxt = IPPROTO_ICMPV6;
1058 ip6->ip6_hlim = 0;
1059 ip6->ip6_src = dst6.in6;
1060 ip6->ip6_dst = fin->fin_src6.in6;
1061 if (xtra > 0)
1062 bcopy((char *)fin->fin_ip + ohlen,
1063 (char *)&icmp->icmp_ip + ohlen, xtra);
1064 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1065 sizeof(*ip6), iclen - hlen);
1066 } else
1067 #endif
1068 {
1069 ip->ip_p = IPPROTO_ICMP;
1070 ip->ip_src.s_addr = dst4.s_addr;
1071 ip->ip_dst.s_addr = fin->fin_saddr;
1072
1073 if (xtra > 0)
1074 bcopy((char *)fin->fin_ip + ohlen,
1075 (char *)&icmp->icmp_ip + ohlen, xtra);
1076 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1077 sizeof(*icmp) + 8);
1078 ip->ip_len = iclen;
1079 ip->ip_p = IPPROTO_ICMP;
1080 }
1081 err = ipf_send_ip(fin, m);
1082 return err;
1083 }
1084
1085
1086 /*
1087 * m0 - pointer to mbuf where the IP packet starts
1088 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1089 */
1090 int
1091 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1092 {
1093 register struct ip *ip, *mhip;
1094 register struct mbuf *m = *mpp;
1095 register struct route *ro;
1096 int len, off, error = 0, hlen, code;
1097 struct ifnet *ifp, *sifp;
1098 ipf_main_softc_t *softc;
1099 #if __NetBSD_Version__ >= 499001100
1100 union {
1101 struct sockaddr dst;
1102 struct sockaddr_in dst4;
1103 } u;
1104 #else
1105 struct sockaddr_in *dst4;
1106 #endif
1107 struct sockaddr *dst;
1108 u_short ip_off, ip_len;
1109 struct route iproute;
1110 struct rtentry *rt;
1111 frdest_t node;
1112 frentry_t *fr;
1113
1114 if (fin->fin_v == 6) {
1115 #ifdef USE_INET6
1116 error = ipf_fastroute6(m0, mpp, fin, fdp);
1117 #else
1118 error = EPROTONOSUPPORT;
1119 #endif
1120 if ((error != 0) && (*mpp != NULL))
1121 FREE_MB_T(*mpp);
1122 return error;
1123 }
1124 #ifndef INET
1125 FREE_MB_T(*mpp);
1126 return EPROTONOSUPPORT;
1127 #else
1128
1129 hlen = fin->fin_hlen;
1130 ip = mtod(m0, struct ip *);
1131 softc = fin->fin_main_soft;
1132 rt = NULL;
1133 ifp = NULL;
1134
1135 # if defined(M_CSUM_IPv4)
1136 /*
1137 * Clear any in-bound checksum flags for this packet.
1138 */
1139 m0->m_pkthdr.csuminfo = 0;
1140 # endif /* M_CSUM_IPv4 */
1141
1142 /*
1143 * Route packet.
1144 */
1145 ro = &iproute;
1146 memset(ro, 0, sizeof(*ro));
1147 fr = fin->fin_fr;
1148
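	/*
	 * If the rule routes via a destination list, select a node from the
	 * list and use it in place of the static destination.
	 */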
1149 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1150 (fdp->fd_type == FRD_DSTLIST)) {
1151 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1152 fdp = &node;
1153 }
1154 if (fdp != NULL)
1155 ifp = fdp->fd_ptr;
1156 else
1157 ifp = fin->fin_ifp;
1158
1159 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1160 error = -2;
1161 goto bad;
1162 }
1163
1164 # if __NetBSD_Version__ >= 499001100
1165 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1166 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1167 else
1168 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1169 dst = &u.dst;
1170 rtcache_setdst(ro, dst);
1171 rt = rtcache_init(ro);
1172 # else
1173 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1174 dst = (struct sockaddr *)dst4;
1175 dst4->sin_family = AF_INET;
1176 dst4->sin_addr = ip->ip_dst;
1177
1178 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1179 dst4->sin_addr = fdp->fd_ip;
1180
1181 dst4->sin_len = sizeof(*dst);
1182 rtalloc(ro);
1183 rt = ro->ro_rt;
1184 # endif
1185 if ((ifp == NULL) && (rt != NULL))
1186 ifp = rt->rt_ifp;
1187 if ((rt == NULL) || (ifp == NULL)) {
1188 #ifdef INET
1189 if (in_localaddr(ip->ip_dst))
1190 error = EHOSTUNREACH;
1191 else
1192 #endif
1193 error = ENETUNREACH;
1194 goto bad;
1195 }
1196
1197
1198 if (rt->rt_flags & RTF_GATEWAY)
1199 dst = rt->rt_gateway;
1200
1201 rt->rt_use++;
1202
1203 	/*
1204 	 * Input packets which are being "fastrouted" won't go back
1205 	 * through output filtering, so they would miss their chance to
1206 	 * get NAT'd and counted.  Duplicated packets aren't considered to
1207 	 * be part of the normal packet stream, so do not NAT them or
1208 	 * pass them through stateful checking, etc.
1209 	 */
1210 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1211 sifp = fin->fin_ifp;
1212 fin->fin_ifp = ifp;
1213 fin->fin_out = 1;
1214 (void) ipf_acctpkt(fin, NULL);
1215 fin->fin_fr = NULL;
1216 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1217 u_32_t pass;
1218
1219 (void) ipf_state_check(fin, &pass);
1220 }
1221
1222 switch (ipf_nat_checkout(fin, NULL))
1223 {
1224 case 0 :
1225 break;
1226 case 1 :
1227 ip->ip_sum = 0;
1228 break;
1229 case -1 :
1230 error = -1;
1231 goto bad;
1232 break;
1233 }
1234
1235 fin->fin_ifp = sifp;
1236 fin->fin_out = 0;
1237 } else
1238 ip->ip_sum = 0;
1239 /*
1240 * If small enough for interface, can just send directly.
1241 */
1242 m_set_rcvif(m, ifp);
1243
1244 ip_len = ntohs(ip->ip_len);
1245 if (ip_len <= ifp->if_mtu) {
1246 # if defined(M_CSUM_IPv4)
1247 # if (__NetBSD_Version__ >= 105009999)
1248 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1249 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1250 # else
1251 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1252 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1253 # endif /* (__NetBSD_Version__ >= 105009999) */
1254 else if (ip->ip_sum == 0)
1255 ip->ip_sum = in_cksum(m, hlen);
1256 # else
1257 if (!ip->ip_sum)
1258 ip->ip_sum = in_cksum(m, hlen);
1259 # endif /* M_CSUM_IPv4 */
1260
1261 error = if_output_lock(ifp, ifp, m, dst, rt);
1262 goto done;
1263 }
1264
1265 /*
1266 * Too large for interface; fragment if possible.
1267 * Must be able to put at least 8 bytes per fragment.
1268 */
1269 ip_off = ntohs(ip->ip_off);
1270 if (ip_off & IP_DF) {
1271 error = EMSGSIZE;
1272 goto bad;
1273 }
1274 len = (ifp->if_mtu - hlen) &~ 7;
1275 if (len < 8) {
1276 error = EMSGSIZE;
1277 goto bad;
1278 }
1279
1280 {
1281 int mhlen, firstlen = len;
1282 struct mbuf **mnext = &m->m_act;
1283
1284 /*
1285 * Loop through length of segment after first fragment,
1286 * make new header and copy data of each part and link onto chain.
1287 */
1288 m0 = m;
1289 mhlen = sizeof (struct ip);
1290 for (off = hlen + len; off < ip_len; off += len) {
1291 # ifdef MGETHDR
1292 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1293 # else
1294 MGET(m, M_DONTWAIT, MT_HEADER);
1295 # endif
1296 if (m == 0) {
1297 m = m0;
1298 error = ENOBUFS;
1299 goto bad;
1300 }
1301 m->m_data += max_linkhdr;
1302 mhip = mtod(m, struct ip *);
1303 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1304 #ifdef INET
1305 if (hlen > sizeof (struct ip)) {
1306 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1307 IP_HL_A(mhip, mhlen >> 2);
1308 }
1309 #endif
1310 m->m_len = mhlen;
1311 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1312 if (off + len >= ip_len)
1313 len = ip_len - off;
1314 else
1315 mhip->ip_off |= IP_MF;
1316 mhip->ip_len = htons((u_short)(len + mhlen));
1317 m->m_next = m_copym(m0, off, len, M_DONTWAIT);
1318 if (m->m_next == 0) {
1319 error = ENOBUFS; /* ??? */
1320 goto sendorfree;
1321 }
1322 m->m_pkthdr.len = mhlen + len;
1323 m_reset_rcvif(m);
1324 mhip->ip_off = htons((u_short)mhip->ip_off);
1325 mhip->ip_sum = 0;
1326 #ifdef INET
1327 mhip->ip_sum = in_cksum(m, mhlen);
1328 #endif
1329 *mnext = m;
1330 mnext = &m->m_act;
1331 }
1332 /*
1333 * Update first fragment by trimming what's been copied out
1334 * and updating header, then send each fragment (in order).
1335 */
1336 m_adj(m0, hlen + firstlen - ip_len);
1337 ip->ip_len = htons((u_short)(hlen + firstlen));
1338 ip->ip_off = htons((u_short)IP_MF);
1339 ip->ip_sum = 0;
1340 #ifdef INET
1341 ip->ip_sum = in_cksum(m0, hlen);
1342 #endif
1343 sendorfree:
1344 for (m = m0; m; m = m0) {
1345 m0 = m->m_act;
1346 m->m_act = 0;
1347 if (error == 0) {
1348 KERNEL_LOCK(1, NULL);
1349 error = (*ifp->if_output)(ifp, m, dst, rt);
1350 KERNEL_UNLOCK_ONE(NULL);
1351 } else {
1352 FREE_MB_T(m);
1353 }
1354 }
1355 }
1356 done:
1357 if (!error)
1358 softc->ipf_frouteok[0]++;
1359 else
1360 softc->ipf_frouteok[1]++;
1361
1362 # if __NetBSD_Version__ >= 499001100
1363 rtcache_unref(rt, ro);
1364 rtcache_free(ro);
1365 # else
1366 if (rt) {
1367 RTFREE(rt);
1368 }
1369 # endif
1370 return error;
1371 bad:
1372 if (error == EMSGSIZE) {
1373 sifp = fin->fin_ifp;
1374 code = fin->fin_icode;
1375 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1376 fin->fin_ifp = ifp;
1377 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1378 fin->fin_ifp = sifp;
1379 fin->fin_icode = code;
1380 }
1381 FREE_MB_T(m);
1382 goto done;
1383 #endif /* INET */
1384 }
1385
1386
1387 #if defined(USE_INET6)
1388 /*
1389  * This is the IPv6 specific fastroute code.  It doesn't clean up the mbufs
1390  * or ensure that it is an IPv6 packet that is being forwarded; those are
1391  * expected to be done by the caller (ipf_fastroute).
1392  */
1393 static int
1394 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1395 frdest_t *fdp)
1396 {
1397 # if __NetBSD_Version__ >= 499001100
1398 struct route ip6route;
1399 const struct sockaddr *dst;
1400 union {
1401 struct sockaddr dst;
1402 struct sockaddr_in6 dst6;
1403 } u;
1404 struct route *ro;
1405 # else
1406 struct route_in6 ip6route;
1407 struct sockaddr_in6 *dst6;
1408 struct route_in6 *ro;
1409 # endif
1410 struct rtentry *rt;
1411 struct ifnet *ifp;
1412 u_long mtu;
1413 int error;
1414
1415 error = 0;
1416 ro = &ip6route;
1417
1418 if (fdp != NULL)
1419 ifp = fdp->fd_ptr;
1420 else
1421 ifp = fin->fin_ifp;
1422 memset(ro, 0, sizeof(*ro));
1423 # if __NetBSD_Version__ >= 499001100
1424 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1425 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1426 else
1427 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1428 if ((error = in6_setscope(&u.dst6.sin6_addr, ifp,
1429 &u.dst6.sin6_scope_id)) != 0)
1430 return error;
1431 if ((error = sa6_embedscope(&u.dst6, 0)) != 0)
1432 return error;
1433
1434 dst = &u.dst;
1435 rtcache_setdst(ro, dst);
1436
1437 rt = rtcache_init(ro);
1438 if ((ifp == NULL) && (rt != NULL))
1439 ifp = rt->rt_ifp;
1440 # else
1441 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1442 dst6->sin6_family = AF_INET6;
1443 dst6->sin6_len = sizeof(struct sockaddr_in6);
1444 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1445 /* KAME */
1446 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1447 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1448
1449 if (fdp != NULL) {
1450 if (IP6_NOTZERO(&fdp->fd_ip6))
1451 dst6->sin6_addr = fdp->fd_ip6.in6;
1452 }
1453
1454 rtalloc((struct route *)ro);
1455
1456 if ((ifp == NULL) && (ro->ro_rt != NULL))
1457 ifp = ro->ro_rt->rt_ifp;
1458 rt = ro->ro_rt;
1459 # endif
1460 if ((rt == NULL) || (ifp == NULL)) {
1461
1462 error = EHOSTUNREACH;
1463 goto bad;
1464 }
1465
1466 {
1467 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU)
1468 struct in6_ifextra *ife;
1469 # endif
1470 if (rt->rt_flags & RTF_GATEWAY)
1471 # if __NetBSD_Version__ >= 499001100
1472 dst = rt->rt_gateway;
1473 # else
1474 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1475 # endif
1476 rt->rt_use++;
1477
1478 /* Determine path MTU. */
1479 # if (__NetBSD_Version__ <= 106009999)
1480 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1481 # else
1482 # ifdef IN6_LINKMTU
1483 mtu = IN6_LINKMTU(ifp);
1484 # else
1485 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1486 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1487 # endif
1488 # endif
1489 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1490 # if __NetBSD_Version__ >= 499001100
1491 error = ip6_if_output(ifp, ifp, m0, satocsin6(dst), rt);
1492 # else
1493 error = nd6_output(ifp, ifp, m0, dst6, rt);
1494 # endif
1495 } else {
1496 error = EMSGSIZE;
1497 }
1498 }
1499 bad:
1500 # if __NetBSD_Version__ >= 499001100
1501 rtcache_unref(rt, ro);
1502 rtcache_free(ro);
1503 # else
1504 if (ro->ro_rt != NULL) {
1505 RTFREE(((struct route *)ro)->ro_rt);
1506 }
1507 # endif
1508 return error;
1509 }
1510 #endif /* INET6 */
1511
1512
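/*
 * Reverse-path check: look up the route back to the packet's source
 * address and report whether it uses the interface the packet arrived on.
 */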
1513 int
1514 ipf_verifysrc(fr_info_t *fin)
1515 {
1516 #if __NetBSD_Version__ >= 499001100
1517 union {
1518 struct sockaddr dst;
1519 struct sockaddr_in dst4;
1520 } u;
1521 struct rtentry *rt;
1522 #else
1523 struct sockaddr_in *dst;
1524 #endif
1525 struct route iproute;
1526 int rc;
1527
1528 #if __NetBSD_Version__ >= 499001100
1529 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1530 rtcache_setdst(&iproute, &u.dst);
1531 rt = rtcache_init(&iproute);
1532 if (rt == NULL)
1533 rc = 0;
1534 else
1535 rc = (fin->fin_ifp == rt->rt_ifp);
1536 rtcache_unref(rt, &iproute);
1537 rtcache_free(&iproute);
1538 #else
1539 dst = (struct sockaddr_in *)&iproute.ro_dst;
1540 dst->sin_len = sizeof(*dst);
1541 dst->sin_family = AF_INET;
1542 dst->sin_addr = fin->fin_src;
1543 rtalloc(&iproute);
1544 if (iproute.ro_rt == NULL)
1545 return 0;
1546 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1547 RTFREE(iproute.ro_rt);
1548 #endif
1549 return rc;
1550 }
1551
1552
1553 /*
1554 * return the first IP Address associated with an interface
1555 */
1556 int
1557 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1558 i6addr_t *inp, i6addr_t *inpmask)
1559 {
1560 #ifdef USE_INET6
1561 struct in6_addr *inp6 = NULL;
1562 #endif
1563 struct sockaddr *sock, *mask;
1564 struct sockaddr_in *sin;
1565 struct ifaddr *ifa;
1566 struct ifnet *ifp;
1567
1568 if ((ifptr == NULL) || (ifptr == (void *)-1))
1569 return -1;
1570
1571 ifp = ifptr;
1572 mask = NULL;
1573
1574 if (v == 4)
1575 inp->in4.s_addr = 0;
1576 #ifdef USE_INET6
1577 else if (v == 6)
1578 bzero((char *)inp, sizeof(*inp));
1579 #endif
1580
1581 ifa = IFADDR_READER_FIRST(ifp);
1582 sock = ifa ? ifa->ifa_addr : NULL;
1583 while (sock != NULL && ifa != NULL) {
1584 sin = (struct sockaddr_in *)sock;
1585 if ((v == 4) && (sin->sin_family == AF_INET))
1586 break;
1587 #ifdef USE_INET6
1588 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1589 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1590 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1591 !IN6_IS_ADDR_LOOPBACK(inp6))
1592 break;
1593 }
1594 #endif
1595 ifa = IFADDR_READER_NEXT(ifa);
1596 if (ifa != NULL)
1597 sock = ifa->ifa_addr;
1598 }
1599 if (ifa == NULL || sock == NULL)
1600 return -1;
1601
1602 mask = ifa->ifa_netmask;
1603 if (atype == FRI_BROADCAST)
1604 sock = ifa->ifa_broadaddr;
1605 else if (atype == FRI_PEERADDR)
1606 sock = ifa->ifa_dstaddr;
1607
1608 #ifdef USE_INET6
1609 if (v == 6)
1610 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1611 (struct sockaddr_in6 *)mask,
1612 inp, inpmask);
1613 #endif
1614 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1615 (struct sockaddr_in *)mask,
1616 &inp->in4, &inpmask->in4);
1617 }
1618
1619
1620 u_32_t
1621 ipf_newisn(fr_info_t *fin)
1622 {
1623 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1624 size_t asz;
1625
1626 if (fin->fin_v == 4)
1627 asz = sizeof(struct in_addr);
1628 else if (fin->fin_v == 6)
1629 asz = sizeof(fin->fin_src);
1630 else /* XXX: no way to return error */
1631 return 0;
1632 #ifdef INET
1633 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1634 fin->fin_sport, fin->fin_dport, asz, 0);
1635 #else
1636 return ENOSYS;
1637 #endif
1638 #else
1639 static int iss_seq_off = 0;
1640 u_char hash[16];
1641 u_32_t newiss;
1642 MD5_CTX ctx;
1643
1644 /*
1645 * Compute the base value of the ISS. It is a hash
1646 * of (saddr, sport, daddr, dport, secret).
1647 */
1648 MD5Init(&ctx);
1649
1650 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1651 sizeof(fin->fin_fi.fi_src));
1652 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1653 sizeof(fin->fin_fi.fi_dst));
1654 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1655
1656 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1657
1658 MD5Final(hash, &ctx);
1659
1660 memcpy(&newiss, hash, sizeof(newiss));
1661
1662 /*
1663 * Now increment our "timer", and add it in to
1664 * the computed value.
1665 *
1666 * XXX Use `addin'?
1667 * XXX TCP_ISSINCR too large to use?
1668 */
1669 iss_seq_off += 0x00010000;
1670 newiss += iss_seq_off;
1671 return newiss;
1672 #endif
1673 }
1674
1675
1676 /* ------------------------------------------------------------------------ */
1677 /* Function: ipf_nextipid */
1678 /* Returns:     u_short - the next IPv4 ID to use for this packet          */
1679 /* Parameters: fin(I) - pointer to packet information */
1680 /* */
1681 /* Returns the next IPv4 ID to use for this packet. */
1682 /* ------------------------------------------------------------------------ */
1683 u_short
1684 ipf_nextipid(fr_info_t *fin)
1685 {
1686 #ifdef USE_MUTEXES
1687 ipf_main_softc_t *softc = fin->fin_main_soft;
1688 #endif
1689 u_short id;
1690
1691 MUTEX_ENTER(&softc->ipf_rw);
1692 id = ipid++;
1693 MUTEX_EXIT(&softc->ipf_rw);
1694
1695 return id;
1696 }
1697
1698
1699 EXTERN_INLINE int
1700 ipf_checkv4sum(fr_info_t *fin)
1701 {
1702 #ifdef M_CSUM_TCP_UDP_BAD
1703 int manual, pflag, cflags, active;
1704 mb_t *m;
1705
1706 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1707 return 0;
1708
1709 if ((fin->fin_flx & FI_SHORT) != 0)
1710 return 1;
1711
1712 if (fin->fin_cksum != FI_CK_NEEDED)
1713 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1714
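	/*
	 * Use the checksum status recorded by hardware offload in the mbuf
	 * where possible; fall back to computing the checksum in software
	 * (ipf_checkl4sum) when no usable flags are present.
	 */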
1715 manual = 0;
1716 m = fin->fin_m;
1717 if (m == NULL) {
1718 manual = 1;
1719 goto skipauto;
1720 }
1721
1722 switch (fin->fin_p)
1723 {
1724 case IPPROTO_UDP :
1725 pflag = M_CSUM_UDPv4;
1726 break;
1727 case IPPROTO_TCP :
1728 pflag = M_CSUM_TCPv4;
1729 break;
1730 default :
1731 pflag = 0;
1732 manual = 1;
1733 break;
1734 }
1735
1736 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1737 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1738 cflags = m->m_pkthdr.csum_flags & active;
1739
1740 if (pflag != 0) {
1741 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1742 fin->fin_flx |= FI_BAD;
1743 fin->fin_cksum = FI_CK_BAD;
1744 } else if (cflags == (pflag | M_CSUM_DATA)) {
1745 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1746 fin->fin_flx |= FI_BAD;
1747 fin->fin_cksum = FI_CK_BAD;
1748 } else {
1749 fin->fin_cksum = FI_CK_SUMOK;
1750 }
1751 } else if (cflags == pflag) {
1752 fin->fin_cksum = FI_CK_SUMOK;
1753 } else {
1754 manual = 1;
1755 }
1756 }
1757 skipauto:
1758 if (manual != 0) {
1759 if (ipf_checkl4sum(fin) == -1) {
1760 fin->fin_flx |= FI_BAD;
1761 return -1;
1762 }
1763 }
1764 #else
1765 if (ipf_checkl4sum(fin) == -1) {
1766 fin->fin_flx |= FI_BAD;
1767 return -1;
1768 }
1769 #endif
1770 return 0;
1771 }
1772
1773
1774 #ifdef USE_INET6
1775 EXTERN_INLINE int
1776 ipf_checkv6sum(fr_info_t *fin)
1777 {
1778 # ifdef M_CSUM_TCP_UDP_BAD
1779 int manual, pflag, cflags, active;
1780 mb_t *m;
1781
1782 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1783 return 0;
1784
1785 if ((fin->fin_flx & FI_SHORT) != 0)
1786 return 1;
1787
1788 if (fin->fin_cksum != FI_CK_SUMOK)
1789 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1790
1791
1792 manual = 0;
1793 m = fin->fin_m;
1794
1795 switch (fin->fin_p)
1796 {
1797 case IPPROTO_UDP :
1798 pflag = M_CSUM_UDPv6;
1799 break;
1800 case IPPROTO_TCP :
1801 pflag = M_CSUM_TCPv6;
1802 break;
1803 default :
1804 pflag = 0;
1805 manual = 1;
1806 break;
1807 }
1808
1809 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1810 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1811 cflags = m->m_pkthdr.csum_flags & active;
1812
1813 if (pflag != 0) {
1814 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1815 fin->fin_flx |= FI_BAD;
1816 } else if (cflags == (pflag | M_CSUM_DATA)) {
1817 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1818 fin->fin_flx |= FI_BAD;
1819 } else if (cflags == pflag) {
1820 ;
1821 } else {
1822 manual = 1;
1823 }
1824 }
1825 if (manual != 0) {
1826 if (ipf_checkl4sum(fin) == -1) {
1827 fin->fin_flx |= FI_BAD;
1828 return -1;
1829 }
1830 }
1831 # else
1832 if (ipf_checkl4sum(fin) == -1) {
1833 fin->fin_flx |= FI_BAD;
1834 return -1;
1835 }
1836 # endif
1837 return 0;
1838 }
1839 #endif /* USE_INET6 */
1840
1841
1842 size_t
1843 mbufchainlen(struct mbuf *m0)
1844 {
1845 size_t len;
1846
1847 if ((m0->m_flags & M_PKTHDR) != 0) {
1848 len = m0->m_pkthdr.len;
1849 } else {
1850 struct mbuf *m;
1851
1852 for (m = m0, len = 0; m != NULL; m = m->m_next)
1853 len += m->m_len;
1854 }
1855 return len;
1856 }
1857
1858
1859 /* ------------------------------------------------------------------------ */
1860 /* Function: ipf_pullup */
1861 /* Returns: NULL == pullup failed, else pointer to protocol header */
1862 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1863 /* fin(I) - pointer to packet information */
1864 /* len(I) - number of bytes to pullup */
1865 /* */
1866 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1867 /* single buffer for ease of access. Operating system native functions are */
1868 /* used to manage buffers - if necessary. If the entire packet ends up in */
1869 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1870 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1871 /* and ONLY if the pullup succeeds. */
1872 /* */
1873 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1874 /* of buffers that starts at *fin->fin_mp. */
1875 /* ------------------------------------------------------------------------ */
1876 void *
1877 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1878 {
1879 int dpoff, ipoff;
1880 mb_t *m = xmin;
1881 char *ip;
1882
1883 if (m == NULL)
1884 return NULL;
1885
1886 ip = (char *)fin->fin_ip;
1887 if ((fin->fin_flx & FI_COALESCE) != 0)
1888 return ip;
1889
1890 ipoff = fin->fin_ipoff;
1891 if (fin->fin_dp != NULL)
1892 dpoff = (char *)fin->fin_dp - (char *)ip;
1893 else
1894 dpoff = 0;
1895
1896 if (M_LEN(m) < len) {
1897 mb_t *n = *fin->fin_mp;
1898 		/*
1899 		 * Assume that M_PKTHDR is set and just work with what is left
1900 		 * rather than check.
1901 		 * It should not make any real difference, anyway.
1902 		 */
1903 if (m != n) {
1904 /*
1905 * Record the mbuf that points to the mbuf that we're
1906 * about to go to work on so that we can update the
1907 * m_next appropriately later.
1908 */
1909 for (; n->m_next != m; n = n->m_next)
1910 ;
1911 } else {
1912 n = NULL;
1913 }
1914
1915 #ifdef MHLEN
1916 if (len > MHLEN)
1917 #else
1918 if (len > MLEN)
1919 #endif
1920 {
1921 #ifdef HAVE_M_PULLDOWN
1922 if (m_pulldown(m, 0, len, NULL) == NULL)
1923 m = NULL;
1924 #else
1925 FREE_MB_T(*fin->fin_mp);
1926 m = NULL;
1927 n = NULL;
1928 #endif
1929 } else
1930 {
1931 m = m_pullup(m, len);
1932 }
1933 if (n != NULL)
1934 n->m_next = m;
1935 if (m == NULL) {
1936 			/*
1937 			 * When n is non-NULL, it indicates that m pointed to
1938 			 * a sub-chain (tail) of the mbuf chain and that the
1939 			 * head of this chain has not yet been freed.
1940 			 */
1941 if (n != NULL) {
1942 FREE_MB_T(*fin->fin_mp);
1943 }
1944
1945 *fin->fin_mp = NULL;
1946 fin->fin_m = NULL;
1947 return NULL;
1948 }
1949
1950 if (n == NULL)
1951 *fin->fin_mp = m;
1952
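		/* Skip over any now-empty mbufs at the front of the chain. */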
1953 while (M_LEN(m) == 0) {
1954 m = m->m_next;
1955 }
1956 fin->fin_m = m;
1957 ip = MTOD(m, char *) + ipoff;
1958
1959 fin->fin_ip = (ip_t *)ip;
1960 if (fin->fin_dp != NULL)
1961 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1962 if (fin->fin_fraghdr != NULL)
1963 fin->fin_fraghdr = (char *)ip +
1964 ((char *)fin->fin_fraghdr -
1965 (char *)fin->fin_ip);
1966 }
1967
1968 if (len == fin->fin_plen)
1969 fin->fin_flx |= FI_COALESCE;
1970 return ip;
1971 }
1972
1973
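/*
 * Re-inject a packet into the stack: inbound packets are queued onto the
 * IP input packet queue, outbound packets are handed straight to
 * ip_output() with IP_FORWARDING set.
 */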
1974 int
1975 ipf_inject(fr_info_t *fin, mb_t *m)
1976 {
1977 int error;
1978
1979 if (fin->fin_out == 0) {
1980 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1981 FREE_MB_T(m);
1982 error = ENOBUFS;
1983 } else {
1984 error = 0;
1985 }
1986 } else {
1987 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1988 }
1989 return error;
1990 }
1991
1992
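/*
 * Return a 32-bit random number, using cprng(9) when available and
 * arc4random() otherwise.
 */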
1993 u_32_t
1994 ipf_random(void)
1995 {
1996 int number;
1997
1998 #ifdef _CPRNG_H
1999 number = cprng_fast32();
2000 #else
2001 number = arc4random();
2002 #endif
2003 return number;
2004 }
2005
2006
2007 /*
2008  * Open/close, read/write and poll routines for the ipl devices.
2009  */
2010 static int ipfopen(dev_t dev, int flags
2011 #if (NetBSD >= 199511)
2012 , int devtype, PROC_T *p
2013 #endif
2014 )
2015 {
2016 u_int unit = GET_MINOR(dev);
2017 int error;
2018
2019 if (IPL_LOGMAX < unit) {
2020 error = ENXIO;
2021 } else {
2022 switch (unit)
2023 {
2024 case IPL_LOGIPF :
2025 case IPL_LOGNAT :
2026 case IPL_LOGSTATE :
2027 case IPL_LOGAUTH :
2028 case IPL_LOGLOOKUP :
2029 case IPL_LOGSYNC :
2030 #ifdef IPFILTER_SCAN
2031 case IPL_LOGSCAN :
2032 #endif
2033 error = 0;
2034 break;
2035 default :
2036 error = ENXIO;
2037 break;
2038 }
2039 }
2040 #if (__NetBSD_Version__ >= 799003000)
2041 if (error == 0) {
2042 mutex_enter(&ipf_ref_mutex);
2043 ipf_active = 1;
2044 mutex_exit(&ipf_ref_mutex);
2045 }
2046 #endif
2047 return error;
2048 }
2049
2050
2051 static int ipfclose(dev_t dev, int flags
2052 #if (NetBSD >= 199511)
2053 , int devtype, PROC_T *p
2054 #endif
2055 )
2056 {
2057 u_int unit = GET_MINOR(dev);
2058
2059 if (IPL_LOGMAX < unit)
2060 return ENXIO;
2061 else {
2062 #if (__NetBSD_Version__ >= 799003000)
2063 mutex_enter(&ipf_ref_mutex);
2064 ipf_active = 0;
2065 mutex_exit(&ipf_ref_mutex);
2066 #endif
2067 return 0;
2068 }
2069 }
2070
2071 /*
2072 * ipfread/ipflog
2073 * both of these must operate with at least splnet() lest they be
2074  * called during packet processing and cause an inconsistency to appear in
2075 * the filter lists.
2076 */
2077 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2078 {
2079
2080 if (ipfmain.ipf_running < 1) {
2081 ipfmain.ipf_interror = 130006;
2082 return EIO;
2083 }
2084
2085 if (GET_MINOR(dev) == IPL_LOGSYNC)
2086 return ipf_sync_read(&ipfmain, uio);
2087
2088 #ifdef IPFILTER_LOG
2089 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2090 #else
2091 ipfmain.ipf_interror = 130007;
2092 return ENXIO;
2093 #endif
2094 }
2095
2096
2097 /*
2098 * ipfwrite
2099 * both of these must operate with at least splnet() lest they be
2100  * called during packet processing and cause an inconsistency to appear in
2101 * the filter lists.
2102 */
2103 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2104 {
2105
2106 if (ipfmain.ipf_running < 1) {
2107 ipfmain.ipf_interror = 130008;
2108 return EIO;
2109 }
2110
2111 if (GET_MINOR(dev) == IPL_LOGSYNC)
2112 return ipf_sync_write(&ipfmain, uio);
2113 ipfmain.ipf_interror = 130009;
2114 return ENXIO;
2115 }
2116
2117
2118 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2119 {
2120 u_int unit = GET_MINOR(dev);
2121 int revents = 0;
2122
2123 if (IPL_LOGMAX < unit) {
2124 ipfmain.ipf_interror = 130010;
2125 return ENXIO;
2126 }
2127
2128 switch (unit)
2129 {
2130 case IPL_LOGIPF :
2131 case IPL_LOGNAT :
2132 case IPL_LOGSTATE :
2133 #ifdef IPFILTER_LOG
2134 if ((events & (POLLIN | POLLRDNORM)) &&
2135 ipf_log_canread(&ipfmain, unit))
2136 revents |= events & (POLLIN | POLLRDNORM);
2137 #endif
2138 break;
2139 case IPL_LOGAUTH :
2140 if ((events & (POLLIN | POLLRDNORM)) &&
2141 ipf_auth_waiting(&ipfmain))
2142 revents |= events & (POLLIN | POLLRDNORM);
2143 break;
2144 case IPL_LOGSYNC :
2145 if ((events & (POLLIN | POLLRDNORM)) &&
2146 ipf_sync_canread(&ipfmain))
2147 revents |= events & (POLLIN | POLLRDNORM);
2148 if ((events & (POLLOUT | POLLWRNORM)) &&
2149 ipf_sync_canwrite(&ipfmain))
2150 revents |= events & (POLLOUT | POLLWRNORM);
2151 break;
2152 case IPL_LOGSCAN :
2153 case IPL_LOGLOOKUP :
2154 default :
2155 break;
2156 }
2157
2158 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2159 selrecord(p, &ipfmain.ipf_selwait[unit]);
2160 return revents;
2161 }
2162
2163 u_int
2164 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2165 {
2166 struct mbuf *m;
2167 u_int sum2;
2168 int off;
2169
2170 m = fin->fin_m;
2171 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
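	/*
	 * Temporarily skip the IP header so in_cksum() only sums the
	 * transport header and payload.
	 */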
2172 m->m_data += hlen;
2173 m->m_len -= hlen;
2174 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2175 m->m_len += hlen;
2176 m->m_data -= hlen;
2177
2178 /*
2179 * Both sum and sum2 are partial sums, so combine them together.
2180 */
2181 sum += ~sum2 & 0xffff;
2182 while (sum > 0xffff)
2183 sum = (sum & 0xffff) + (sum >> 16);
2184 sum2 = ~sum & 0xffff;
2185 return sum2;
2186 }
2187
2188 #if (__NetBSD_Version__ >= 799003000)
2189
2190 /* NetBSD module interface */
2191
2192 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2193
2194 static int ipl_init(void *);
2195 static int ipl_fini(void *);
2196 static int ipl_modcmd(modcmd_t, void *);
2197
2198 #ifdef _MODULE
2199 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2200 #endif
2201
2202 static int
2203 ipl_modcmd(modcmd_t cmd, void *opaque)
2204 {
2205
2206 switch (cmd) {
2207 case MODULE_CMD_INIT:
2208 return ipl_init(opaque);
2209 case MODULE_CMD_FINI:
2210 return ipl_fini(opaque);
2211 default:
2212 return ENOTTY;
2213 }
2214 }
2215
2216 static int
2217 ipl_init(void *opaque)
2218 {
2219 int error;
2220
2221 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2222 ipf_listener_cb, NULL);
2223
2224 if ((error = ipf_load_all()) != 0)
2225 return error;
2226
2227 if (ipf_create_all(&ipfmain) == NULL) {
2228 ipf_unload_all();
2229 return ENODEV;
2230 }
2231
2232 /* Initialize our mutex and reference count */
2233 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2234 ipf_active = 0;
2235
2236 #ifdef _MODULE
2237 /*
2238 	 * Insert ourselves into the cdevsw list.
2239 */
2240 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2241 if (error)
2242 ipl_fini(opaque);
2243 #endif
2244
2245 return error;
2246 }
2247
2248 static int
2249 ipl_fini(void *opaque)
2250 {
2251
2252 #ifdef _MODULE
2253 (void)devsw_detach(NULL, &ipl_cdevsw);
2254 #endif
2255
2256 /*
2257 * Grab the mutex, verify that there are no references
2258 * and that there are no running filters. If either
2259 * of these exists, reinsert our cdevsw entry and return
2260 * an error.
2261 */
2262 mutex_enter(&ipf_ref_mutex);
2263 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2264 #ifdef _MODULE
2265 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2266 &ipl_cdevsw, &ipl_cmaj);
2267 #endif
2268 mutex_exit(&ipf_ref_mutex);
2269 return EBUSY;
2270 }
2271
2272 /* Clean up the rest of our state before being unloaded */
2273
2274 mutex_exit(&ipf_ref_mutex);
2275 mutex_destroy(&ipf_ref_mutex);
2276 ipf_destroy_all(&ipfmain);
2277 ipf_unload_all();
2278 kauth_unlisten_scope(ipf_listener);
2279
2280 return 0;
2281 }
2282 #endif /* (__NetBSD_Version__ >= 799003000) */
2283