/*	$NetBSD: if_pfsync.c,v 1.6 2010/04/05 07:22:22 joerg Exp $	*/
2 /* $OpenBSD: if_pfsync.c,v 1.83 2007/06/26 14:44:12 mcbride Exp $ */
3
4 /*
5 * Copyright (c) 2002 Michael Shalayeff
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: if_pfsync.c,v 1.6 2010/04/05 07:22:22 joerg Exp $");
32
33 #ifdef _KERNEL_OPT
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #endif
37
38 #include <sys/param.h>
39 #include <sys/proc.h>
40 #include <sys/systm.h>
41 #include <sys/time.h>
42 #include <sys/mbuf.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47
48 #include <net/if.h>
49 #include <net/if_types.h>
50 #include <net/route.h>
51 #include <net/bpf.h>
52 #include <netinet/in.h>
53 #ifndef __NetBSD__
54 #include <netinet/if_ether.h>
55 #else
56 #include <net/if_ether.h>
57 #endif /* __NetBSD__ */
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_seq.h>
60
61 #ifdef INET
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/ip.h>
65 #include <netinet/ip_var.h>
66 #endif
67
68 #ifdef INET6
69 #include <netinet6/nd6.h>
70 #endif /* INET6 */
71
72 #include "carp.h"
73 #if NCARP > 0
74 extern int carp_suppress_preempt;
75 #endif
76
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79
80 #ifdef __NetBSD__
81 #include <sys/conf.h>
82 #include <sys/lwp.h>
83 #include <sys/kauth.h>
84 #include <sys/sysctl.h>
85
86 #include <net/net_stats.h>
87
88 percpu_t *pfsyncstat_percpu;
89
90 #define PFSYNC_STATINC(x) _NET_STATINC(pfsyncstat_percpu, x)
91 #endif /* __NetBSD__ */
92
93 #include "pfsync.h"
94
95 #define PFSYNC_MINMTU \
96 (sizeof(struct pfsync_header) + sizeof(struct pf_state))
97
98 #ifdef PFSYNCDEBUG
99 #define DPRINTF(x) do { if (pfsyncdebug) printf x ; } while (0)
100 int pfsyncdebug;
101 #else
102 #define DPRINTF(x)
103 #endif
104
105 extern int ifqmaxlen; /* XXX */
106
107 struct pfsync_softc *pfsyncif = NULL;
108
109 void pfsyncattach(int);
110 int pfsync_clone_create(struct if_clone *, int);
111 int pfsync_clone_destroy(struct ifnet *);
112 void pfsync_setmtu(struct pfsync_softc *, int);
113 int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
114 struct pf_state_peer *);
115 int pfsync_insert_net_state(struct pfsync_state *, u_int8_t);
116 void pfsync_update_net_tdb(struct pfsync_tdb *);
117 int pfsyncoutput(struct ifnet *, struct mbuf *, const struct sockaddr *,
118 struct rtentry *);
119 int pfsyncioctl(struct ifnet *, u_long, void*);
120 void pfsyncstart(struct ifnet *);
121
122 struct mbuf *pfsync_get_mbuf(struct pfsync_softc *, u_int8_t, void **);
123 int pfsync_request_update(struct pfsync_state_upd *, struct in_addr *);
124 int pfsync_sendout(struct pfsync_softc *);
125 int pfsync_tdb_sendout(struct pfsync_softc *);
126 int pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
127 void pfsync_timeout(void *);
128 void pfsync_tdb_timeout(void *);
129 void pfsync_send_bus(struct pfsync_softc *, u_int8_t);
130 void pfsync_bulk_update(void *);
131 void pfsync_bulkfail(void *);
132
133 int pfsync_sync_ok;
134
135 struct if_clone pfsync_cloner =
136 IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
137
/*
 * Pseudo-device attach routine, called once at boot/modload time:
 * register the "pfsync" interface cloner and allocate the per-CPU
 * statistics counters used by PFSYNC_STATINC().
 */
void
pfsyncattach(int npfsync)
{
	/* "npfsync" is the config(5) instance count; unused because
	 * instances are created on demand through the cloner. */
	if_clone_attach(&pfsync_cloner);

	/* NOTE(review): the percpu_alloc() result is not checked;
	 * presumably it cannot fail this early -- confirm. */
	pfsyncstat_percpu = percpu_alloc(sizeof(uint64_t) * PFSYNC_NSTATS);
}
145
/*
 * Clone-create handler: instantiate the (single) pfsync interface.
 *
 * Only unit 0 is accepted, so at most one pfsync interface exists
 * system-wide; it is tracked by the global "pfsyncif".
 *
 * Returns 0 on success, EINVAL for units other than 0, or ENOMEM if
 * the softc cannot be allocated.
 */
int
pfsync_clone_create(struct if_clone *ifc, int unit)
{
	struct ifnet *ifp;

	if (unit != 0)
		return (EINVAL);

	pfsync_sync_ok = 1;
	if ((pfsyncif = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT)) == NULL)
		return (ENOMEM);
	memset(pfsyncif, 0, sizeof(*pfsyncif));
	/* The stores below are redundant with the memset() above but
	 * spell out the softc's initial state explicitly. */
	pfsyncif->sc_mbuf = NULL;
	pfsyncif->sc_mbuf_net = NULL;
	pfsyncif->sc_mbuf_tdb = NULL;
	pfsyncif->sc_statep.s = NULL;
	pfsyncif->sc_statep_net.s = NULL;
	pfsyncif->sc_statep_tdb.t = NULL;
	pfsyncif->sc_maxupdates = 128;
	/* Default to the pfsync multicast group for both peer and send
	 * address; SIOCSETPFSYNC may override the peer later. */
	pfsyncif->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
	pfsyncif->sc_sendaddr.s_addr = INADDR_PFSYNC_GROUP;
	pfsyncif->sc_ureq_received = 0;
	pfsyncif->sc_ureq_sent = 0;
	pfsyncif->sc_bulk_send_next = NULL;
	pfsyncif->sc_bulk_terminator = NULL;
	ifp = &pfsyncif->sc_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
	ifp->if_softc = pfsyncif;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_start = pfsyncstart;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = PFSYNC_HDRLEN;
	pfsync_setmtu(pfsyncif, ETHERMTU);

	/* Four timers: packet flush, TDB flush, bulk-update walker and
	 * bulk-update failure watchdog. */
	callout_init(&pfsyncif->sc_tmo, 0);
	callout_init(&pfsyncif->sc_tdb_tmo, 0);
	callout_init(&pfsyncif->sc_bulk_tmo, 0);
	callout_init(&pfsyncif->sc_bulkfail_tmo, 0);
	callout_setfunc(&pfsyncif->sc_tmo, pfsync_timeout, pfsyncif);
	callout_setfunc(&pfsyncif->sc_tdb_tmo, pfsync_tdb_timeout, pfsyncif);
	callout_setfunc(&pfsyncif->sc_bulk_tmo, pfsync_bulk_update, pfsyncif);
	callout_setfunc(&pfsyncif->sc_bulkfail_tmo, pfsync_bulkfail, pfsyncif);

	if_attach(ifp);
	if_alloc_sadl(ifp);

	/* Make the raw pfsync stream visible to bpf(4) listeners. */
	bpf_attach(&pfsyncif->sc_if, DLT_PFSYNC, PFSYNC_HDRLEN);

	return (0);
}
198
199 int
200 pfsync_clone_destroy(struct ifnet *ifp)
201 {
202 bpf_detach(ifp);
203 if_detach(ifp);
204 free(pfsyncif, M_DEVBUF);
205 pfsyncif = NULL;
206 return (0);
207 }
208
209 /*
210 * Start output on the pfsync interface.
211 */
212 void
213 pfsyncstart(struct ifnet *ifp)
214 {
215 struct mbuf *m;
216 int s;
217
218 for (;;) {
219 s = splnet();
220 IF_DROP(&ifp->if_snd);
221 IF_DEQUEUE(&ifp->if_snd, m);
222 splx(s);
223
224 if (m == NULL)
225 return;
226 else
227 m_freem(m);
228 }
229 }
230
231 int
232 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
233 struct pf_state_peer *d)
234 {
235 if (s->scrub.scrub_flag && d->scrub == NULL) {
236 d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
237 if (d->scrub == NULL)
238 return (ENOMEM);
239 memset(d->scrub, 0, sizeof(*d->scrub));
240 }
241
242 return (0);
243 }
244
/*
 * Insert a state received from a pfsync peer into the local state
 * table.
 *
 * "sp" is the wire-format state (network byte order); "chksum_flag"
 * is non-zero when the peer's ruleset checksum matched ours, which
 * makes it safe to bind the state to the same-numbered local rule.
 *
 * Returns 0 on success or on a deliberately skipped state (unknown
 * interface), EINVAL for a bad creator id or a failed insertion,
 * ENOMEM when a pool allocation fails.
 */
int
pfsync_insert_net_state(struct pfsync_state *sp, u_int8_t chksum_flag)
{
	struct pf_state *st = NULL;
	struct pf_state_key *sk = NULL;
	struct pf_rule *r = NULL;
	struct pfi_kif *kif;

	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
		printf("pfsync_insert_net_state: invalid creator id:"
		    " %08x\n", ntohl(sp->creatorid));
		return (EINVAL);
	}

	kif = pfi_kif_get(sp->ifname);
	if (kif == NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync_insert_net_state: "
			    "unknown interface: %s\n", sp->ifname);
		/* skip this state */
		return (0);
	}

	/*
	 * If the ruleset checksums match, it's safe to associate the state
	 * with the rule of that number.
	 */
	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && chksum_flag &&
	    ntohl(sp->rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
	else
		r = &pf_default_rule;

	/* Respect the bound rule's state limit before allocating. */
	if (!r->max_states || r->states < r->max_states)
		st = pool_get(&pf_state_pl, PR_NOWAIT);
	if (st == NULL) {
		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
		return (ENOMEM);
	}
	memset(st, 0, sizeof(*st));

	if ((sk = pf_alloc_state_key(st)) == NULL) {
		pool_put(&pf_state_pl, st);
		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
		return (ENOMEM);
	}

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) {
		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
		if (st->src.scrub)
			pool_put(&pf_state_scrub_pl, st->src.scrub);
		pool_put(&pf_state_pl, st);
		pool_put(&pf_state_key_pl, sk);
		return (ENOMEM);
	}

	st->rule.ptr = r;
	/* XXX get pointers to nat_rule and anchor */

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	r->states++;

	/* fill in the rest of the state entry */
	pf_state_host_ntoh(&sp->lan, &sk->lan);
	pf_state_host_ntoh(&sp->gwy, &sk->gwy);
	pf_state_host_ntoh(&sp->ext, &sk->ext);

	pf_state_peer_ntoh(&sp->src, &st->src);
	pf_state_peer_ntoh(&sp->dst, &st->dst);

	memcpy(&st->rt_addr, &sp->rt_addr, sizeof(st->rt_addr));
	/* Convert the peer's relative times back to local wall clock. */
	st->creation = time_second - ntohl(sp->creation);
	st->expire = ntohl(sp->expire) + time_second;

	sk->af = sp->af;
	sk->proto = sp->proto;
	sk->direction = sp->direction;
	st->log = sp->log;
	st->timeout = sp->timeout;
	st->allow_opts = sp->allow_opts;

	memcpy(&st->id, sp->id, sizeof(st->id));
	st->creatorid = sp->creatorid;
	/* Mark as peer-originated so we do not echo it back. */
	st->sync_flags = PFSTATE_FROMSYNC;

	if (pf_insert_state(kif, st)) {
		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
		r->states--;
		if (st->dst.scrub)
			pool_put(&pf_state_scrub_pl, st->dst.scrub);
		if (st->src.scrub)
			pool_put(&pf_state_scrub_pl, st->src.scrub);
		pool_put(&pf_state_pl, st);
		/* NOTE(review): sk is not pool_put here; presumably
		 * pf_alloc_state_key() tied its lifetime to st or
		 * pf_insert_state() released it -- verify. */
		return (EINVAL);
	}

	return (0);
}
348
/*
 * Input handler for pfsync packets, called from the IP input path.
 *
 * Validates the packet (sync interface, TTL, header, version, action)
 * and then dispatches on the pfsync action: state clear/insert/update/
 * delete (full and compressed forms), update requests, bulk-update
 * status, and (with IPSEC) TDB replay-counter updates.
 *
 * Consumes "m": it is freed at "done" on all paths except the BADLEN
 * branches, where m_pulldown() has already freed the chain on failure.
 */
void
pfsync_input(struct mbuf *m, ...)
{
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_softc *sc = pfsyncif;
	struct pf_state *st;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pfsync_state *sp;
	struct pfsync_state_upd *up;
	struct pfsync_state_del *dp;
	struct pfsync_state_clr *cp;
	struct pfsync_state_upd_req *rup;
	struct pfsync_state_bus *bus;
#ifdef IPSEC
	struct pfsync_tdb *pt;
#endif
	struct in_addr src;
	struct mbuf *mp;
	int iplen, action, error, i, s, count, offp, sfail, stale = 0;
	u_int8_t chksum_flag = 0;

	PFSYNC_STATINC(PFSYNC_STAT_IPACKETS);

	/* verify that we have a sync interface configured */
	if (!sc || !sc->sc_sync_ifp || !pf_status.running)
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_ifp != m->m_pkthdr.rcvif) {
		PFSYNC_STATINC(PFSYNC_STAT_BADIF);
		goto done;
	}

	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		PFSYNC_STATINC(PFSYNC_STAT_BADTTL);
		goto done;
	}

	iplen = ip->ip_hl << 2;

	if (m->m_pkthdr.len < iplen + sizeof(*ph)) {
		PFSYNC_STATINC(PFSYNC_STAT_HDROPS);
		goto done;
	}

	/* Make the pfsync header contiguous before touching it. */
	if (iplen + sizeof(*ph) > m->m_len) {
		if ((m = m_pullup(m, iplen + sizeof(*ph))) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_HDROPS);
			goto done;
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + iplen);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		PFSYNC_STATINC(PFSYNC_STAT_BADVER);
		goto done;
	}

	action = ph->action;
	count = ph->count;

	/* make sure it's a valid action code */
	if (action >= PFSYNC_ACT_MAX) {
		PFSYNC_STATINC(PFSYNC_STAT_BADACT);
		goto done;
	}

	/* Cheaper to grab this now than having to mess with mbufs later */
	src = ip->ip_src;

	/* A matching ruleset checksum lets updates bind to rules by
	 * number (see pfsync_insert_net_state()). */
	if (!bcmp(&ph->pf_chksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		chksum_flag++;

	switch (action) {
	case PFSYNC_ACT_CLR: {
		/* Clear all states from a given creator, optionally
		 * restricted to one interface. */
		struct pf_state *nexts;
		struct pf_state_key *nextsk;
		struct pfi_kif *kif;
		u_int32_t creatorid;
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    sizeof(*cp), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}
		cp = (struct pfsync_state_clr *)(mp->m_data + offp);
		creatorid = cp->creatorid;

		s = splsoftnet();
		if (cp->ifname[0] == '\0') {
			for (st = RB_MIN(pf_state_tree_id, &tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
				if (st->creatorid == creatorid) {
					st->sync_flags |= PFSTATE_FROMSYNC;
					pf_unlink_state(st);
				}
			}
		} else {
			if ((kif = pfi_kif_get(cp->ifname)) == NULL) {
				splx(s);
				return;
			}
			for (sk = RB_MIN(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext); sk; sk = nextsk) {
				nextsk = RB_NEXT(pf_state_tree_lan_ext,
				    &pf_statetbl_lan_ext, sk);
				TAILQ_FOREACH(st, &sk->states, next) {
					if (st->creatorid == creatorid) {
						st->sync_flags |=
						    PFSTATE_FROMSYNC;
						pf_unlink_state(st);
					}
				}
			}
		}
		splx(s);

		break;
	}
	case PFSYNC_ACT_INS:
		/* Insert "count" full states from the peer. */
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*sp), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}

		s = splsoftnet();
		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
		    i < count; i++, sp++) {
			/* check for invalid values */
			if (sp->timeout >= PFTM_MAX ||
			    sp->src.state > PF_TCPS_PROXY_DST ||
			    sp->dst.state > PF_TCPS_PROXY_DST ||
			    sp->direction > PF_OUT ||
			    (sp->af != AF_INET && sp->af != AF_INET6)) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync_insert: PFSYNC_ACT_INS: "
					    "invalid value\n");
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
				continue;
			}

			if ((error = pfsync_insert_net_state(sp,
			    chksum_flag))) {
				if (error == ENOMEM) {
					splx(s);
					goto done;
				}
				continue;
			}
		}
		splx(s);
		break;
	case PFSYNC_ACT_UPD:
		/* Apply "count" full-state updates; unknown states are
		 * inserted, stale ones trigger a corrective reply. */
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*sp), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}

		s = splsoftnet();
		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
		    i < count; i++, sp++) {
			int flags = PFSYNC_FLAG_STALE;

			/* check for invalid values */
			if (sp->timeout >= PFTM_MAX ||
			    sp->src.state > PF_TCPS_PROXY_DST ||
			    sp->dst.state > PF_TCPS_PROXY_DST) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync_insert: PFSYNC_ACT_UPD: "
					    "invalid value\n");
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
				continue;
			}

			memcpy(&id_key.id, sp->id, sizeof(id_key.id));
			id_key.creatorid = sp->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				/* insert the update */
				if (pfsync_insert_net_state(sp, chksum_flag)) {
					PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
				}
				continue;
			}
			sk = st->state_key;
			sfail = 0;
			if (sk->proto == IPPROTO_TCP) {
				/*
				 * The state should never go backwards except
				 * for syn-proxy states. Neither should the
				 * sequence window slide backwards.
				 */
				if (st->src.state > sp->src.state &&
				    (st->src.state < PF_TCPS_PROXY_SRC ||
				    sp->src.state >= PF_TCPS_PROXY_SRC))
					sfail = 1;
				else if (SEQ_GT(st->src.seqlo,
				    ntohl(sp->src.seqlo)))
					sfail = 3;
				else if (st->dst.state > sp->dst.state) {
					/* There might still be useful
					 * information about the src state here,
					 * so import that part of the update,
					 * then "fail" so we send the updated
					 * state back to the peer who is missing
					 * what we know. */
					pf_state_peer_ntoh(&sp->src, &st->src);
					/* XXX do anything with timeouts? */
					sfail = 7;
					flags = 0;
				} else if (st->dst.state >= TCPS_SYN_SENT &&
				    SEQ_GT(st->dst.seqlo, ntohl(sp->dst.seqlo)))
					sfail = 4;
			} else {
				/*
				 * Non-TCP protocol state machine always go
				 * forwards
				 */
				if (st->src.state > sp->src.state)
					sfail = 5;
				else if (st->dst.state > sp->dst.state)
					sfail = 6;
			}
			if (sfail) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync: %s stale update "
					    "(%d) id: %016" PRIu64 ""
					    "creatorid: %08x\n",
					    (sfail < 7 ? "ignoring"
					    : "partial"), sfail,
					    be64toh(st->id),
					    ntohl(st->creatorid));
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);

				if (!(sp->sync_flags & PFSTATE_STALE)) {
					/* we have a better state, send it */
					if (sc->sc_mbuf != NULL && !stale)
						pfsync_sendout(sc);
					stale++;
					if (!st->sync_flags)
						pfsync_pack_state(
						    PFSYNC_ACT_UPD, st, flags);
				}
				continue;
			}
			/* Update is acceptable: merge peers and refresh
			 * the expiry relative to our clock. */
			pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
			pf_state_peer_ntoh(&sp->src, &st->src);
			pf_state_peer_ntoh(&sp->dst, &st->dst);
			st->expire = ntohl(sp->expire) + time_second;
			st->timeout = sp->timeout;
		}
		if (stale && sc->sc_mbuf != NULL)
			pfsync_sendout(sc);
		splx(s);
		break;
	/*
	 * It's not strictly necessary for us to support the "uncompressed"
	 * delete action, but it's relatively simple and maintains consistency.
	 */
	case PFSYNC_ACT_DEL:
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*sp), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}

		s = splsoftnet();
		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
		    i < count; i++, sp++) {
			memcpy(&id_key.id, sp->id, sizeof(id_key.id));
			id_key.creatorid = sp->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
				continue;
			}
			st->sync_flags |= PFSTATE_FROMSYNC;
			pf_unlink_state(st);
		}
		splx(s);
		break;
	case PFSYNC_ACT_UPD_C: {
		/* Compressed updates: peers we don't know are asked for
		 * via an update request rather than inserted. */
		int update_requested = 0;

		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*up), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}

		s = splsoftnet();
		for (i = 0, up = (struct pfsync_state_upd *)(mp->m_data + offp);
		    i < count; i++, up++) {
			/* check for invalid values */
			if (up->timeout >= PFTM_MAX ||
			    up->src.state > PF_TCPS_PROXY_DST ||
			    up->dst.state > PF_TCPS_PROXY_DST) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync_insert: "
					    "PFSYNC_ACT_UPD_C: "
					    "invalid value\n");
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
				continue;
			}

			memcpy(&id_key.id, up->id, sizeof(id_key.id));
			id_key.creatorid = up->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				/* We don't have this state. Ask for it. */
				error = pfsync_request_update(up, &src);
				if (error == ENOMEM) {
					splx(s);
					goto done;
				}
				update_requested = 1;
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
				continue;
			}
			sk = st->state_key;
			sfail = 0;
			if (sk->proto == IPPROTO_TCP) {
				/*
				 * The state should never go backwards except
				 * for syn-proxy states. Neither should the
				 * sequence window slide backwards.
				 */
				if (st->src.state > up->src.state &&
				    (st->src.state < PF_TCPS_PROXY_SRC ||
				    up->src.state >= PF_TCPS_PROXY_SRC))
					sfail = 1;
				else if (st->dst.state > up->dst.state)
					sfail = 2;
				else if (SEQ_GT(st->src.seqlo,
				    ntohl(up->src.seqlo)))
					sfail = 3;
				else if (st->dst.state >= TCPS_SYN_SENT &&
				    SEQ_GT(st->dst.seqlo, ntohl(up->dst.seqlo)))
					sfail = 4;
			} else {
				/*
				 * Non-TCP protocol state machine always go
				 * forwards
				 */
				if (st->src.state > up->src.state)
					sfail = 5;
				else if (st->dst.state > up->dst.state)
					sfail = 6;
			}
			if (sfail) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync: ignoring stale update "
					    "(%d) id: %016" PRIu64 ""
					    "creatorid: %08x\n", sfail,
					    be64toh(st->id),
					    ntohl(st->creatorid));
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);

				/* we have a better state, send it out */
				if ((!stale || update_requested) &&
				    sc->sc_mbuf != NULL) {
					pfsync_sendout(sc);
					update_requested = 0;
				}
				stale++;
				if (!st->sync_flags)
					pfsync_pack_state(PFSYNC_ACT_UPD, st,
					    PFSYNC_FLAG_STALE);
				continue;
			}
			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
			pf_state_peer_ntoh(&up->src, &st->src);
			pf_state_peer_ntoh(&up->dst, &st->dst);
			st->expire = ntohl(up->expire) + time_second;
			st->timeout = up->timeout;
		}
		if ((update_requested || stale) && sc->sc_mbuf)
			pfsync_sendout(sc);
		splx(s);
		break;
	}
	case PFSYNC_ACT_DEL_C:
		/* Compressed deletes: id/creatorid pairs only. */
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*dp), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}

		s = splsoftnet();
		for (i = 0, dp = (struct pfsync_state_del *)(mp->m_data + offp);
		    i < count; i++, dp++) {
			memcpy(&id_key.id, dp->id, sizeof(id_key.id));
			id_key.creatorid = dp->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
				continue;
			}
			st->sync_flags |= PFSTATE_FROMSYNC;
			pf_unlink_state(st);
		}
		splx(s);
		break;
	case PFSYNC_ACT_INS_F:
	case PFSYNC_ACT_DEL_F:
		/* not implemented */
		break;
	case PFSYNC_ACT_UREQ:
		/* Update requests: an all-zero id/creatorid requests a
		 * full bulk update, otherwise answer per-state. */
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*rup), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}

		s = splsoftnet();
		if (sc->sc_mbuf != NULL)
			pfsync_sendout(sc);
		for (i = 0,
		    rup = (struct pfsync_state_upd_req *)(mp->m_data + offp);
		    i < count; i++, rup++) {
			memcpy(&id_key.id, rup->id, sizeof(id_key.id));
			id_key.creatorid = rup->creatorid;

			if (id_key.id == 0 && id_key.creatorid == 0) {
				sc->sc_ureq_received = time_uptime;
				if (sc->sc_bulk_send_next == NULL)
					sc->sc_bulk_send_next =
					    TAILQ_FIRST(&state_list);
				sc->sc_bulk_terminator = sc->sc_bulk_send_next;
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync: received "
					    "bulk update request\n");
				pfsync_send_bus(sc, PFSYNC_BUS_START);
				callout_schedule(&sc->sc_bulk_tmo, 1 * hz);
			} else {
				st = pf_find_state_byid(&id_key);
				if (st == NULL) {
					PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
					continue;
				}
				if (!st->sync_flags)
					pfsync_pack_state(PFSYNC_ACT_UPD,
					    st, 0);
			}
		}
		if (sc->sc_mbuf != NULL)
			pfsync_sendout(sc);
		splx(s);
		break;
	case PFSYNC_ACT_BUS:
		/* If we're not waiting for a bulk update, who cares. */
		if (sc->sc_ureq_sent == 0)
			break;

		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    sizeof(*bus), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}
		bus = (struct pfsync_state_bus *)(mp->m_data + offp);
		switch (bus->status) {
		case PFSYNC_BUS_START:
			/* Scale the failure timeout to how long the
			 * peer's bulk send could plausibly take. */
			callout_schedule(&sc->sc_bulkfail_tmo,
			    pf_pool_limits[PF_LIMIT_STATES].limit /
			    (PFSYNC_BULKPACKETS * sc->sc_maxcount));
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received bulk "
				    "update start\n");
			break;
		case PFSYNC_BUS_END:
			if (time_uptime - ntohl(bus->endtime) >=
			    sc->sc_ureq_sent) {
				/* that's it, we're happy */
				sc->sc_ureq_sent = 0;
				sc->sc_bulk_tries = 0;
				callout_stop(&sc->sc_bulkfail_tmo);
#if NCARP > 0
				if (!pfsync_sync_ok)
					carp_suppress_preempt--;
#endif
				pfsync_sync_ok = 1;
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync: received valid "
					    "bulk update end\n");
			} else {
				if (pf_status.debug >= PF_DEBUG_MISC)
					printf("pfsync: received invalid "
					    "bulk update end: bad timestamp\n");
			}
			break;
		}
		break;
#ifdef IPSEC
	case PFSYNC_ACT_TDB_UPD:
		/* Merge TDB replay counters from the peer. */
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*pt), &offp)) == NULL) {
			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
			return;
		}
		s = splsoftnet();
		for (i = 0, pt = (struct pfsync_tdb *)(mp->m_data + offp);
		    i < count; i++, pt++)
			pfsync_update_net_tdb(pt);
		splx(s);
		break;
#endif
	}

done:
	if (m)
		m_freem(m);
}
872
/*
 * if_output for pfsync.  The interface never transmits packets handed
 * down by the protocol stack; anything given to it is silently
 * discarded (real pfsync traffic goes out via pfsync_sendout_mbuf()
 * on the configured sync interface instead).
 */
int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
	struct rtentry *rt)
{
	m_freem(m);
	return (0);
}
880
/* ARGSUSED */
/*
 * ioctl handler for the pfsync interface.
 *
 * Besides the generic interface ioctls, it implements:
 *   SIOCGETPFSYNC - read the current sync configuration (privileged);
 *   SIOCSETPFSYNC - set sync device / peer / max-updates (privileged),
 *                   joining or leaving the pfsync multicast group and
 *                   kicking off a bulk state-table request as needed.
 *
 * Returns 0 or an errno.  Runs under splnet() for the sections that
 * touch the packet-building state.
 */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, void* data)
{
	struct lwp *l = curlwp;
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	int s, error;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		/* Mirror IFF_UP into IFF_RUNNING; no other setup needed. */
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < PFSYNC_MINMTU)
			return (EINVAL);
		if (ifr->ifr_mtu > MCLBYTES)
			ifr->ifr_mtu = MCLBYTES;
		s = splnet();
		/* Flush the pending packet before shrinking the MTU so
		 * it is not truncated by the new limit. */
		if (ifr->ifr_mtu < ifp->if_mtu)
			pfsync_sendout(sc);
		pfsync_setmtu(sc, ifr->ifr_mtu);
		splx(s);
		break;
	case SIOCGETPFSYNC:
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE,
		    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, ifp, (void *)cmd,
		    NULL)) != 0)
			return (error);
		memset(&pfsyncr, 0, sizeof(pfsyncr));
		if (sc->sc_sync_ifp)
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_ifp->if_xname, IFNAMSIZ);
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		if ((error = copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))))
			return (error);
		break;
	case SIOCSETPFSYNC:
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE,
		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
		    NULL)) != 0)
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

		/* An unset peer means "use the pfsync multicast group". */
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
			return (EINVAL);
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		/* Empty device name: detach from the sync interface. */
		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			sc->sc_sync_ifp = NULL;
			if (sc->sc_mbuf_net != NULL) {
				/* Don't keep stale pfsync packets around. */
				s = splnet();
				m_freem(sc->sc_mbuf_net);
				sc->sc_mbuf_net = NULL;
				sc->sc_statep_net.s = NULL;
				splx(s);
			}
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
			break;
		}

		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

		s = splnet();
		/* Flush pending output if the new device could force a
		 * smaller packet size. */
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
		    (sc->sc_sync_ifp != NULL &&
		    sifp->if_mtu < sc->sc_sync_ifp->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout(sc);
		sc->sc_sync_ifp = sifp;

		pfsync_setmtu(sc, sc->sc_if.if_mtu);

		/* Drop any old multicast membership before re-joining. */
		if (imo->imo_num_memberships > 0) {
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
			imo->imo_multicast_ifp = NULL;
		}

		if (sc->sc_sync_ifp &&
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
			struct in_addr addr;

			if (!(sc->sc_sync_ifp->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_ifp = NULL;
				splx(s);
				return (EADDRNOTAVAIL);
			}

			addr.s_addr = INADDR_PFSYNC_GROUP;

			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_ifp)) == NULL) {
				sc->sc_sync_ifp = NULL;
				splx(s);
				return (ENOBUFS);
			}
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_ifp;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}

		if (sc->sc_sync_ifp ||
		    sc->sc_sendaddr.s_addr != INADDR_PFSYNC_GROUP) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
#if NCARP > 0
			/* Suppress carp preemption until the bulk
			 * transfer completes. */
			if (pfsync_sync_ok)
				carp_suppress_preempt ++;
#endif
			pfsync_sync_ok = 0;
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: requesting bulk update\n");
			callout_schedule(&sc->sc_bulkfail_tmo, 5 * hz);
			error = pfsync_request_update(NULL, NULL);
			if (error == ENOMEM) {
				splx(s);
				return (ENOMEM);
			}
			pfsync_sendout(sc);
		}
		splx(s);

		break;

	default:
		return ifioctl_common(ifp, cmd, data);
	}

	return (0);
}
1036
1037 void
1038 pfsync_setmtu(struct pfsync_softc *sc, int mtu_req)
1039 {
1040 int mtu;
1041
1042 if (sc->sc_sync_ifp && sc->sc_sync_ifp->if_mtu < mtu_req)
1043 mtu = sc->sc_sync_ifp->if_mtu;
1044 else
1045 mtu = mtu_req;
1046
1047 sc->sc_maxcount = (mtu - sizeof(struct pfsync_header)) /
1048 sizeof(struct pfsync_state);
1049 if (sc->sc_maxcount > 254)
1050 sc->sc_maxcount = 254;
1051 sc->sc_if.if_mtu = sizeof(struct pfsync_header) +
1052 sc->sc_maxcount * sizeof(struct pfsync_state);
1053 }
1054
/*
 * Allocate and initialize an mbuf to accumulate pfsync records for
 * the given action.
 *
 * The buffer is sized for sc_maxcount records of the action's record
 * type (single-record actions like CLR/BUS get exactly one).  On
 * return, *sp points just past the pfsync header, where the caller
 * appends records, and the appropriate flush callout is armed so a
 * partially filled packet still goes out within ~1 second.
 *
 * Returns the mbuf, or NULL on allocation failure (if_oerrors is
 * bumped in that case).
 */
struct mbuf *
pfsync_get_mbuf(struct pfsync_softc *sc, u_int8_t action, void **sp)
{
	struct pfsync_header *h;
	struct mbuf *m;
	int len;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		sc->sc_if.if_oerrors++;
		return (NULL);
	}

	/* Pick the buffer size from the action's record type. */
	switch (action) {
	case PFSYNC_ACT_CLR:
		len = sizeof(struct pfsync_header) +
		    sizeof(struct pfsync_state_clr);
		break;
	case PFSYNC_ACT_UPD_C:
		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd)) +
		    sizeof(struct pfsync_header);
		break;
	case PFSYNC_ACT_DEL_C:
		len = (sc->sc_maxcount * sizeof(struct pfsync_state_del)) +
		    sizeof(struct pfsync_header);
		break;
	case PFSYNC_ACT_UREQ:
		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd_req)) +
		    sizeof(struct pfsync_header);
		break;
	case PFSYNC_ACT_BUS:
		len = sizeof(struct pfsync_header) +
		    sizeof(struct pfsync_state_bus);
		break;
	case PFSYNC_ACT_TDB_UPD:
		len = (sc->sc_maxcount * sizeof(struct pfsync_tdb)) +
		    sizeof(struct pfsync_header);
		break;
	default:
		/* Full-state actions (INS/UPD/DEL). */
		len = (sc->sc_maxcount * sizeof(struct pfsync_state)) +
		    sizeof(struct pfsync_header);
		break;
	}

	if (len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			sc->sc_if.if_oerrors++;
			return (NULL);
		}
		/* Long-word align the data at the end of the cluster. */
		m->m_data += (MCLBYTES - len) &~ (sizeof(long) - 1);
	} else
		MH_ALIGN(m, len);

	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct pfsync_header);
	h = mtod(m, struct pfsync_header *);
	h->version = PFSYNC_VERSION;
	h->af = 0;
	h->count = 0;
	h->action = action;
	/* TDB updates carry no ruleset checksum. */
	if (action != PFSYNC_ACT_TDB_UPD)
		memcpy(&h->pf_chksum, &pf_status.pf_chksum,
		    PF_MD5_DIGEST_LENGTH);

	*sp = (void *)((char *)h + PFSYNC_HDRLEN);
	/* Arm the flush timer matching this packet type. */
	if (action == PFSYNC_ACT_TDB_UPD)
		callout_schedule(&sc->sc_tdb_tmo, hz);
	else
		callout_schedule(&sc->sc_tmo, hz);
	return (m);
}
1128
/*
 * Marshal one pf state into the pending pfsync packet for `action'
 * (insert/update/delete), merging with an earlier update of the same
 * state when sc_maxupdates allows, and optionally building the
 * "compressed" on-wire variant for network transfer.  Flushes the
 * packet when it reaches sc_maxcount entries.
 * Returns 0 on success, EINVAL for an unknown action, or ENOMEM.
 */
int
pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
{
	struct ifnet *ifp = NULL;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_header *h, *h_net;
	struct pfsync_state *sp = NULL;
	struct pfsync_state_upd *up = NULL;
	struct pfsync_state_del *dp = NULL;
	struct pf_state_key *sk = st->state_key;
	struct pf_rule *r;
	u_long secs;
	int s, ret = 0;
	/* i == 255 means "no matching in-packet update found". */
	u_int8_t i = 255, newaction = 0;

	if (sc == NULL)
		return (0);
	ifp = &sc->sc_if;

	/*
	 * If a packet falls in the forest and there's nobody around to
	 * hear, does it make a sound?
	 */
	if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
	    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
		/* Don't leave any stale pfsync packets hanging around. */
		if (sc->sc_mbuf != NULL) {
			m_freem(sc->sc_mbuf);
			sc->sc_mbuf = NULL;
			sc->sc_statep.s = NULL;
		}
		return (0);
	}

	if (action >= PFSYNC_ACT_MAX)
		return (EINVAL);

	s = splnet();
	if (sc->sc_mbuf == NULL) {
		/* No packet in progress; start a new one for this action. */
		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
		    (void *)&sc->sc_statep.s)) == NULL) {
			splx(s);
			return (ENOMEM);
		}
		h = mtod(sc->sc_mbuf, struct pfsync_header *);
	} else {
		h = mtod(sc->sc_mbuf, struct pfsync_header *);
		if (h->action != action) {
			/* Different action: flush and start a fresh packet. */
			pfsync_sendout(sc);
			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
			    (void *)&sc->sc_statep.s)) == NULL) {
				splx(s);
				return (ENOMEM);
			}
			h = mtod(sc->sc_mbuf, struct pfsync_header *);
		} else {
			/*
			 * If it's an update, look in the packet to see if
			 * we already have an update for the state.
			 */
			if (action == PFSYNC_ACT_UPD && sc->sc_maxupdates) {
				struct pfsync_state *usp =
				    (void *)((char *)h + PFSYNC_HDRLEN);

				for (i = 0; i < h->count; i++) {
					if (!memcmp(usp->id, &st->id,
					    PFSYNC_ID_LEN) &&
					    usp->creatorid == st->creatorid) {
						sp = usp;
						sp->updates++;
						break;
					}
					usp++;
				}
			}
		}
	}

	secs = time_second;

	st->pfsync_time = time_uptime;

	if (sp == NULL) {
		/* not a "duplicate" update */
		i = 255;
		sp = sc->sc_statep.s++;
		sc->sc_mbuf->m_pkthdr.len =
		    sc->sc_mbuf->m_len += sizeof(struct pfsync_state);
		h->count++;
		memset(sp, 0, sizeof(*sp));

		memcpy(sp->id, &st->id, sizeof(sp->id));
		sp->creatorid = st->creatorid;

		/* Serialize the full state in network byte order. */
		strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
		pf_state_host_hton(&sk->lan, &sp->lan);
		pf_state_host_hton(&sk->gwy, &sp->gwy);
		pf_state_host_hton(&sk->ext, &sp->ext);

		memcpy(&sp->rt_addr, &st->rt_addr, sizeof(sp->rt_addr));

		sp->creation = htonl(secs - st->creation);
		pf_state_counter_hton(st->packets[0], sp->packets[0]);
		pf_state_counter_hton(st->packets[1], sp->packets[1]);
		pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
		pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
		/* -1 denotes "no rule" on the peer. */
		if ((r = st->rule.ptr) == NULL)
			sp->rule = htonl(-1);
		else
			sp->rule = htonl(r->nr);
		if ((r = st->anchor.ptr) == NULL)
			sp->anchor = htonl(-1);
		else
			sp->anchor = htonl(r->nr);
		sp->af = sk->af;
		sp->proto = sk->proto;
		sp->direction = sk->direction;
		sp->log = st->log;
		sp->allow_opts = st->allow_opts;
		sp->timeout = st->timeout;

		if (flags & PFSYNC_FLAG_STALE)
			sp->sync_flags |= PFSTATE_STALE;
	}

	/* Peer/expiry fields are refreshed even for merged updates. */
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->expire <= secs)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(st->expire - secs);

	/* do we need to build "compressed" actions for network transfer? */
	if (sc->sc_sync_ifp && flags & PFSYNC_FLAG_COMPRESS) {
		switch (action) {
		case PFSYNC_ACT_UPD:
			newaction = PFSYNC_ACT_UPD_C;
			break;
		case PFSYNC_ACT_DEL:
			newaction = PFSYNC_ACT_DEL_C;
			break;
		default:
			/* by default we just send the uncompressed states */
			break;
		}
	}

	if (newaction) {
		/* Maintain a parallel mbuf carrying the compact encoding. */
		if (sc->sc_mbuf_net == NULL) {
			if ((sc->sc_mbuf_net = pfsync_get_mbuf(sc, newaction,
			    (void *)&sc->sc_statep_net.s)) == NULL) {
				splx(s);
				return (ENOMEM);
			}
		}
		h_net = mtod(sc->sc_mbuf_net, struct pfsync_header *);

		switch (newaction) {
		case PFSYNC_ACT_UPD_C:
			if (i != 255) {
				/* Merged above: bump the matching entry. */
				up = (void *)((char *)h_net +
				    PFSYNC_HDRLEN + (i * sizeof(*up)));
				up->updates++;
			} else {
				h_net->count++;
				sc->sc_mbuf_net->m_pkthdr.len =
				    sc->sc_mbuf_net->m_len += sizeof(*up);
				up = sc->sc_statep_net.u++;

				memset(up, 0, sizeof(*up));
				memcpy(up->id, &st->id, sizeof(up->id));
				up->creatorid = st->creatorid;
			}
			up->timeout = st->timeout;
			up->expire = sp->expire;
			up->src = sp->src;
			up->dst = sp->dst;
			break;
		case PFSYNC_ACT_DEL_C:
			sc->sc_mbuf_net->m_pkthdr.len =
			    sc->sc_mbuf_net->m_len += sizeof(*dp);
			dp = sc->sc_statep_net.d++;
			h_net->count++;

			memset(dp, 0, sizeof(*dp));
			memcpy(dp->id, &st->id, sizeof(dp->id));
			dp->creatorid = st->creatorid;
			break;
		}
	}

	/* Flush once the packet is full or a state hit the update cap. */
	if (h->count == sc->sc_maxcount ||
	    (sc->sc_maxupdates && (sp->updates >= sc->sc_maxupdates)))
		ret = pfsync_sendout(sc);

	splx(s);
	return (ret);
}
1328
1329 /* This must be called in splnet() */
1330 int
1331 pfsync_request_update(struct pfsync_state_upd *up, struct in_addr *src)
1332 {
1333 struct ifnet *ifp = NULL;
1334 struct pfsync_header *h;
1335 struct pfsync_softc *sc = pfsyncif;
1336 struct pfsync_state_upd_req *rup;
1337 int ret = 0;
1338
1339 if (sc == NULL)
1340 return (0);
1341
1342 ifp = &sc->sc_if;
1343 if (sc->sc_mbuf == NULL) {
1344 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1345 (void *)&sc->sc_statep.s)) == NULL)
1346 return (ENOMEM);
1347 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1348 } else {
1349 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1350 if (h->action != PFSYNC_ACT_UREQ) {
1351 pfsync_sendout(sc);
1352 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1353 (void *)&sc->sc_statep.s)) == NULL)
1354 return (ENOMEM);
1355 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1356 }
1357 }
1358
1359 if (src != NULL)
1360 sc->sc_sendaddr = *src;
1361 sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*rup);
1362 h->count++;
1363 rup = sc->sc_statep.r++;
1364 memset(rup, 0, sizeof(*rup));
1365 if (up != NULL) {
1366 memcpy(rup->id, up->id, sizeof(rup->id));
1367 rup->creatorid = up->creatorid;
1368 }
1369
1370 if (h->count == sc->sc_maxcount)
1371 ret = pfsync_sendout(sc);
1372
1373 return (ret);
1374 }
1375
1376 int
1377 pfsync_clear_states(u_int32_t creatorid, char *ifname)
1378 {
1379 struct ifnet *ifp = NULL;
1380 struct pfsync_softc *sc = pfsyncif;
1381 struct pfsync_state_clr *cp;
1382 int s, ret;
1383
1384 if (sc == NULL)
1385 return (0);
1386
1387 ifp = &sc->sc_if;
1388 s = splnet();
1389 if (sc->sc_mbuf != NULL)
1390 pfsync_sendout(sc);
1391 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_CLR,
1392 (void *)&sc->sc_statep.c)) == NULL) {
1393 splx(s);
1394 return (ENOMEM);
1395 }
1396 sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*cp);
1397 cp = sc->sc_statep.c;
1398 cp->creatorid = creatorid;
1399 if (ifname != NULL)
1400 strlcpy(cp->ifname, ifname, IFNAMSIZ);
1401
1402 ret = (pfsync_sendout(sc));
1403 splx(s);
1404 return (ret);
1405 }
1406
/*
 * Callout handler: flush the pending pfsync state packet.
 */
void
pfsync_timeout(void *v)
{
	struct pfsync_softc *sc = v;
	int spl = splnet();

	pfsync_sendout(sc);
	splx(spl);
}
1417
/*
 * Callout handler: flush the pending pfsync TDB packet.
 */
void
pfsync_tdb_timeout(void *v)
{
	struct pfsync_softc *sc = v;
	int spl = splnet();

	pfsync_tdb_sendout(sc);
	splx(spl);
}
1428
1429 /* This must be called in splnet() */
1430 void
1431 pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
1432 {
1433 struct pfsync_state_bus *bus;
1434
1435 if (sc->sc_mbuf != NULL)
1436 pfsync_sendout(sc);
1437
1438 if (pfsync_sync_ok &&
1439 (sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_BUS,
1440 (void *)&sc->sc_statep.b)) != NULL) {
1441 sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
1442 bus = sc->sc_statep.b;
1443 bus->creatorid = pf_status.hostid;
1444 bus->status = status;
1445 bus->endtime = htonl(time_uptime - sc->sc_ureq_received);
1446 pfsync_sendout(sc);
1447 }
1448 }
1449
/*
 * Callout handler driving a bulk state transfer to a requesting peer.
 * Walks the global state list from where the previous invocation left
 * off (sc_bulk_send_next), sending at most sc_maxcount *
 * PFSYNC_BULKPACKETS states per run, wrapping at the end of the list,
 * and stopping at sc_bulk_terminator.  Reschedules itself until done,
 * then announces completion with a BUS "end" message.
 */
void
pfsync_bulk_update(void *v)
{
	struct pfsync_softc *sc = v;
	int s, i = 0;
	struct pf_state *state;

	s = splnet();
	if (sc->sc_mbuf != NULL)
		pfsync_sendout(sc);

	/*
	 * Grab at most PFSYNC_BULKPACKETS worth of states which have not
	 * been sent since the latest request was made.
	 */
	state = sc->sc_bulk_send_next;
	if (state)
		do {
			/* send state update if syncable and not already sent */
			if (!state->sync_flags
			    && state->timeout < PFTM_MAX
			    && state->pfsync_time <= sc->sc_ureq_received) {
				pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
				i++;
			}

			/* figure next state to send */
			state = TAILQ_NEXT(state, entry_list);

			/* wrap to start of list if we hit the end */
			if (!state)
				state = TAILQ_FIRST(&state_list);
		} while (i < sc->sc_maxcount * PFSYNC_BULKPACKETS &&
		    state != sc->sc_bulk_terminator);

	if (!state || state == sc->sc_bulk_terminator) {
		/* we're done */
		pfsync_send_bus(sc, PFSYNC_BUS_END);
		sc->sc_ureq_received = 0;
		sc->sc_bulk_send_next = NULL;
		sc->sc_bulk_terminator = NULL;
		callout_stop(&sc->sc_bulk_tmo);
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: bulk update complete\n");
	} else {
		/* look again for more in a bit */
		callout_schedule(&sc->sc_bulk_tmo, 1);
		sc->sc_bulk_send_next = state;
	}
	/* Flush whatever partial packet this pass accumulated. */
	if (sc->sc_mbuf != NULL)
		pfsync_sendout(sc);
	splx(s);
}
1503
/*
 * Callout handler fired when a requested bulk update did not complete
 * in time.  Re-requests the bulk transfer up to PFSYNC_MAX_BULKTRIES
 * times; after that, gives up and declares this host in sync anyway
 * (releasing the carp preemption suppression taken while out of sync).
 */
void
pfsync_bulkfail(void *v)
{
	struct pfsync_softc *sc = v;
	int s, error;

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again in a bit */
		callout_schedule(&sc->sc_bulkfail_tmo, 5 * hz);
		s = splnet();
		/* NULL, NULL: request a full bulk update from any peer. */
		error = pfsync_request_update(NULL, NULL);
		if (error == ENOMEM) {
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: cannot allocate mbufs for "
				    "bulk update\n");
		} else
			pfsync_sendout(sc);
		splx(s);
	} else {
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
#if NCARP > 0
		if (!pfsync_sync_ok)
			carp_suppress_preempt --;
#endif
		pfsync_sync_ok = 1;
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive "
			    "bulk update status\n");
		callout_stop(&sc->sc_bulkfail_tmo);
	}
}
1537
1538 /* This must be called in splnet() */
1539 int
1540 pfsync_sendout(struct pfsync_softc *sc)
1541 {
1542 struct ifnet *ifp = &sc->sc_if;
1543 struct mbuf *m;
1544
1545 callout_stop(&sc->sc_tmo);
1546
1547 if (sc->sc_mbuf == NULL)
1548 return (0);
1549 m = sc->sc_mbuf;
1550 sc->sc_mbuf = NULL;
1551 sc->sc_statep.s = NULL;
1552
1553 bpf_mtap(ifp, m);
1554
1555 if (sc->sc_mbuf_net) {
1556 m_freem(m);
1557 m = sc->sc_mbuf_net;
1558 sc->sc_mbuf_net = NULL;
1559 sc->sc_statep_net.s = NULL;
1560 }
1561
1562 return pfsync_sendout_mbuf(sc, m);
1563 }
1564
1565 int
1566 pfsync_tdb_sendout(struct pfsync_softc *sc)
1567 {
1568 struct ifnet *ifp = &sc->sc_if;
1569 struct mbuf *m;
1570
1571 callout_stop(&sc->sc_tdb_tmo);
1572
1573 if (sc->sc_mbuf_tdb == NULL)
1574 return (0);
1575 m = sc->sc_mbuf_tdb;
1576 sc->sc_mbuf_tdb = NULL;
1577 sc->sc_statep_tdb.t = NULL;
1578
1579 bpf_mtap(ifp, m);
1580
1581 return pfsync_sendout_mbuf(sc, m);
1582 }
1583
1584 int
1585 pfsync_sendout_mbuf(struct pfsync_softc *sc, struct mbuf *m)
1586 {
1587 struct sockaddr sa;
1588 struct ip *ip;
1589
1590 if (sc->sc_sync_ifp ||
1591 sc->sc_sync_peer.s_addr != INADDR_PFSYNC_GROUP) {
1592 M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
1593 if (m == NULL) {
1594 PFSYNC_STATINC(PFSYNC_STAT_ONOMEM);
1595 return (0);
1596 }
1597 ip = mtod(m, struct ip *);
1598 ip->ip_v = IPVERSION;
1599 ip->ip_hl = sizeof(*ip) >> 2;
1600 ip->ip_tos = IPTOS_LOWDELAY;
1601 ip->ip_len = htons(m->m_pkthdr.len);
1602 ip->ip_id = htons(ip_randomid(0));
1603 ip->ip_off = htons(IP_DF);
1604 ip->ip_ttl = PFSYNC_DFLTTL;
1605 ip->ip_p = IPPROTO_PFSYNC;
1606 ip->ip_sum = 0;
1607
1608 memset(&sa, 0, sizeof(sa));
1609 ip->ip_src.s_addr = INADDR_ANY;
1610
1611 if (sc->sc_sendaddr.s_addr == INADDR_PFSYNC_GROUP)
1612 m->m_flags |= M_MCAST;
1613 ip->ip_dst = sc->sc_sendaddr;
1614 sc->sc_sendaddr.s_addr = sc->sc_sync_peer.s_addr;
1615
1616 PFSYNC_STATINC(PFSYNC_STAT_OPACKETS);
1617
1618 if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)) {
1619 PFSYNC_STATINC(PFSYNC_STAT_OERRORS);
1620 }
1621 } else
1622 m_freem(m);
1623
1624 return (0);
1625 }
1626
1627 #ifdef IPSEC
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
/*
 * Apply a peer's TDB (IPsec SA) update to our local SA table: bring
 * the replay counter and byte counter forward, never backward.
 * Rejects reserved SPIs and non-INET/INET6 address families.
 */
void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;
	int s;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	     pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	s = spltdb();
	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		/* Convert from wire byte order before comparing. */
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			splx(s);
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	splx(s);
	return;

 bad:
	if (pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
	return;
}
1667
1668 /* One of our local tdbs have been updated, need to sync rpl with others */
1669 int
1670 pfsync_update_tdb(struct tdb *tdb, int output)
1671 {
1672 struct ifnet *ifp = NULL;
1673 struct pfsync_softc *sc = pfsyncif;
1674 struct pfsync_header *h;
1675 struct pfsync_tdb *pt = NULL;
1676 int s, i, ret;
1677
1678 if (sc == NULL)
1679 return (0);
1680
1681 ifp = &sc->sc_if;
1682 if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
1683 sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1684 /* Don't leave any stale pfsync packets hanging around. */
1685 if (sc->sc_mbuf_tdb != NULL) {
1686 m_freem(sc->sc_mbuf_tdb);
1687 sc->sc_mbuf_tdb = NULL;
1688 sc->sc_statep_tdb.t = NULL;
1689 }
1690 return (0);
1691 }
1692
1693 s = splnet();
1694 if (sc->sc_mbuf_tdb == NULL) {
1695 if ((sc->sc_mbuf_tdb = pfsync_get_mbuf(sc, PFSYNC_ACT_TDB_UPD,
1696 (void *)&sc->sc_statep_tdb.t)) == NULL) {
1697 splx(s);
1698 return (ENOMEM);
1699 }
1700 h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
1701 } else {
1702 h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
1703 if (h->action != PFSYNC_ACT_TDB_UPD) {
1704 /*
1705 * XXX will never happen as long as there's
1706 * only one "TDB action".
1707 */
1708 pfsync_tdb_sendout(sc);
1709 sc->sc_mbuf_tdb = pfsync_get_mbuf(sc,
1710 PFSYNC_ACT_TDB_UPD, (void *)&sc->sc_statep_tdb.t);
1711 if (sc->sc_mbuf_tdb == NULL) {
1712 splx(s);
1713 return (ENOMEM);
1714 }
1715 h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
1716 } else if (sc->sc_maxupdates) {
1717 /*
1718 * If it's an update, look in the packet to see if
1719 * we already have an update for the state.
1720 */
1721 struct pfsync_tdb *u =
1722 (void *)((char *)h + PFSYNC_HDRLEN);
1723
1724 for (i = 0; !pt && i < h->count; i++) {
1725 if (tdb->tdb_spi == u->spi &&
1726 tdb->tdb_sproto == u->sproto &&
1727 !bcmp(&tdb->tdb_dst, &u->dst,
1728 SA_LEN(&u->dst.sa))) {
1729 pt = u;
1730 pt->updates++;
1731 }
1732 u++;
1733 }
1734 }
1735 }
1736
1737 if (pt == NULL) {
1738 /* not a "duplicate" update */
1739 pt = sc->sc_statep_tdb.t++;
1740 sc->sc_mbuf_tdb->m_pkthdr.len =
1741 sc->sc_mbuf_tdb->m_len += sizeof(struct pfsync_tdb);
1742 h->count++;
1743 memset(pt, 0, sizeof(*pt));
1744
1745 pt->spi = tdb->tdb_spi;
1746 memcpy(&pt->dst, &tdb->tdb_dst, sizeof pt->dst);
1747 pt->sproto = tdb->tdb_sproto;
1748 }
1749
1750 /*
1751 * When a failover happens, the master's rpl is probably above
1752 * what we see here (we may be up to a second late), so
1753 * increase it a bit for outbound tdbs to manage most such
1754 * situations.
1755 *
1756 * For now, just add an offset that is likely to be larger
1757 * than the number of packets we can see in one second. The RFC
1758 * just says the next packet must have a higher seq value.
1759 *
1760 * XXX What is a good algorithm for this? We could use
1761 * a rate-determined increase, but to know it, we would have
1762 * to extend struct tdb.
1763 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
1764 * will soon be replaced anyway. For now, just don't handle
1765 * this edge case.
1766 */
1767 #define RPL_INCR 16384
1768 pt->rpl = htonl(tdb->tdb_rpl + (output ? RPL_INCR : 0));
1769 pt->cur_bytes = htobe64(tdb->tdb_cur_bytes);
1770
1771 if (h->count == sc->sc_maxcount ||
1772 (sc->sc_maxupdates && (pt->updates >= sc->sc_maxupdates)))
1773 ret = pfsync_tdb_sendout(sc);
1774
1775 splx(s);
1776 return (ret);
1777 }
1778 #endif
1779
/*
 * sysctl handler for net.inet.pfsync.stats: aggregate and export the
 * per-CPU pfsync statistics counters.
 */
static int
sysctl_net_inet_pfsync_stats(SYSCTLFN_ARGS)
{

	return (NETSTAT_SYSCTL(pfsyncstat_percpu, PFSYNC_NSTATS));
}
1786
/*
 * Create the net.inet.pfsync sysctl subtree: the parent "net" and
 * "inet" nodes (idempotent if they already exist), the "pfsync" node,
 * and a read-only "stats" leaf served by sysctl_net_inet_pfsync_stats.
 */
SYSCTL_SETUP(sysctl_net_inet_pfsync_setup, "sysctl net.inet.pfsync subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "net", NULL,
		       NULL, 0, NULL, 0,
		       CTL_NET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet", NULL,
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "pfsync",
		       SYSCTL_DESCR("pfsync related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_PFSYNC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "stats",
		       SYSCTL_DESCR("pfsync statistics"),
		       sysctl_net_inet_pfsync_stats, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_PFSYNC,
		       CTL_CREATE, CTL_EOL);
}
1814