1 /*	$OpenBSD: if_pfsync.c,v 1.46 2005/02/20 15:58:38 mcbride Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include "bpfilter.h"
30 #include "pfsync.h"
31
32 #include <sys/param.h>
33 #include <sys/proc.h>
34 #include <sys/systm.h>
35 #include <sys/time.h>
36 #include <sys/mbuf.h>
37 #include <sys/socket.h>
38 #include <sys/ioctl.h>
39 #include <sys/timeout.h>
40 #include <sys/kernel.h>
41
42 #include <net/if.h>
43 #include <net/if_types.h>
44 #include <net/route.h>
45 #include <net/bpf.h>
46 #include <netinet/tcp.h>
47 #include <netinet/tcp_seq.h>
48
49 #ifdef INET
50 #include <netinet/in.h>
51 #include <netinet/in_systm.h>
52 #include <netinet/in_var.h>
53 #include <netinet/ip.h>
54 #include <netinet/ip_var.h>
55 #endif
56
57 #ifdef INET6
58 #ifndef INET
59 #include <netinet/in.h>
60 #endif
61 #include <netinet6/nd6.h>
62 #endif /* INET6 */
63
64 #include "carp.h"
65 #if NCARP > 0
66 extern int carp_suppress_preempt;
67 #endif
68
69 #include <net/pfvar.h>
70 #include <net/if_pfsync.h>
71
72 #define PFSYNC_MINMTU \
73 (sizeof(struct pfsync_header) + sizeof(struct pf_state))
74
75 #ifdef PFSYNCDEBUG
76 #define DPRINTF(x) do { if (pfsyncdebug) printf x ; } while (0)
77 int pfsyncdebug;
78 #else
79 #define DPRINTF(x)
80 #endif
81
82 struct pfsync_softc pfsyncif;
83 struct pfsyncstats pfsyncstats;
84
85 void pfsyncattach(int);
86 void pfsync_setmtu(struct pfsync_softc *, int);
87 int pfsync_insert_net_state(struct pfsync_state *);
88 int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
89 struct rtentry *);
90 int pfsyncioctl(struct ifnet *, u_long, caddr_t);
91 void pfsyncstart(struct ifnet *);
92
93 struct mbuf *pfsync_get_mbuf(struct pfsync_softc *, u_int8_t, void **);
94 int pfsync_request_update(struct pfsync_state_upd *, struct in_addr *);
95 int pfsync_sendout(struct pfsync_softc *);
96 void pfsync_timeout(void *);
97 void pfsync_send_bus(struct pfsync_softc *, u_int8_t);
98 void pfsync_bulk_update(void *);
99 void pfsync_bulkfail(void *);
100
101 int pfsync_sync_ok;
102 extern int ifqmaxlen;
103
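/*
 * Attach the pfsync pseudo-device: reset the single softc to its defaults
 * (multicast sync peer, at most 128 updates per state), initialize the
 * pfsync0 ifnet and its timeouts, then attach the interface and bpf tap.
 */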
104 void
105 pfsyncattach(int npfsync)
106 {
107 struct ifnet *ifp;
108
109 pfsync_sync_ok = 1;
110 bzero(&pfsyncif, sizeof(pfsyncif));
111 pfsyncif.sc_mbuf = NULL;
112 pfsyncif.sc_mbuf_net = NULL;
113 pfsyncif.sc_statep.s = NULL;
114 pfsyncif.sc_statep_net.s = NULL;
115 pfsyncif.sc_maxupdates = 128;
116 pfsyncif.sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
117 pfsyncif.sc_sendaddr.s_addr = INADDR_PFSYNC_GROUP;
118 pfsyncif.sc_ureq_received = 0;
119 pfsyncif.sc_ureq_sent = 0;
120 ifp = &pfsyncif.sc_if;
121 strlcpy(ifp->if_xname, "pfsync0", sizeof ifp->if_xname);
122 ifp->if_softc = &pfsyncif;
123 ifp->if_ioctl = pfsyncioctl;
124 ifp->if_output = pfsyncoutput;
125 ifp->if_start = pfsyncstart;
126 ifp->if_type = IFT_PFSYNC;
127 ifp->if_snd.ifq_maxlen = ifqmaxlen;
128 ifp->if_hdrlen = PFSYNC_HDRLEN;
129 pfsync_setmtu(&pfsyncif, MCLBYTES);
130 timeout_set(&pfsyncif.sc_tmo, pfsync_timeout, &pfsyncif);
131 timeout_set(&pfsyncif.sc_bulk_tmo, pfsync_bulk_update, &pfsyncif);
132 timeout_set(&pfsyncif.sc_bulkfail_tmo, pfsync_bulkfail, &pfsyncif);
133 if_attach(ifp);
134 if_alloc_sadl(ifp);
135
136 #if NBPFILTER > 0
137 bpfattach(&pfsyncif.sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
138 #endif
139 }
140
141 /*
142  * Start output on the pfsync interface; the queue is simply drained and its packets dropped.
143 */
144 void
145 pfsyncstart(struct ifnet *ifp)
146 {
147 struct mbuf *m;
148 int s;
149
150 for (;;) {
151 s = splimp();
152 IF_DROP(&ifp->if_snd);
153 IF_DEQUEUE(&ifp->if_snd, m);
154 splx(s);
155
156 if (m == NULL)
157 return;
158 else
159 m_freem(m);
160 }
161 }
162
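/*
 * Create a local pf state from a pfsync_state received off the wire: find
 * or create the pfi_kif for sp->ifname, allocate a pf_state bound to the
 * default rule, convert host and peer data from network byte order, mark
 * the state PFSTATE_FROMSYNC and insert it into the state trees.
 */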
163 int
164 pfsync_insert_net_state(struct pfsync_state *sp)
165 {
166 struct pf_state *st = NULL;
167 struct pf_rule *r = NULL;
168 struct pfi_kif *kif;
169
170 if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
171 printf("pfsync_insert_net_state: invalid creator id:"
172 " %08x\n", ntohl(sp->creatorid));
173 return (EINVAL);
174 }
175
176 kif = pfi_lookup_create(sp->ifname);
177 if (kif == NULL) {
178 if (pf_status.debug >= PF_DEBUG_MISC)
179 printf("pfsync_insert_net_state: "
180 "unknown interface: %s\n", sp->ifname);
181 /* skip this state */
182 return (0);
183 }
184
185 /*
186 * Just use the default rule until we have infrastructure to find the
187 * best matching rule.
188 */
189 r = &pf_default_rule;
190
191 if (!r->max_states || r->states < r->max_states)
192 st = pool_get(&pf_state_pl, PR_NOWAIT);
193 if (st == NULL) {
194 pfi_maybe_destroy(kif);
195 return (ENOMEM);
196 }
197 bzero(st, sizeof(*st));
198
199 st->rule.ptr = r;
200 /* XXX get pointers to nat_rule and anchor */
201
202 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
203 r->states++;
204
205 /* fill in the rest of the state entry */
206 pf_state_host_ntoh(&sp->lan, &st->lan);
207 pf_state_host_ntoh(&sp->gwy, &st->gwy);
208 pf_state_host_ntoh(&sp->ext, &st->ext);
209
210 pf_state_peer_ntoh(&sp->src, &st->src);
211 pf_state_peer_ntoh(&sp->dst, &st->dst);
212
213 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
214 st->creation = time_second - ntohl(sp->creation);
215 st->expire = ntohl(sp->expire) + time_second;
216
217 st->af = sp->af;
218 st->proto = sp->proto;
219 st->direction = sp->direction;
220 st->log = sp->log;
221 st->timeout = sp->timeout;
222 st->allow_opts = sp->allow_opts;
223
224 bcopy(sp->id, &st->id, sizeof(st->id));
225 st->creatorid = sp->creatorid;
226 st->sync_flags = PFSTATE_FROMSYNC;
227
228
229 if (pf_insert_state(kif, st)) {
230 pfi_maybe_destroy(kif);
231 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
232 r->states--;
233 pool_put(&pf_state_pl, st);
234 return (EINVAL);
235 }
236
237 return (0);
238 }
239
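/*
 * Input routine for IPPROTO_PFSYNC packets.  Verify that the packet came
 * in on the configured sync interface with TTL 255 and carries a valid
 * header, then dispatch on the action: clear, insert, update or delete
 * states (compressed or uncompressed), answer update requests and handle
 * bulk update start/end messages.
 */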
240 void
241 pfsync_input(struct mbuf *m, ...)
242 {
243 struct ip *ip = mtod(m, struct ip *);
244 struct pfsync_header *ph;
245 struct pfsync_softc *sc = &pfsyncif;
246 struct pf_state *st, key;
247 struct pfsync_state *sp;
248 struct pfsync_state_upd *up;
249 struct pfsync_state_del *dp;
250 struct pfsync_state_clr *cp;
251 struct pfsync_state_upd_req *rup;
252 struct pfsync_state_bus *bus;
253 struct in_addr src;
254 struct mbuf *mp;
255 int iplen, action, error, i, s, count, offp, sfail, stale = 0;
256
257 pfsyncstats.pfsyncs_ipackets++;
258
259 /* verify that we have a sync interface configured */
260 if (!sc->sc_sync_ifp || !pf_status.running)
261 goto done;
262
263 /* verify that the packet came in on the right interface */
264 if (sc->sc_sync_ifp != m->m_pkthdr.rcvif) {
265 pfsyncstats.pfsyncs_badif++;
266 goto done;
267 }
268
269 /* verify that the IP TTL is 255. */
270 if (ip->ip_ttl != PFSYNC_DFLTTL) {
271 pfsyncstats.pfsyncs_badttl++;
272 goto done;
273 }
274
275 iplen = ip->ip_hl << 2;
276
277 if (m->m_pkthdr.len < iplen + sizeof(*ph)) {
278 pfsyncstats.pfsyncs_hdrops++;
279 goto done;
280 }
281
282 if (iplen + sizeof(*ph) > m->m_len) {
283 if ((m = m_pullup(m, iplen + sizeof(*ph))) == NULL) {
284 pfsyncstats.pfsyncs_hdrops++;
285 goto done;
286 }
287 ip = mtod(m, struct ip *);
288 }
289 ph = (struct pfsync_header *)((char *)ip + iplen);
290
291 /* verify the version */
292 if (ph->version != PFSYNC_VERSION) {
293 pfsyncstats.pfsyncs_badver++;
294 goto done;
295 }
296
297 action = ph->action;
298 count = ph->count;
299
300 /* make sure it's a valid action code */
301 if (action >= PFSYNC_ACT_MAX) {
302 pfsyncstats.pfsyncs_badact++;
303 goto done;
304 }
305
306 /* Cheaper to grab this now than having to mess with mbufs later */
307 src = ip->ip_src;
308
309 switch (action) {
310 case PFSYNC_ACT_CLR: {
311 struct pf_state *nexts;
312 struct pfi_kif *kif;
313 u_int32_t creatorid;
314 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
315 sizeof(*cp), &offp)) == NULL) {
316 pfsyncstats.pfsyncs_badlen++;
317 return;
318 }
319 cp = (struct pfsync_state_clr *)(mp->m_data + offp);
320 creatorid = cp->creatorid;
321
322 s = splsoftnet();
323 if (cp->ifname[0] == '\0') {
324 for (st = RB_MIN(pf_state_tree_id, &tree_id);
325 st; st = nexts) {
326 nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
327 if (st->creatorid == creatorid) {
328 st->timeout = PFTM_PURGE;
329 pf_purge_expired_state(st);
330 }
331 }
332 } else {
333 kif = pfi_lookup_if(cp->ifname);
334 if (kif == NULL) {
335 if (pf_status.debug >= PF_DEBUG_MISC)
336 printf("pfsync_input: PFSYNC_ACT_CLR "
337 "bad interface: %s\n", cp->ifname);
338 splx(s);
339 goto done;
340 }
341 for (st = RB_MIN(pf_state_tree_lan_ext,
342 &kif->pfik_lan_ext); st; st = nexts) {
343 nexts = RB_NEXT(pf_state_tree_lan_ext,
344 &kif->pfik_lan_ext, st);
345 if (st->creatorid == creatorid) {
346 st->timeout = PFTM_PURGE;
347 pf_purge_expired_state(st);
348 }
349 }
350 }
351 splx(s);
352
353 break;
354 }
355 case PFSYNC_ACT_INS:
356 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
357 count * sizeof(*sp), &offp)) == NULL) {
358 pfsyncstats.pfsyncs_badlen++;
359 return;
360 }
361
362 s = splsoftnet();
363 for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
364 i < count; i++, sp++) {
365 /* check for invalid values */
366 if (sp->timeout >= PFTM_MAX ||
367 sp->src.state > PF_TCPS_PROXY_DST ||
368 sp->dst.state > PF_TCPS_PROXY_DST ||
369 sp->direction > PF_OUT ||
370 (sp->af != AF_INET && sp->af != AF_INET6)) {
371 if (pf_status.debug >= PF_DEBUG_MISC)
372 					printf("pfsync_input: PFSYNC_ACT_INS: "
373 "invalid value\n");
374 pfsyncstats.pfsyncs_badstate++;
375 continue;
376 }
377
378 if ((error = pfsync_insert_net_state(sp))) {
379 if (error == ENOMEM) {
380 splx(s);
381 goto done;
382 }
383 continue;
384 }
385 }
386 splx(s);
387 break;
388 case PFSYNC_ACT_UPD:
389 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
390 count * sizeof(*sp), &offp)) == NULL) {
391 pfsyncstats.pfsyncs_badlen++;
392 return;
393 }
394
395 s = splsoftnet();
396 for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
397 i < count; i++, sp++) {
398 int flags = PFSYNC_FLAG_STALE;
399
400 /* check for invalid values */
401 if (sp->timeout >= PFTM_MAX ||
402 sp->src.state > PF_TCPS_PROXY_DST ||
403 sp->dst.state > PF_TCPS_PROXY_DST) {
404 if (pf_status.debug >= PF_DEBUG_MISC)
405 					printf("pfsync_input: PFSYNC_ACT_UPD: "
406 "invalid value\n");
407 pfsyncstats.pfsyncs_badstate++;
408 continue;
409 }
410
411 bcopy(sp->id, &key.id, sizeof(key.id));
412 key.creatorid = sp->creatorid;
413
414 st = pf_find_state_byid(&key);
415 if (st == NULL) {
416 /* insert the update */
417 if (pfsync_insert_net_state(sp))
418 pfsyncstats.pfsyncs_badstate++;
419 continue;
420 }
421 sfail = 0;
422 if (st->proto == IPPROTO_TCP) {
423 /*
424 * The state should never go backwards except
425 * for syn-proxy states. Neither should the
426 * sequence window slide backwards.
427 */
428 if (st->src.state > sp->src.state &&
429 (st->src.state < PF_TCPS_PROXY_SRC ||
430 sp->src.state >= PF_TCPS_PROXY_SRC))
431 sfail = 1;
432 else if (SEQ_GT(st->src.seqlo,
433 ntohl(sp->src.seqlo)))
434 sfail = 3;
435 else if (st->dst.state > sp->dst.state) {
436 /* There might still be useful
437 * information about the src state here,
438 * so import that part of the update,
439 * then "fail" so we send the updated
440 					 * state back to the peer who is
441 					 * missing what we know. */
442 pf_state_peer_ntoh(&sp->src, &st->src);
443 /* XXX do anything with timeouts? */
444 sfail = 7;
445 flags = 0;
446 } else if (st->dst.state >= TCPS_SYN_SENT &&
447 SEQ_GT(st->dst.seqlo, ntohl(sp->dst.seqlo)))
448 sfail = 4;
449 } else {
450 /*
451 				 * Non-TCP protocol state machines always go
452 * forwards
453 */
454 if (st->src.state > sp->src.state)
455 sfail = 5;
456 				else if (st->dst.state > sp->dst.state)
457 sfail = 6;
458 }
459 if (sfail) {
460 if (pf_status.debug >= PF_DEBUG_MISC)
461 printf("pfsync: %s stale update "
462 "(%d) id: %016llx "
463 "creatorid: %08x\n",
464 (sfail < 7 ? "ignoring"
465 : "partial"), sfail,
466 betoh64(st->id),
467 ntohl(st->creatorid));
468 pfsyncstats.pfsyncs_badstate++;
469
470 if (!(sp->sync_flags & PFSTATE_STALE)) {
471 /* we have a better state, send it */
472 if (sc->sc_mbuf != NULL && !stale)
473 pfsync_sendout(sc);
474 stale++;
475 if (!st->sync_flags)
476 pfsync_pack_state(
477 PFSYNC_ACT_UPD, st, flags);
478 }
479 continue;
480 }
481 pf_state_peer_ntoh(&sp->src, &st->src);
482 pf_state_peer_ntoh(&sp->dst, &st->dst);
483 st->expire = ntohl(sp->expire) + time_second;
484 st->timeout = sp->timeout;
485 }
486 if (stale && sc->sc_mbuf != NULL)
487 pfsync_sendout(sc);
488 splx(s);
489 break;
490 /*
491 * It's not strictly necessary for us to support the "uncompressed"
492 * delete action, but it's relatively simple and maintains consistency.
493 */
494 case PFSYNC_ACT_DEL:
495 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
496 count * sizeof(*sp), &offp)) == NULL) {
497 pfsyncstats.pfsyncs_badlen++;
498 return;
499 }
500
501 s = splsoftnet();
502 for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
503 i < count; i++, sp++) {
504 bcopy(sp->id, &key.id, sizeof(key.id));
505 key.creatorid = sp->creatorid;
506
507 st = pf_find_state_byid(&key);
508 if (st == NULL) {
509 pfsyncstats.pfsyncs_badstate++;
510 continue;
511 }
512 st->timeout = PFTM_PURGE;
513 st->sync_flags |= PFSTATE_FROMSYNC;
514 pf_purge_expired_state(st);
515 }
516 splx(s);
517 break;
518 case PFSYNC_ACT_UPD_C: {
519 int update_requested = 0;
520
521 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
522 count * sizeof(*up), &offp)) == NULL) {
523 pfsyncstats.pfsyncs_badlen++;
524 return;
525 }
526
527 s = splsoftnet();
528 for (i = 0, up = (struct pfsync_state_upd *)(mp->m_data + offp);
529 i < count; i++, up++) {
530 /* check for invalid values */
531 if (up->timeout >= PFTM_MAX ||
532 up->src.state > PF_TCPS_PROXY_DST ||
533 up->dst.state > PF_TCPS_PROXY_DST) {
534 if (pf_status.debug >= PF_DEBUG_MISC)
535 					printf("pfsync_input: "
536 "PFSYNC_ACT_UPD_C: "
537 "invalid value\n");
538 pfsyncstats.pfsyncs_badstate++;
539 continue;
540 }
541
542 bcopy(up->id, &key.id, sizeof(key.id));
543 key.creatorid = up->creatorid;
544
545 st = pf_find_state_byid(&key);
546 if (st == NULL) {
547 /* We don't have this state. Ask for it. */
548 error = pfsync_request_update(up, &src);
549 if (error == ENOMEM) {
550 splx(s);
551 goto done;
552 }
553 update_requested = 1;
554 pfsyncstats.pfsyncs_badstate++;
555 continue;
556 }
557 sfail = 0;
558 if (st->proto == IPPROTO_TCP) {
559 /*
560 * The state should never go backwards except
561 * for syn-proxy states. Neither should the
562 * sequence window slide backwards.
563 */
564 if (st->src.state > up->src.state &&
565 (st->src.state < PF_TCPS_PROXY_SRC ||
566 up->src.state >= PF_TCPS_PROXY_SRC))
567 sfail = 1;
568 else if (st->dst.state > up->dst.state)
569 sfail = 2;
570 else if (SEQ_GT(st->src.seqlo,
571 ntohl(up->src.seqlo)))
572 sfail = 3;
573 else if (st->dst.state >= TCPS_SYN_SENT &&
574 SEQ_GT(st->dst.seqlo, ntohl(up->dst.seqlo)))
575 sfail = 4;
576 } else {
577 /*
578 				 * Non-TCP protocol state machines always go
579 * forwards
580 */
581 if (st->src.state > up->src.state)
582 sfail = 5;
583 else if (st->dst.state > up->dst.state)
584 sfail = 6;
585 }
586 if (sfail) {
587 if (pf_status.debug >= PF_DEBUG_MISC)
588 printf("pfsync: ignoring stale update "
589 "(%d) id: %016llx "
590 "creatorid: %08x\n", sfail,
591 betoh64(st->id),
592 ntohl(st->creatorid));
593 pfsyncstats.pfsyncs_badstate++;
594
595 /* we have a better state, send it out */
596 if ((!stale || update_requested) &&
597 sc->sc_mbuf != NULL) {
598 pfsync_sendout(sc);
599 update_requested = 0;
600 }
601 stale++;
602 if (!st->sync_flags)
603 pfsync_pack_state(PFSYNC_ACT_UPD, st,
604 PFSYNC_FLAG_STALE);
605 continue;
606 }
607 pf_state_peer_ntoh(&up->src, &st->src);
608 pf_state_peer_ntoh(&up->dst, &st->dst);
609 st->expire = ntohl(up->expire) + time_second;
610 st->timeout = up->timeout;
611 }
612 if ((update_requested || stale) && sc->sc_mbuf)
613 pfsync_sendout(sc);
614 splx(s);
615 break;
616 }
617 case PFSYNC_ACT_DEL_C:
618 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
619 count * sizeof(*dp), &offp)) == NULL) {
620 pfsyncstats.pfsyncs_badlen++;
621 return;
622 }
623
624 s = splsoftnet();
625 for (i = 0, dp = (struct pfsync_state_del *)(mp->m_data + offp);
626 i < count; i++, dp++) {
627 bcopy(dp->id, &key.id, sizeof(key.id));
628 key.creatorid = dp->creatorid;
629
630 st = pf_find_state_byid(&key);
631 if (st == NULL) {
632 pfsyncstats.pfsyncs_badstate++;
633 continue;
634 }
635 st->timeout = PFTM_PURGE;
636 st->sync_flags |= PFSTATE_FROMSYNC;
637 pf_purge_expired_state(st);
638 }
639 splx(s);
640 break;
641 case PFSYNC_ACT_INS_F:
642 case PFSYNC_ACT_DEL_F:
643 /* not implemented */
644 break;
645 case PFSYNC_ACT_UREQ:
646 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
647 count * sizeof(*rup), &offp)) == NULL) {
648 pfsyncstats.pfsyncs_badlen++;
649 return;
650 }
651
652 s = splsoftnet();
653 if (sc->sc_mbuf != NULL)
654 pfsync_sendout(sc);
655 for (i = 0,
656 rup = (struct pfsync_state_upd_req *)(mp->m_data + offp);
657 i < count; i++, rup++) {
658 bcopy(rup->id, &key.id, sizeof(key.id));
659 key.creatorid = rup->creatorid;
660
661 if (key.id == 0 && key.creatorid == 0) {
662 sc->sc_ureq_received = time_uptime;
663 if (pf_status.debug >= PF_DEBUG_MISC)
664 printf("pfsync: received "
665 "bulk update request\n");
666 pfsync_send_bus(sc, PFSYNC_BUS_START);
667 timeout_add(&sc->sc_bulk_tmo, 1 * hz);
668 } else {
669 st = pf_find_state_byid(&key);
670 if (st == NULL) {
671 pfsyncstats.pfsyncs_badstate++;
672 continue;
673 }
674 if (!st->sync_flags)
675 pfsync_pack_state(PFSYNC_ACT_UPD,
676 st, 0);
677 }
678 }
679 if (sc->sc_mbuf != NULL)
680 pfsync_sendout(sc);
681 splx(s);
682 break;
683 case PFSYNC_ACT_BUS:
684 /* If we're not waiting for a bulk update, who cares. */
685 if (sc->sc_ureq_sent == 0)
686 break;
687
688 if ((mp = m_pulldown(m, iplen + sizeof(*ph),
689 sizeof(*bus), &offp)) == NULL) {
690 pfsyncstats.pfsyncs_badlen++;
691 return;
692 }
693 bus = (struct pfsync_state_bus *)(mp->m_data + offp);
694 switch (bus->status) {
695 case PFSYNC_BUS_START:
696 timeout_add(&sc->sc_bulkfail_tmo,
697 pf_pool_limits[PF_LIMIT_STATES].limit /
698 (PFSYNC_BULKPACKETS * sc->sc_maxcount));
699 if (pf_status.debug >= PF_DEBUG_MISC)
700 printf("pfsync: received bulk "
701 "update start\n");
702 break;
703 case PFSYNC_BUS_END:
704 if (time_uptime - ntohl(bus->endtime) >=
705 sc->sc_ureq_sent) {
706 /* that's it, we're happy */
707 sc->sc_ureq_sent = 0;
708 sc->sc_bulk_tries = 0;
709 timeout_del(&sc->sc_bulkfail_tmo);
710 #if NCARP > 0
711 if (!pfsync_sync_ok)
712 carp_suppress_preempt--;
713 #endif
714 pfsync_sync_ok = 1;
715 if (pf_status.debug >= PF_DEBUG_MISC)
716 printf("pfsync: received valid "
717 "bulk update end\n");
718 } else {
719 if (pf_status.debug >= PF_DEBUG_MISC)
720 printf("pfsync: received invalid "
721 "bulk update end: bad timestamp\n");
722 }
723 break;
724 }
725 break;
726 }
727
728 done:
729 if (m)
730 m_freem(m);
731 }
732
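/* Packets handed to the interface for output are simply discarded. */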
733 int
734 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
735 struct rtentry *rt)
736 {
737 m_freem(m);
738 return (0);
739 }
740
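/*
 * Interface ioctl handler.  Besides the usual flag and MTU handling, this
 * implements SIOCGETPFSYNC/SIOCSETPFSYNC, which ifconfig(8) uses to read
 * and set the sync device, sync peer and maximum updates per state.
 * Configuring a sync device joins the pfsync multicast group (unless a
 * unicast peer is set) and requests a bulk update from the peer.
 */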
741 /* ARGSUSED */
742 int
743 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
744 {
745 struct proc *p = curproc;
746 struct pfsync_softc *sc = ifp->if_softc;
747 struct ifreq *ifr = (struct ifreq *)data;
748 struct ip_moptions *imo = &sc->sc_imo;
749 struct pfsyncreq pfsyncr;
750 struct ifnet *sifp;
751 int s, error;
752
753 switch (cmd) {
754 case SIOCSIFADDR:
755 case SIOCAIFADDR:
756 case SIOCSIFDSTADDR:
757 case SIOCSIFFLAGS:
758 if (ifp->if_flags & IFF_UP)
759 ifp->if_flags |= IFF_RUNNING;
760 else
761 ifp->if_flags &= ~IFF_RUNNING;
762 break;
763 case SIOCSIFMTU:
764 if (ifr->ifr_mtu < PFSYNC_MINMTU)
765 return (EINVAL);
766 if (ifr->ifr_mtu > MCLBYTES)
767 ifr->ifr_mtu = MCLBYTES;
768 s = splnet();
769 if (ifr->ifr_mtu < ifp->if_mtu)
770 pfsync_sendout(sc);
771 pfsync_setmtu(sc, ifr->ifr_mtu);
772 splx(s);
773 break;
774 case SIOCGETPFSYNC:
775 bzero(&pfsyncr, sizeof(pfsyncr));
776 if (sc->sc_sync_ifp)
777 strlcpy(pfsyncr.pfsyncr_syncdev,
778 sc->sc_sync_ifp->if_xname, IFNAMSIZ);
779 pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
780 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
781 if ((error = copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))))
782 return (error);
783 break;
784 case SIOCSETPFSYNC:
785 if ((error = suser(p, p->p_acflag)) != 0)
786 return (error);
787 if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
788 return (error);
789
790 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
791 sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
792 else
793 sc->sc_sync_peer.s_addr =
794 pfsyncr.pfsyncr_syncpeer.s_addr;
795
796 if (pfsyncr.pfsyncr_maxupdates > 255)
797 return (EINVAL);
798 sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
799
800 if (pfsyncr.pfsyncr_syncdev[0] == 0) {
801 sc->sc_sync_ifp = NULL;
802 if (sc->sc_mbuf_net != NULL) {
803 /* Don't keep stale pfsync packets around. */
804 s = splnet();
805 m_freem(sc->sc_mbuf_net);
806 sc->sc_mbuf_net = NULL;
807 sc->sc_statep_net.s = NULL;
808 splx(s);
809 }
810 if (imo->imo_num_memberships > 0) {
811 in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
812 imo->imo_multicast_ifp = NULL;
813 }
814 break;
815 }
816
817 if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
818 return (EINVAL);
819
820 s = splnet();
821 if (sifp->if_mtu < sc->sc_if.if_mtu ||
822 (sc->sc_sync_ifp != NULL &&
823 sifp->if_mtu < sc->sc_sync_ifp->if_mtu) ||
824 sifp->if_mtu < MCLBYTES - sizeof(struct ip))
825 pfsync_sendout(sc);
826 sc->sc_sync_ifp = sifp;
827
828 pfsync_setmtu(sc, sc->sc_if.if_mtu);
829
830 if (imo->imo_num_memberships > 0) {
831 in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
832 imo->imo_multicast_ifp = NULL;
833 }
834
835 if (sc->sc_sync_ifp &&
836 sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
837 struct in_addr addr;
838
839 if (!(sc->sc_sync_ifp->if_flags & IFF_MULTICAST)) {
840 sc->sc_sync_ifp = NULL;
841 splx(s);
842 return (EADDRNOTAVAIL);
843 }
844
845 addr.s_addr = INADDR_PFSYNC_GROUP;
846
847 if ((imo->imo_membership[0] =
848 in_addmulti(&addr, sc->sc_sync_ifp)) == NULL) {
849 sc->sc_sync_ifp = NULL;
850 splx(s);
851 return (ENOBUFS);
852 }
853 imo->imo_num_memberships++;
854 imo->imo_multicast_ifp = sc->sc_sync_ifp;
855 imo->imo_multicast_ttl = PFSYNC_DFLTTL;
856 imo->imo_multicast_loop = 0;
857 }
858
859 if (sc->sc_sync_ifp ||
860 sc->sc_sendaddr.s_addr != INADDR_PFSYNC_GROUP) {
861 /* Request a full state table update. */
862 sc->sc_ureq_sent = time_uptime;
863 #if NCARP > 0
864 if (pfsync_sync_ok)
865 carp_suppress_preempt++;
866 #endif
867 pfsync_sync_ok = 0;
868 if (pf_status.debug >= PF_DEBUG_MISC)
869 printf("pfsync: requesting bulk update\n");
870 timeout_add(&sc->sc_bulkfail_tmo, 5 * hz);
871 error = pfsync_request_update(NULL, NULL);
872 if (error == ENOMEM) {
873 splx(s);
874 return (ENOMEM);
875 }
876 pfsync_sendout(sc);
877 }
878 splx(s);
879
880 break;
881
882 default:
883 return (ENOTTY);
884 }
885
886 return (0);
887 }
888
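/*
 * Recompute how many state records fit in one packet for the requested
 * MTU (bounded by the sync interface MTU and capped at 254) and set the
 * interface MTU to exactly a header plus that many pfsync_state records.
 */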
889 void
890 pfsync_setmtu(struct pfsync_softc *sc, int mtu_req)
891 {
892 int mtu;
893
894 if (sc->sc_sync_ifp && sc->sc_sync_ifp->if_mtu < mtu_req)
895 mtu = sc->sc_sync_ifp->if_mtu;
896 else
897 mtu = mtu_req;
898
899 sc->sc_maxcount = (mtu - sizeof(struct pfsync_header)) /
900 sizeof(struct pfsync_state);
901 if (sc->sc_maxcount > 254)
902 sc->sc_maxcount = 254;
903 sc->sc_if.if_mtu = sizeof(struct pfsync_header) +
904 sc->sc_maxcount * sizeof(struct pfsync_state);
905 }
906
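/*
 * Allocate an mbuf (cluster if needed) initialized with a pfsync header
 * for the given action, sized for the records that action carries, arm
 * the flush timeout and return the start of the payload through *sp.
 */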
907 struct mbuf *
908 pfsync_get_mbuf(struct pfsync_softc *sc, u_int8_t action, void **sp)
909 {
910 struct pfsync_header *h;
911 struct mbuf *m;
912 int len;
913
914 MGETHDR(m, M_DONTWAIT, MT_DATA);
915 if (m == NULL) {
916 sc->sc_if.if_oerrors++;
917 return (NULL);
918 }
919
920 switch (action) {
921 case PFSYNC_ACT_CLR:
922 len = sizeof(struct pfsync_header) +
923 sizeof(struct pfsync_state_clr);
924 break;
925 case PFSYNC_ACT_UPD_C:
926 len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd)) +
927 sizeof(struct pfsync_header);
928 break;
929 case PFSYNC_ACT_DEL_C:
930 len = (sc->sc_maxcount * sizeof(struct pfsync_state_del)) +
931 sizeof(struct pfsync_header);
932 break;
933 case PFSYNC_ACT_UREQ:
934 len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd_req)) +
935 sizeof(struct pfsync_header);
936 break;
937 case PFSYNC_ACT_BUS:
938 len = sizeof(struct pfsync_header) +
939 sizeof(struct pfsync_state_bus);
940 break;
941 default:
942 len = (sc->sc_maxcount * sizeof(struct pfsync_state)) +
943 sizeof(struct pfsync_header);
944 break;
945 }
946
947 if (len > MHLEN) {
948 MCLGET(m, M_DONTWAIT);
949 if ((m->m_flags & M_EXT) == 0) {
950 m_free(m);
951 sc->sc_if.if_oerrors++;
952 return (NULL);
953 }
954 m->m_data += (MCLBYTES - len) &~ (sizeof(long) - 1);
955 } else
956 MH_ALIGN(m, len);
957
958 m->m_pkthdr.rcvif = NULL;
959 m->m_pkthdr.len = m->m_len = sizeof(struct pfsync_header);
960 h = mtod(m, struct pfsync_header *);
961 h->version = PFSYNC_VERSION;
962 h->af = 0;
963 h->count = 0;
964 h->action = action;
965
966 *sp = (void *)((char *)h + PFSYNC_HDRLEN);
967 timeout_add(&sc->sc_tmo, hz);
968 return (m);
969 }
970
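/*
 * Queue an insert/update/delete for state st in the pending pfsync packet,
 * starting a new packet if none exists or the action changed, and merging
 * repeated updates for the same state.  If a sync interface is configured
 * and the compress flag is set, updates and deletes are also encoded in
 * compressed form in sc_mbuf_net.  The packet is flushed once it is full
 * or the state has accumulated sc_maxupdates updates.
 */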
971 int
972 pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
973 {
974 struct ifnet *ifp = &pfsyncif.sc_if;
975 struct pfsync_softc *sc = ifp->if_softc;
976 struct pfsync_header *h, *h_net;
977 struct pfsync_state *sp = NULL;
978 struct pfsync_state_upd *up = NULL;
979 struct pfsync_state_del *dp = NULL;
980 struct pf_rule *r;
981 u_long secs;
982 int s, ret = 0;
983 u_int8_t i = 255, newaction = 0;
984
985 /*
986 * If a packet falls in the forest and there's nobody around to
987 * hear, does it make a sound?
988 */
989 if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
990 sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
991 /* Don't leave any stale pfsync packets hanging around. */
992 if (sc->sc_mbuf != NULL) {
993 m_freem(sc->sc_mbuf);
994 sc->sc_mbuf = NULL;
995 sc->sc_statep.s = NULL;
996 }
997 return (0);
998 }
999
1000 if (action >= PFSYNC_ACT_MAX)
1001 return (EINVAL);
1002
1003 s = splnet();
1004 if (sc->sc_mbuf == NULL) {
1005 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1006 (void *)&sc->sc_statep.s)) == NULL) {
1007 splx(s);
1008 return (ENOMEM);
1009 }
1010 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1011 } else {
1012 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1013 if (h->action != action) {
1014 pfsync_sendout(sc);
1015 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1016 (void *)&sc->sc_statep.s)) == NULL) {
1017 splx(s);
1018 return (ENOMEM);
1019 }
1020 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1021 } else {
1022 /*
1023 * If it's an update, look in the packet to see if
1024 * we already have an update for the state.
1025 */
1026 if (action == PFSYNC_ACT_UPD && sc->sc_maxupdates) {
1027 struct pfsync_state *usp =
1028 (void *)((char *)h + PFSYNC_HDRLEN);
1029
1030 for (i = 0; i < h->count; i++) {
1031 if (!memcmp(usp->id, &st->id,
1032 PFSYNC_ID_LEN) &&
1033 usp->creatorid == st->creatorid) {
1034 sp = usp;
1035 sp->updates++;
1036 break;
1037 }
1038 usp++;
1039 }
1040 }
1041 }
1042 }
1043
1044 secs = time_second;
1045
1046 st->pfsync_time = time_uptime;
1047 TAILQ_REMOVE(&state_updates, st, u.s.entry_updates);
1048 TAILQ_INSERT_TAIL(&state_updates, st, u.s.entry_updates);
1049
1050 if (sp == NULL) {
1051 /* not a "duplicate" update */
1052 i = 255;
1053 sp = sc->sc_statep.s++;
1054 sc->sc_mbuf->m_pkthdr.len =
1055 sc->sc_mbuf->m_len += sizeof(struct pfsync_state);
1056 h->count++;
1057 bzero(sp, sizeof(*sp));
1058
1059 bcopy(&st->id, sp->id, sizeof(sp->id));
1060 sp->creatorid = st->creatorid;
1061
1062 strlcpy(sp->ifname, st->u.s.kif->pfik_name, sizeof(sp->ifname));
1063 pf_state_host_hton(&st->lan, &sp->lan);
1064 pf_state_host_hton(&st->gwy, &sp->gwy);
1065 pf_state_host_hton(&st->ext, &sp->ext);
1066
1067 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
1068
1069 sp->creation = htonl(secs - st->creation);
1070 sp->packets[0] = htonl(st->packets[0]);
1071 sp->packets[1] = htonl(st->packets[1]);
1072 sp->bytes[0] = htonl(st->bytes[0]);
1073 sp->bytes[1] = htonl(st->bytes[1]);
1074 if ((r = st->rule.ptr) == NULL)
1075 sp->rule = htonl(-1);
1076 else
1077 sp->rule = htonl(r->nr);
1078 if ((r = st->anchor.ptr) == NULL)
1079 sp->anchor = htonl(-1);
1080 else
1081 sp->anchor = htonl(r->nr);
1082 sp->af = st->af;
1083 sp->proto = st->proto;
1084 sp->direction = st->direction;
1085 sp->log = st->log;
1086 sp->allow_opts = st->allow_opts;
1087 sp->timeout = st->timeout;
1088
1089 if (flags & PFSYNC_FLAG_STALE)
1090 sp->sync_flags |= PFSTATE_STALE;
1091 }
1092
1093 pf_state_peer_hton(&st->src, &sp->src);
1094 pf_state_peer_hton(&st->dst, &sp->dst);
1095
1096 if (st->expire <= secs)
1097 sp->expire = htonl(0);
1098 else
1099 sp->expire = htonl(st->expire - secs);
1100
1101 /* do we need to build "compressed" actions for network transfer? */
1102 if (sc->sc_sync_ifp && flags & PFSYNC_FLAG_COMPRESS) {
1103 switch (action) {
1104 case PFSYNC_ACT_UPD:
1105 newaction = PFSYNC_ACT_UPD_C;
1106 break;
1107 case PFSYNC_ACT_DEL:
1108 newaction = PFSYNC_ACT_DEL_C;
1109 break;
1110 default:
1111 /* by default we just send the uncompressed states */
1112 break;
1113 }
1114 }
1115
1116 if (newaction) {
1117 if (sc->sc_mbuf_net == NULL) {
1118 if ((sc->sc_mbuf_net = pfsync_get_mbuf(sc, newaction,
1119 (void *)&sc->sc_statep_net.s)) == NULL) {
1120 splx(s);
1121 return (ENOMEM);
1122 }
1123 }
1124 h_net = mtod(sc->sc_mbuf_net, struct pfsync_header *);
1125
1126 switch (newaction) {
1127 case PFSYNC_ACT_UPD_C:
1128 if (i != 255) {
1129 up = (void *)((char *)h_net +
1130 PFSYNC_HDRLEN + (i * sizeof(*up)));
1131 up->updates++;
1132 } else {
1133 h_net->count++;
1134 sc->sc_mbuf_net->m_pkthdr.len =
1135 sc->sc_mbuf_net->m_len += sizeof(*up);
1136 up = sc->sc_statep_net.u++;
1137
1138 bzero(up, sizeof(*up));
1139 bcopy(&st->id, up->id, sizeof(up->id));
1140 up->creatorid = st->creatorid;
1141 }
1142 up->timeout = st->timeout;
1143 up->expire = sp->expire;
1144 up->src = sp->src;
1145 up->dst = sp->dst;
1146 break;
1147 case PFSYNC_ACT_DEL_C:
1148 sc->sc_mbuf_net->m_pkthdr.len =
1149 sc->sc_mbuf_net->m_len += sizeof(*dp);
1150 dp = sc->sc_statep_net.d++;
1151 h_net->count++;
1152
1153 bzero(dp, sizeof(*dp));
1154 bcopy(&st->id, dp->id, sizeof(dp->id));
1155 dp->creatorid = st->creatorid;
1156 break;
1157 }
1158 }
1159
1160 if (h->count == sc->sc_maxcount ||
1161 (sc->sc_maxupdates && (sp->updates >= sc->sc_maxupdates)))
1162 ret = pfsync_sendout(sc);
1163
1164 splx(s);
1165 return (ret);
1166 }
1167
1168 /* This must be called in splnet() */
1169 int
1170 pfsync_request_update(struct pfsync_state_upd *up, struct in_addr *src)
1171 {
1172 struct ifnet *ifp = &pfsyncif.sc_if;
1173 struct pfsync_header *h;
1174 struct pfsync_softc *sc = ifp->if_softc;
1175 struct pfsync_state_upd_req *rup;
1176 int ret = 0;
1177
1178 if (sc->sc_mbuf == NULL) {
1179 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1180 (void *)&sc->sc_statep.s)) == NULL)
1181 return (ENOMEM);
1182 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1183 } else {
1184 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1185 if (h->action != PFSYNC_ACT_UREQ) {
1186 pfsync_sendout(sc);
1187 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1188 (void *)&sc->sc_statep.s)) == NULL)
1189 return (ENOMEM);
1190 h = mtod(sc->sc_mbuf, struct pfsync_header *);
1191 }
1192 }
1193
1194 if (src != NULL)
1195 sc->sc_sendaddr = *src;
1196 sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*rup);
1197 h->count++;
1198 rup = sc->sc_statep.r++;
1199 bzero(rup, sizeof(*rup));
1200 if (up != NULL) {
1201 bcopy(up->id, rup->id, sizeof(rup->id));
1202 rup->creatorid = up->creatorid;
1203 }
1204
1205 if (h->count == sc->sc_maxcount)
1206 ret = pfsync_sendout(sc);
1207
1208 return (ret);
1209 }
1210
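/*
 * Send a "clear states" message carrying the given creator id (and an
 * optional interface name) so peers purge the matching states.
 */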
1211 int
1212 pfsync_clear_states(u_int32_t creatorid, char *ifname)
1213 {
1214 struct ifnet *ifp = &pfsyncif.sc_if;
1215 struct pfsync_softc *sc = ifp->if_softc;
1216 struct pfsync_state_clr *cp;
1217 int s, ret;
1218
1219 s = splnet();
1220 if (sc->sc_mbuf != NULL)
1221 pfsync_sendout(sc);
1222 if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_CLR,
1223 (void *)&sc->sc_statep.c)) == NULL) {
1224 splx(s);
1225 return (ENOMEM);
1226 }
1227 sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*cp);
1228 cp = sc->sc_statep.c;
1229 cp->creatorid = creatorid;
1230 if (ifname != NULL)
1231 strlcpy(cp->ifname, ifname, IFNAMSIZ);
1232
1233 ret = (pfsync_sendout(sc));
1234 splx(s);
1235 return (ret);
1236 }
1237
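/* Flush timeout: send out whatever has been queued so far. */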
1238 void
1239 pfsync_timeout(void *v)
1240 {
1241 struct pfsync_softc *sc = v;
1242 int s;
1243
1244 s = splnet();
1245 pfsync_sendout(sc);
1246 splx(s);
1247 }
1248
1249 /* This must be called in splnet() */
1250 void
1251 pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
1252 {
1253 struct pfsync_state_bus *bus;
1254
1255 if (sc->sc_mbuf != NULL)
1256 pfsync_sendout(sc);
1257
1258 if (pfsync_sync_ok &&
1259 (sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_BUS,
1260 (void *)&sc->sc_statep.b)) != NULL) {
1261 sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
1262 bus = sc->sc_statep.b;
1263 bus->creatorid = pf_status.hostid;
1264 bus->status = status;
1265 bus->endtime = htonl(time_uptime - sc->sc_ureq_received);
1266 pfsync_sendout(sc);
1267 }
1268 }
1269
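/*
 * Bulk update timeout: send up to PFSYNC_BULKPACKETS packets worth of
 * states that have not been refreshed since the bulk request arrived,
 * rescheduling itself until it reaches a state newer than the request,
 * at which point it announces the end of the bulk update.
 */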
1270 void
1271 pfsync_bulk_update(void *v)
1272 {
1273 struct pfsync_softc *sc = v;
1274 int s, i = 0;
1275 struct pf_state *state;
1276
1277 s = splnet();
1278 if (sc->sc_mbuf != NULL)
1279 pfsync_sendout(sc);
1280
1281 /*
1282 * Grab at most PFSYNC_BULKPACKETS worth of states which have not
1283 * been sent since the latest request was made.
1284 */
1285 while ((state = TAILQ_FIRST(&state_updates)) != NULL &&
1286 ++i < (sc->sc_maxcount * PFSYNC_BULKPACKETS)) {
1287 if (state->pfsync_time > sc->sc_ureq_received) {
1288 /* we're done */
1289 pfsync_send_bus(sc, PFSYNC_BUS_END);
1290 sc->sc_ureq_received = 0;
1291 timeout_del(&sc->sc_bulk_tmo);
1292 if (pf_status.debug >= PF_DEBUG_MISC)
1293 printf("pfsync: bulk update complete\n");
1294 break;
1295 } else {
1296 /* send an update and move to end of list */
1297 if (!state->sync_flags)
1298 pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
1299 state->pfsync_time = time_uptime;
1300 TAILQ_REMOVE(&state_updates, state, u.s.entry_updates);
1301 TAILQ_INSERT_TAIL(&state_updates, state,
1302 u.s.entry_updates);
1303
1304 /* look again for more in a bit */
1305 timeout_add(&sc->sc_bulk_tmo, 1);
1306 }
1307 }
1308 if (sc->sc_mbuf != NULL)
1309 pfsync_sendout(sc);
1310 splx(s);
1311 }
1312
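/*
 * Bulk update failure timeout: if no valid bulk update end has arrived,
 * retry the update request up to PFSYNC_MAX_BULKTRIES times; otherwise
 * give up, re-enable carp preemption and proceed as if the transfer
 * had completed.
 */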
1313 void
1314 pfsync_bulkfail(void *v)
1315 {
1316 struct pfsync_softc *sc = v;
1317 int s, error;
1318
1319 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
1320 /* Try again in a bit */
1321 timeout_add(&sc->sc_bulkfail_tmo, 5 * hz);
1322 s = splnet();
1323 error = pfsync_request_update(NULL, NULL);
1324 if (error == ENOMEM) {
1325 if (pf_status.debug >= PF_DEBUG_MISC)
1326 printf("pfsync: cannot allocate mbufs for "
1327 "bulk update\n");
1328 } else
1329 pfsync_sendout(sc);
1330 splx(s);
1331 } else {
1332 /* Pretend like the transfer was ok */
1333 sc->sc_ureq_sent = 0;
1334 sc->sc_bulk_tries = 0;
1335 #if NCARP > 0
1336 if (!pfsync_sync_ok)
1337 carp_suppress_preempt--;
1338 #endif
1339 pfsync_sync_ok = 1;
1340 if (pf_status.debug >= PF_DEBUG_MISC)
1341 printf("pfsync: failed to receive "
1342 "bulk update status\n");
1343 timeout_del(&sc->sc_bulkfail_tmo);
1344 }
1345 }
1346
1347 /* This must be called in splnet() */
1348 int
1349 pfsync_sendout(struct pfsync_softc *sc)
1350 {
1352 struct ifnet *ifp = &sc->sc_if;
1353 struct mbuf *m;
1354
1355 timeout_del(&sc->sc_tmo);
1356
1357 if (sc->sc_mbuf == NULL)
1358 return (0);
1359 m = sc->sc_mbuf;
1360 sc->sc_mbuf = NULL;
1361 sc->sc_statep.s = NULL;
1362
1363 #if NBPFILTER > 0
1364 if (ifp->if_bpf)
1365 bpf_mtap(ifp->if_bpf, m);
1366 #endif
1367
1368 if (sc->sc_mbuf_net) {
1369 m_freem(m);
1370 m = sc->sc_mbuf_net;
1371 sc->sc_mbuf_net = NULL;
1372 sc->sc_statep_net.s = NULL;
1373 }
1374
1375 if (sc->sc_sync_ifp || sc->sc_sync_peer.s_addr != INADDR_PFSYNC_GROUP) {
1376 struct ip *ip;
1377 struct sockaddr sa;
1378
1379 M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
1380 if (m == NULL) {
1381 pfsyncstats.pfsyncs_onomem++;
1382 return (0);
1383 }
1384 ip = mtod(m, struct ip *);
1385 ip->ip_v = IPVERSION;
1386 ip->ip_hl = sizeof(*ip) >> 2;
1387 ip->ip_tos = IPTOS_LOWDELAY;
1388 ip->ip_len = htons(m->m_pkthdr.len);
1389 ip->ip_id = htons(ip_randomid());
1390 ip->ip_off = htons(IP_DF);
1391 ip->ip_ttl = PFSYNC_DFLTTL;
1392 ip->ip_p = IPPROTO_PFSYNC;
1393 ip->ip_sum = 0;
1394
1395 bzero(&sa, sizeof(sa));
1396 ip->ip_src.s_addr = INADDR_ANY;
1397
1398 if (sc->sc_sendaddr.s_addr == INADDR_PFSYNC_GROUP)
1399 m->m_flags |= M_MCAST;
1400 ip->ip_dst = sc->sc_sendaddr;
1401 sc->sc_sendaddr.s_addr = sc->sc_sync_peer.s_addr;
1402
1403 pfsyncstats.pfsyncs_opackets++;
1404
1405 if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL))
1406 pfsyncstats.pfsyncs_oerrors++;
1407 } else
1408 m_freem(m);
1409
1410 return (0);
1411 }
1412