pf_ioctl.c revision 1.1.1.4 1 /* $NetBSD: pf_ioctl.c,v 1.1.1.4 2009/12/01 07:03:13 martti Exp $ */
2 /* $OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #include "pfsync.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/kernel.h>
49 #include <sys/time.h>
50 #include <sys/timeout.h>
51 #include <sys/pool.h>
52 #include <sys/proc.h>
53 #include <sys/malloc.h>
54 #include <sys/kthread.h>
55 #include <sys/rwlock.h>
56 #include <uvm/uvm_extern.h>
57
58 #include <net/if.h>
59 #include <net/if_types.h>
60 #include <net/route.h>
61
62 #include <netinet/in.h>
63 #include <netinet/in_var.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/ip_icmp.h>
68
69 #include <dev/rndvar.h>
70 #include <crypto/md5.h>
71 #include <net/pfvar.h>
72
73 #if NPFSYNC > 0
74 #include <net/if_pfsync.h>
75 #endif /* NPFSYNC > 0 */
76
77 #if NPFLOG > 0
78 #include <net/if_pflog.h>
79 #endif /* NPFLOG > 0 */
80
81 #ifdef INET6
82 #include <netinet/ip6.h>
83 #include <netinet/in_pcb.h>
84 #endif /* INET6 */
85
86 #ifdef ALTQ
87 #include <altq/altq.h>
88 #endif
89
90 void pfattach(int);
91 void pf_thread_create(void *);
92 int pfopen(dev_t, int, int, struct proc *);
93 int pfclose(dev_t, int, int, struct proc *);
94 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
95 u_int8_t, u_int8_t, u_int8_t);
96
97 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
98 void pf_empty_pool(struct pf_palist *);
99 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
100 #ifdef ALTQ
101 int pf_begin_altq(u_int32_t *);
102 int pf_rollback_altq(u_int32_t);
103 int pf_commit_altq(u_int32_t);
104 int pf_enable_altq(struct pf_altq *);
105 int pf_disable_altq(struct pf_altq *);
106 #endif /* ALTQ */
107 int pf_begin_rules(u_int32_t *, int, const char *);
108 int pf_rollback_rules(u_int32_t, int, char *);
109 int pf_setup_pfsync_matching(struct pf_ruleset *);
110 void pf_hash_rule(MD5_CTX *, struct pf_rule *);
111 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
112 int pf_commit_rules(u_int32_t, int, char *);
113 void pf_state_export(struct pfsync_state *,
114 struct pf_state_key *, struct pf_state *);
115 void pf_state_import(struct pfsync_state *,
116 struct pf_state_key *, struct pf_state *);
117
118 struct pf_rule pf_default_rule;
119 struct rwlock pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
120 #ifdef ALTQ
121 static int pf_altq_running;
122 #endif
123
124 #define TAGID_MAX 50000
125 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
126 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
127
128 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
129 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
130 #endif
131 u_int16_t tagname2tag(struct pf_tags *, char *);
132 void tag2tagname(struct pf_tags *, u_int16_t, char *);
133 void tag_unref(struct pf_tags *, u_int16_t);
134 int pf_rtlabel_add(struct pf_addr_wrap *);
135 void pf_rtlabel_remove(struct pf_addr_wrap *);
136 void pf_rtlabel_copyout(struct pf_addr_wrap *);
137
138 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
139
/*
 * pfattach() -- one-time initialization of the pf subsystem, run at
 * pseudo-device attach time: memory pools, auxiliary modules (tables,
 * interfaces, OS fingerprints), the default rule and its timeouts, and
 * deferred creation of the purge thread.
 */
140 void
141 pfattach(int num)
142 {
143 u_int32_t *timeout = pf_default_rule.timeout;
144
/* Fixed-size allocators for the core pf object types. */
145 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
146 &pool_allocator_nointr);
147 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
148 "pfsrctrpl", NULL);
149 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
150 NULL);
151 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
152 "pfstatekeypl", NULL);
153 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
154 &pool_allocator_nointr);
155 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
156 "pfpooladdrpl", &pool_allocator_nointr);
/* Tables, interface bindings and passive OS fingerprinting. */
157 pfr_initialize();
158 pfi_initialize();
159 pf_osfp_initialize();
160
/* Cap the state pool at its configured hard limit. */
161 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
162 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
163
/* On small-memory machines (<= 100MB) use the reduced table limit. */
164 if (ctob(physmem) <= 100*1024*1024)
165 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
166 PFR_KENTRY_HIWAT_SMALL;
167
168 RB_INIT(&tree_src_tracking);
169 RB_INIT(&pf_anchors);
170 pf_init_ruleset(&pf_main_ruleset);
/* Two altq lists: one active, one staging (inactive) for transactions. */
171 TAILQ_INIT(&pf_altqs[0]);
172 TAILQ_INIT(&pf_altqs[1]);
173 TAILQ_INIT(&pf_pabuf);
174 pf_altqs_active = &pf_altqs[0];
175 pf_altqs_inactive = &pf_altqs[1];
176 TAILQ_INIT(&state_list);
177
178 /* default rule should never be garbage collected */
179 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
180 pf_default_rule.action = PF_PASS;
181 pf_default_rule.nr = -1;
182 pf_default_rule.rtableid = -1;
183
184 /* initialize default timeouts */
185 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
186 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
187 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
188 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
189 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
190 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
191 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
192 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
193 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
194 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
195 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
196 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
197 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
198 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
199 timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
200 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
201 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
202 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
203 timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
204 timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
205
206 pf_normalize_init();
207 bzero(&pf_status, sizeof(pf_status));
208 pf_status.debug = PF_DEBUG_URGENT;
209
210 /* XXX do our best to avoid a conflict */
211 pf_status.hostid = arc4random();
212
213 /* require process context to purge states, so perform in a thread */
214 kthread_create_deferred(pf_thread_create, NULL);
215 }
216
217 void
218 pf_thread_create(void *v)
219 {
220 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
221 panic("pfpurge thread");
222 }
223
224 int
225 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
226 {
227 if (minor(dev) >= 1)
228 return (ENXIO);
229 return (0);
230 }
231
232 int
233 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
234 {
235 if (minor(dev) >= 1)
236 return (ENXIO);
237 return (0);
238 }
239
/*
 * pf_get_pool() -- locate the address pool of one rule inside an anchor's
 * ruleset.  The rule is selected from either the active or the inactive
 * (transaction) list, optionally validated against a ticket, and picked
 * either as the last rule (r_last) or by its rule number.  Returns NULL
 * when the anchor, ruleset, ticket or rule cannot be resolved.
 */
240 struct pf_pool *
241 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
242 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
243 u_int8_t check_ticket)
244 {
245 struct pf_ruleset *ruleset;
246 struct pf_rule *rule;
247 int rs_num;
248
249 ruleset = pf_find_ruleset(anchor);
250 if (ruleset == NULL)
251 return (NULL);
252 rs_num = pf_get_ruleset_number(rule_action);
253 if (rs_num >= PF_RULESET_MAX)
254 return (NULL);
/* Pick the starting rule from the requested (active/inactive) list. */
255 if (active) {
256 if (check_ticket && ticket !=
257 ruleset->rules[rs_num].active.ticket)
258 return (NULL);
259 if (r_last)
260 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
261 pf_rulequeue);
262 else
263 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
264 } else {
265 if (check_ticket && ticket !=
266 ruleset->rules[rs_num].inactive.ticket)
267 return (NULL);
268 if (r_last)
269 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
270 pf_rulequeue);
271 else
272 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
273 }
/* Unless the caller asked for the last rule, walk to rule_number. */
274 if (!r_last) {
275 while ((rule != NULL) && (rule->nr != rule_number))
276 rule = TAILQ_NEXT(rule, entries);
277 }
278 if (rule == NULL)
279 return (NULL);
280
281 return (&rule->rpool);
282 }
283
284 void
285 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
286 {
287 struct pf_pooladdr *mv_pool_pa;
288
289 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
290 TAILQ_REMOVE(poola, mv_pool_pa, entries);
291 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
292 }
293 }
294
295 void
296 pf_empty_pool(struct pf_palist *poola)
297 {
298 struct pf_pooladdr *empty_pool_pa;
299
300 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
301 pfi_dynaddr_remove(&empty_pool_pa->addr);
302 pf_tbladdr_remove(&empty_pool_pa->addr);
303 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
304 TAILQ_REMOVE(poola, empty_pool_pa, entries);
305 pool_put(&pf_pooladdr_pl, empty_pool_pa);
306 }
307 }
308
/*
 * pf_rm_rule() -- unlink a rule from "rulequeue" (if given) and free it
 * once no states, source nodes or list membership reference it.  If the
 * rule is still referenced it is only unlinked here; the actual free
 * happens later, from a second call with rulequeue == NULL, once the
 * last state/src-node drops its reference.
 */
309 void
310 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
311 {
312 if (rulequeue != NULL) {
313 if (rule->states <= 0) {
314 /*
315 * XXX - we need to remove the table *before* detaching
316 * the rule to make sure the table code does not delete
317 * the anchor under our feet.
318 */
319 pf_tbladdr_remove(&rule->src.addr);
320 pf_tbladdr_remove(&rule->dst.addr);
321 if (rule->overload_tbl)
322 pfr_detach_table(rule->overload_tbl);
323 }
324 TAILQ_REMOVE(rulequeue, rule, entries);
/* tqe_prev == NULL marks the rule as unlinked for the check below. */
325 rule->entries.tqe_prev = NULL;
326 rule->nr = -1;
327 }
328
/* Still referenced (or still linked): defer the destruction. */
329 if (rule->states > 0 || rule->src_nodes > 0 ||
330 rule->entries.tqe_prev != NULL)
331 return;
332 pf_tag_unref(rule->tag);
333 pf_tag_unref(rule->match_tag);
334 #ifdef ALTQ
335 if (rule->pqid != rule->qid)
336 pf_qid_unref(rule->pqid);
337 pf_qid_unref(rule->qid);
338 #endif
339 pf_rtlabel_remove(&rule->src.addr);
340 pf_rtlabel_remove(&rule->dst.addr);
341 pfi_dynaddr_remove(&rule->src.addr);
342 pfi_dynaddr_remove(&rule->dst.addr);
/* Deferred-free path: table references were not dropped above. */
343 if (rulequeue == NULL) {
344 pf_tbladdr_remove(&rule->src.addr);
345 pf_tbladdr_remove(&rule->dst.addr);
346 if (rule->overload_tbl)
347 pfr_detach_table(rule->overload_tbl);
348 }
349 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
350 pf_anchor_remove(rule);
351 pf_empty_pool(&rule->rpool.list);
352 pool_put(&pf_rule_pl, rule);
353 }
354
/*
 * tagname2tag() -- map a tag name to a numeric id on "head", allocating
 * a new entry (with refcount 1) if the name is unknown.  Returns 0 on
 * failure (id space exhausted or allocation failure); valid ids start
 * at 1.
 */
355 u_int16_t
356 tagname2tag(struct pf_tags *head, char *tagname)
357 {
358 struct pf_tagname *tag, *p = NULL;
359 u_int16_t new_tagid = 1;
360
/* Existing name: bump the reference count and reuse its id. */
361 TAILQ_FOREACH(tag, head, entries)
362 if (strcmp(tagname, tag->name) == 0) {
363 tag->ref++;
364 return (tag->tag);
365 }
366
367 /*
368 * to avoid fragmentation, we do a linear search from the beginning
369 * and take the first free slot we find. if there is none or the list
370 * is empty, append a new entry at the end.
371 */
372
373 /* new entry */
/*
 * The list is kept sorted by tag id, so the first gap is found where
 * p->tag != new_tagid; the loop exits with p pointing just past the
 * gap (or NULL, meaning append at the tail).
 */
374 if (!TAILQ_EMPTY(head))
375 for (p = TAILQ_FIRST(head); p != NULL &&
376 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
377 new_tagid = p->tag + 1;
378
379 if (new_tagid > TAGID_MAX)
380 return (0);
381
382 /* allocate and fill new struct pf_tagname */
383 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
384 M_TEMP, M_NOWAIT);
385 if (tag == NULL)
386 return (0);
387 bzero(tag, sizeof(struct pf_tagname));
388 strlcpy(tag->name, tagname, sizeof(tag->name));
389 tag->tag = new_tagid;
390 tag->ref++;
391
392 if (p != NULL) /* insert new entry before p */
393 TAILQ_INSERT_BEFORE(p, tag, entries);
394 else /* either list empty or no free slot in between */
395 TAILQ_INSERT_TAIL(head, tag, entries);
396
397 return (tag->tag);
398 }
399
/*
 * tag2tagname() -- copy the name associated with "tagid" into the
 * caller-supplied buffer p (at least PF_TAG_NAME_SIZE bytes).  If the
 * id is unknown the buffer is left untouched, so callers must
 * pre-initialize it.
 */
400 void
401 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
402 {
403 struct pf_tagname *tag;
404
405 TAILQ_FOREACH(tag, head, entries)
406 if (tag->tag == tagid) {
407 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
408 return;
409 }
410 }
411
412 void
413 tag_unref(struct pf_tags *head, u_int16_t tag)
414 {
415 struct pf_tagname *p, *next;
416
417 if (tag == 0)
418 return;
419
420 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
421 next = TAILQ_NEXT(p, entries);
422 if (tag == p->tag) {
423 if (--p->ref == 0) {
424 TAILQ_REMOVE(head, p, entries);
425 free(p, M_TEMP);
426 }
427 break;
428 }
429 }
430 }
431
432 u_int16_t
433 pf_tagname2tag(char *tagname)
434 {
435 return (tagname2tag(&pf_tags, tagname));
436 }
437
438 void
439 pf_tag2tagname(u_int16_t tagid, char *p)
440 {
441 tag2tagname(&pf_tags, tagid, p);
442 }
443
444 void
445 pf_tag_ref(u_int16_t tag)
446 {
447 struct pf_tagname *t;
448
449 TAILQ_FOREACH(t, &pf_tags, entries)
450 if (t->tag == tag)
451 break;
452 if (t != NULL)
453 t->ref++;
454 }
455
456 void
457 pf_tag_unref(u_int16_t tag)
458 {
459 tag_unref(&pf_tags, tag);
460 }
461
462 int
463 pf_rtlabel_add(struct pf_addr_wrap *a)
464 {
465 if (a->type == PF_ADDR_RTLABEL &&
466 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
467 return (-1);
468 return (0);
469 }
470
471 void
472 pf_rtlabel_remove(struct pf_addr_wrap *a)
473 {
474 if (a->type == PF_ADDR_RTLABEL)
475 rtlabel_unref(a->v.rtlabel);
476 }
477
478 void
479 pf_rtlabel_copyout(struct pf_addr_wrap *a)
480 {
481 const char *name;
482
483 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
484 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
485 strlcpy(a->v.rtlabelname, "?",
486 sizeof(a->v.rtlabelname));
487 else
488 strlcpy(a->v.rtlabelname, name,
489 sizeof(a->v.rtlabelname));
490 }
491 }
492
493 #ifdef ALTQ
494 u_int32_t
495 pf_qname2qid(char *qname)
496 {
497 return ((u_int32_t)tagname2tag(&pf_qids, qname));
498 }
499
500 void
501 pf_qid2qname(u_int32_t qid, char *p)
502 {
503 tag2tagname(&pf_qids, (u_int16_t)qid, p);
504 }
505
506 void
507 pf_qid_unref(u_int32_t qid)
508 {
509 tag_unref(&pf_qids, (u_int16_t)qid);
510 }
511
512 int
513 pf_begin_altq(u_int32_t *ticket)
514 {
515 struct pf_altq *altq;
516 int error = 0;
517
518 /* Purge the old altq list */
519 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
520 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
521 if (altq->qname[0] == 0) {
522 /* detach and destroy the discipline */
523 error = altq_remove(altq);
524 } else
525 pf_qid_unref(altq->qid);
526 pool_put(&pf_altq_pl, altq);
527 }
528 if (error)
529 return (error);
530 *ticket = ++ticket_altqs_inactive;
531 altqs_inactive_open = 1;
532 return (0);
533 }
534
535 int
536 pf_rollback_altq(u_int32_t ticket)
537 {
538 struct pf_altq *altq;
539 int error = 0;
540
541 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
542 return (0);
543 /* Purge the old altq list */
544 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
545 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
546 if (altq->qname[0] == 0) {
547 /* detach and destroy the discipline */
548 error = altq_remove(altq);
549 } else
550 pf_qid_unref(altq->qid);
551 pool_put(&pf_altq_pl, altq);
552 }
553 altqs_inactive_open = 0;
554 return (error);
555 }
556
/*
 * pf_commit_altq() -- atomically (under splsoftnet) swap the staged
 * altq list in as the active one, attach/enable the new disciplines,
 * then tear down the previous active list.  Returns EBUSY on a stale
 * ticket, otherwise the first attach/detach error encountered.
 */
557 int
558 pf_commit_altq(u_int32_t ticket)
559 {
560 struct pf_altqqueue *old_altqs;
561 struct pf_altq *altq;
562 int s, err, error = 0;
563
564 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
565 return (EBUSY);
566
567 /* swap altqs, keep the old. */
568 s = splsoftnet();
569 old_altqs = pf_altqs_active;
570 pf_altqs_active = pf_altqs_inactive;
571 pf_altqs_inactive = old_altqs;
572 ticket_altqs_active = ticket_altqs_inactive;
573
574 /* Attach new disciplines */
575 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
576 if (altq->qname[0] == 0) {
577 /* attach the discipline */
578 error = altq_pfattach(altq);
579 if (error == 0 && pf_altq_running)
580 error = pf_enable_altq(altq);
/* NOTE(review): on failure the swapped-in list stays active and the old
 * list is not purged; callers are expected to roll back. */
581 if (error != 0) {
582 splx(s);
583 return (error);
584 }
585 }
586 }
587
588 /* Purge the old altq list */
589 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
590 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
591 if (altq->qname[0] == 0) {
592 /* detach and destroy the discipline */
593 if (pf_altq_running)
594 error = pf_disable_altq(altq);
595 err = altq_pfdetach(altq);
/* Keep the first error seen, but continue tearing everything down. */
596 if (err != 0 && error == 0)
597 error = err;
598 err = altq_remove(altq);
599 if (err != 0 && error == 0)
600 error = err;
601 } else
602 pf_qid_unref(altq->qid);
603 pool_put(&pf_altq_pl, altq);
604 }
605 splx(s);
606
607 altqs_inactive_open = 0;
608 return (error);
609 }
610
611 int
612 pf_enable_altq(struct pf_altq *altq)
613 {
614 struct ifnet *ifp;
615 struct tb_profile tb;
616 int s, error = 0;
617
618 if ((ifp = ifunit(altq->ifname)) == NULL)
619 return (EINVAL);
620
621 if (ifp->if_snd.altq_type != ALTQT_NONE)
622 error = altq_enable(&ifp->if_snd);
623
624 /* set tokenbucket regulator */
625 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
626 tb.rate = altq->ifbandwidth;
627 tb.depth = altq->tbrsize;
628 s = splnet();
629 error = tbr_set(&ifp->if_snd, &tb);
630 splx(s);
631 }
632
633 return (error);
634 }
635
636 int
637 pf_disable_altq(struct pf_altq *altq)
638 {
639 struct ifnet *ifp;
640 struct tb_profile tb;
641 int s, error;
642
643 if ((ifp = ifunit(altq->ifname)) == NULL)
644 return (EINVAL);
645
646 /*
647 * when the discipline is no longer referenced, it was overridden
648 * by a new one. if so, just return.
649 */
650 if (altq->altq_disc != ifp->if_snd.altq_disc)
651 return (0);
652
653 error = altq_disable(&ifp->if_snd);
654
655 if (error == 0) {
656 /* clear tokenbucket regulator */
657 tb.rate = 0;
658 s = splnet();
659 error = tbr_set(&ifp->if_snd, &tb);
660 splx(s);
661 }
662
663 return (error);
664 }
665 #endif /* ALTQ */
666
667 int
668 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
669 {
670 struct pf_ruleset *rs;
671 struct pf_rule *rule;
672
673 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
674 return (EINVAL);
675 rs = pf_find_or_create_ruleset(anchor);
676 if (rs == NULL)
677 return (EINVAL);
678 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
679 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
680 rs->rules[rs_num].inactive.rcount--;
681 }
682 *ticket = ++rs->rules[rs_num].inactive.ticket;
683 rs->rules[rs_num].inactive.open = 1;
684 return (0);
685 }
686
687 int
688 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
689 {
690 struct pf_ruleset *rs;
691 struct pf_rule *rule;
692
693 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
694 return (EINVAL);
695 rs = pf_find_ruleset(anchor);
696 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
697 rs->rules[rs_num].inactive.ticket != ticket)
698 return (0);
699 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
700 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
701 rs->rules[rs_num].inactive.rcount--;
702 }
703 rs->rules[rs_num].inactive.open = 0;
704 return (0);
705 }
706
/*
 * Helper macros for hashing rule fields into the pfsync ruleset
 * checksum.  "ctx" (an MD5_CTX *) is assumed to be in scope at the
 * expansion site.  The HTONL/HTONS variants convert through a caller-
 * supplied temporary so the digest is byte-order independent.
 */
707 #define PF_MD5_UPD(st, elm) \
708 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
709
710 #define PF_MD5_UPD_STR(st, elm) \
711 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
712
713 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
714 (stor) = htonl((st)->elm); \
715 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
716 } while (0)
717
718 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
719 (stor) = htons((st)->elm); \
720 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
721 } while (0)
722
/*
 * pf_hash_rule_addr() -- fold one rule address (and its ports/negation)
 * into the ruleset MD5 checksum.  Only the fields relevant for the
 * active address type are hashed.  The field order must not change, or
 * checksums stop matching between pfsync peers.
 */
723 void
724 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
725 {
726 PF_MD5_UPD(pfr, addr.type);
727 switch (pfr->addr.type) {
728 case PF_ADDR_DYNIFTL:
729 PF_MD5_UPD(pfr, addr.v.ifname);
730 PF_MD5_UPD(pfr, addr.iflags);
731 break;
732 case PF_ADDR_TABLE:
733 PF_MD5_UPD(pfr, addr.v.tblname);
734 break;
735 case PF_ADDR_ADDRMASK:
736 /* XXX ignore af? */
737 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
738 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
739 break;
740 case PF_ADDR_RTLABEL:
741 PF_MD5_UPD(pfr, addr.v.rtlabelname);
742 break;
743 }
744
745 PF_MD5_UPD(pfr, port[0]);
746 PF_MD5_UPD(pfr, port[1]);
747 PF_MD5_UPD(pfr, neg);
748 PF_MD5_UPD(pfr, port_op);
749 }
750
/*
 * pf_hash_rule() -- fold the match-relevant fields of one rule into the
 * ruleset MD5 checksum used for pfsync peer comparison.  Multi-byte
 * scalars go through x/y temporaries in network byte order so peers of
 * different endianness agree.  Field order is part of the wire
 * contract; do not reorder.
 */
751 void
752 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
753 {
754 u_int16_t x;
755 u_int32_t y;
756
757 pf_hash_rule_addr(ctx, &rule->src);
758 pf_hash_rule_addr(ctx, &rule->dst);
759 PF_MD5_UPD_STR(rule, label);
760 PF_MD5_UPD_STR(rule, ifname);
761 PF_MD5_UPD_STR(rule, match_tagname);
762 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
763 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
764 PF_MD5_UPD_HTONL(rule, prob, y);
765 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
766 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
767 PF_MD5_UPD(rule, uid.op);
768 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
769 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
770 PF_MD5_UPD(rule, gid.op);
771 PF_MD5_UPD_HTONL(rule, rule_flag, y);
772 PF_MD5_UPD(rule, action);
773 PF_MD5_UPD(rule, direction);
774 PF_MD5_UPD(rule, af);
775 PF_MD5_UPD(rule, quick);
776 PF_MD5_UPD(rule, ifnot);
777 PF_MD5_UPD(rule, match_tag_not);
778 PF_MD5_UPD(rule, natpass);
779 PF_MD5_UPD(rule, keep_state);
780 PF_MD5_UPD(rule, proto);
781 PF_MD5_UPD(rule, type);
782 PF_MD5_UPD(rule, code);
783 PF_MD5_UPD(rule, flags);
784 PF_MD5_UPD(rule, flagset);
785 PF_MD5_UPD(rule, allow_opts);
786 PF_MD5_UPD(rule, rt);
787 PF_MD5_UPD(rule, tos);
788 }
789
/*
 * pf_commit_rules() -- atomically (under splsoftnet) swap the staged
 * inactive ruleset in as the active one for (anchor, rs_num), then
 * destroy the previous active rules.  For the main ruleset the pfsync
 * checksum is recomputed first.  Returns EBUSY on a stale ticket.
 */
790 int
791 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
792 {
793 struct pf_ruleset *rs;
794 struct pf_rule *rule, **old_array;
795 struct pf_rulequeue *old_rules;
796 int s, error;
797 u_int32_t old_rcount;
798
799 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
800 return (EINVAL);
801 rs = pf_find_ruleset(anchor);
802 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
803 ticket != rs->rules[rs_num].inactive.ticket)
804 return (EBUSY);
805
806 /* Calculate checksum for the main ruleset */
807 if (rs == &pf_main_ruleset) {
808 error = pf_setup_pfsync_matching(rs);
809 if (error != 0)
810 return (error);
811 }
812
813 /* Swap rules, keep the old. */
814 s = splsoftnet();
815 old_rules = rs->rules[rs_num].active.ptr;
816 old_rcount = rs->rules[rs_num].active.rcount;
817 old_array = rs->rules[rs_num].active.ptr_array;
818
819 rs->rules[rs_num].active.ptr =
820 rs->rules[rs_num].inactive.ptr;
821 rs->rules[rs_num].active.ptr_array =
822 rs->rules[rs_num].inactive.ptr_array;
823 rs->rules[rs_num].active.rcount =
824 rs->rules[rs_num].inactive.rcount;
825 rs->rules[rs_num].inactive.ptr = old_rules;
826 rs->rules[rs_num].inactive.ptr_array = old_array;
827 rs->rules[rs_num].inactive.rcount = old_rcount;
828
829 rs->rules[rs_num].active.ticket =
830 rs->rules[rs_num].inactive.ticket;
/* Recompute skip steps for the freshly activated rule list. */
831 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
832
833
834 /* Purge the old rule list. */
835 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
836 pf_rm_rule(old_rules, rule);
837 if (rs->rules[rs_num].inactive.ptr_array)
838 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
839 rs->rules[rs_num].inactive.ptr_array = NULL;
840 rs->rules[rs_num].inactive.rcount = 0;
841 rs->rules[rs_num].inactive.open = 0;
842 pf_remove_if_empty_ruleset(rs);
843 splx(s);
844 return (0);
845 }
846
/*
 * pf_state_export() -- serialize a state (and its key) into the
 * wire-format pfsync_state "sp".  Creation is converted to an age and
 * expire to seconds-remaining so the values are meaningful on the
 * receiving peer regardless of its clock.
 */
847 void
848 pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
849 struct pf_state *s)
850 {
851 int secs = time_second;
852 bzero(sp, sizeof(struct pfsync_state));
853
854 /* copy from state key */
855 sp->lan.addr = sk->lan.addr;
856 sp->lan.port = sk->lan.port;
857 sp->gwy.addr = sk->gwy.addr;
858 sp->gwy.port = sk->gwy.port;
859 sp->ext.addr = sk->ext.addr;
860 sp->ext.port = sk->ext.port;
861 sp->proto = sk->proto;
862 sp->af = sk->af;
863 sp->direction = sk->direction;
864
865 /* copy from state */
866 memcpy(&sp->id, &s->id, sizeof(sp->id));
867 sp->creatorid = s->creatorid;
868 strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
869 pf_state_peer_to_pfsync(&s->src, &sp->src);
870 pf_state_peer_to_pfsync(&s->dst, &sp->dst);
871
/* Rule references are exported as rule numbers; -1 means "none". */
872 sp->rule = s->rule.ptr->nr;
873 sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
874 sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;
875
876 pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
877 pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
878 pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
879 pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
880 sp->creation = secs - s->creation;
881 sp->expire = pf_state_expires(s);
882 sp->log = s->log;
883 sp->allow_opts = s->allow_opts;
884 sp->timeout = s->timeout;
885
886 if (s->src_node)
887 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
888 if (s->nat_src_node)
889 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
890
/* Convert the absolute expiry to time-remaining; clamp at zero. */
891 if (sp->expire > secs)
892 sp->expire -= secs;
893 else
894 sp->expire = 0;
895
896 }
897
898 void
899 pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
900 struct pf_state *s)
901 {
902 /* copy to state key */
903 sk->lan.addr = sp->lan.addr;
904 sk->lan.port = sp->lan.port;
905 sk->gwy.addr = sp->gwy.addr;
906 sk->gwy.port = sp->gwy.port;
907 sk->ext.addr = sp->ext.addr;
908 sk->ext.port = sp->ext.port;
909 sk->proto = sp->proto;
910 sk->af = sp->af;
911 sk->direction = sp->direction;
912
913 /* copy to state */
914 memcpy(&s->id, &sp->id, sizeof(sp->id));
915 s->creatorid = sp->creatorid;
916 strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
917 pf_state_peer_from_pfsync(&sp->src, &s->src);
918 pf_state_peer_from_pfsync(&sp->dst, &s->dst);
919
920 s->rule.ptr = &pf_default_rule;
921 s->nat_rule.ptr = NULL;
922 s->anchor.ptr = NULL;
923 s->rt_kif = NULL;
924 s->creation = time_second;
925 s->pfsync_time = 0;
926 s->packets[0] = s->packets[1] = 0;
927 s->bytes[0] = s->bytes[1] = 0;
928 }
929
/*
 * pf_setup_pfsync_matching() -- rebuild the per-ruleset ptr_array
 * (rule-number -> rule pointer lookup used by pfsync) for the staged
 * inactive rules and compute the MD5 checksum of all rulesets except
 * SCRUB, storing it in pf_status.pf_chksum.  Returns ENOMEM on
 * allocation failure.
 */
930 int
931 pf_setup_pfsync_matching(struct pf_ruleset *rs)
932 {
933 MD5_CTX ctx;
934 struct pf_rule *rule;
935 int rs_cnt;
936 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
937
938 MD5Init(&ctx);
939 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
940 /* XXX PF_RULESET_SCRUB as well? */
941 if (rs_cnt == PF_RULESET_SCRUB)
942 continue;
943
/* Throw away any stale lookup array before rebuilding it. */
944 if (rs->rules[rs_cnt].inactive.ptr_array)
945 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
946 rs->rules[rs_cnt].inactive.ptr_array = NULL;
947
948 if (rs->rules[rs_cnt].inactive.rcount) {
949 rs->rules[rs_cnt].inactive.ptr_array =
950 malloc(sizeof(caddr_t) *
951 rs->rules[rs_cnt].inactive.rcount,
952 M_TEMP, M_NOWAIT);
953
954 if (!rs->rules[rs_cnt].inactive.ptr_array)
955 return (ENOMEM);
956 }
957
/* Hash every rule and index it by its rule number (rule->nr). */
958 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
959 entries) {
960 pf_hash_rule(&ctx, rule);
961 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
962 }
963 }
964
965 MD5Final(digest, &ctx);
966 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
967 return (0);
968 }
969
970 int
971 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
972 {
973 struct pf_pooladdr *pa = NULL;
974 struct pf_pool *pool = NULL;
975 int s;
976 int error = 0;
977
978 /* XXX keep in sync with switch() below */
979 if (securelevel > 1)
980 switch (cmd) {
981 case DIOCGETRULES:
982 case DIOCGETRULE:
983 case DIOCGETADDRS:
984 case DIOCGETADDR:
985 case DIOCGETSTATE:
986 case DIOCSETSTATUSIF:
987 case DIOCGETSTATUS:
988 case DIOCCLRSTATUS:
989 case DIOCNATLOOK:
990 case DIOCSETDEBUG:
991 case DIOCGETSTATES:
992 case DIOCGETTIMEOUT:
993 case DIOCCLRRULECTRS:
994 case DIOCGETLIMIT:
995 case DIOCGETALTQS:
996 case DIOCGETALTQ:
997 case DIOCGETQSTATS:
998 case DIOCGETRULESETS:
999 case DIOCGETRULESET:
1000 case DIOCRGETTABLES:
1001 case DIOCRGETTSTATS:
1002 case DIOCRCLRTSTATS:
1003 case DIOCRCLRADDRS:
1004 case DIOCRADDADDRS:
1005 case DIOCRDELADDRS:
1006 case DIOCRSETADDRS:
1007 case DIOCRGETADDRS:
1008 case DIOCRGETASTATS:
1009 case DIOCRCLRASTATS:
1010 case DIOCRTSTADDRS:
1011 case DIOCOSFPGET:
1012 case DIOCGETSRCNODES:
1013 case DIOCCLRSRCNODES:
1014 case DIOCIGETIFACES:
1015 case DIOCSETIFFLAG:
1016 case DIOCCLRIFFLAG:
1017 break;
1018 case DIOCRCLRTABLES:
1019 case DIOCRADDTABLES:
1020 case DIOCRDELTABLES:
1021 case DIOCRSETTFLAGS:
1022 if (((struct pfioc_table *)addr)->pfrio_flags &
1023 PFR_FLAG_DUMMY)
1024 break; /* dummy operation ok */
1025 return (EPERM);
1026 default:
1027 return (EPERM);
1028 }
1029
1030 if (!(flags & FWRITE))
1031 switch (cmd) {
1032 case DIOCGETRULES:
1033 case DIOCGETADDRS:
1034 case DIOCGETADDR:
1035 case DIOCGETSTATE:
1036 case DIOCGETSTATUS:
1037 case DIOCGETSTATES:
1038 case DIOCGETTIMEOUT:
1039 case DIOCGETLIMIT:
1040 case DIOCGETALTQS:
1041 case DIOCGETALTQ:
1042 case DIOCGETQSTATS:
1043 case DIOCGETRULESETS:
1044 case DIOCGETRULESET:
1045 case DIOCNATLOOK:
1046 case DIOCRGETTABLES:
1047 case DIOCRGETTSTATS:
1048 case DIOCRGETADDRS:
1049 case DIOCRGETASTATS:
1050 case DIOCRTSTADDRS:
1051 case DIOCOSFPGET:
1052 case DIOCGETSRCNODES:
1053 case DIOCIGETIFACES:
1054 break;
1055 case DIOCRCLRTABLES:
1056 case DIOCRADDTABLES:
1057 case DIOCRDELTABLES:
1058 case DIOCRCLRTSTATS:
1059 case DIOCRCLRADDRS:
1060 case DIOCRADDADDRS:
1061 case DIOCRDELADDRS:
1062 case DIOCRSETADDRS:
1063 case DIOCRSETTFLAGS:
1064 if (((struct pfioc_table *)addr)->pfrio_flags &
1065 PFR_FLAG_DUMMY) {
1066 flags |= FWRITE; /* need write lock for dummy */
1067 break; /* dummy operation ok */
1068 }
1069 return (EACCES);
1070 case DIOCGETRULE:
1071 if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR)
1072 return (EACCES);
1073 break;
1074 default:
1075 return (EACCES);
1076 }
1077
1078 if (flags & FWRITE)
1079 rw_enter_write(&pf_consistency_lock);
1080 else
1081 rw_enter_read(&pf_consistency_lock);
1082
1083 s = splsoftnet();
1084 switch (cmd) {
1085
1086 case DIOCSTART:
1087 if (pf_status.running)
1088 error = EEXIST;
1089 else {
1090 pf_status.running = 1;
1091 pf_status.since = time_second;
1092 if (pf_status.stateid == 0) {
1093 pf_status.stateid = time_second;
1094 pf_status.stateid = pf_status.stateid << 32;
1095 }
1096 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1097 }
1098 break;
1099
1100 case DIOCSTOP:
1101 if (!pf_status.running)
1102 error = ENOENT;
1103 else {
1104 pf_status.running = 0;
1105 pf_status.since = time_second;
1106 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1107 }
1108 break;
1109
1110 case DIOCADDRULE: {
1111 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1112 struct pf_ruleset *ruleset;
1113 struct pf_rule *rule, *tail;
1114 struct pf_pooladdr *pa;
1115 int rs_num;
1116
1117 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1118 ruleset = pf_find_ruleset(pr->anchor);
1119 if (ruleset == NULL) {
1120 error = EINVAL;
1121 break;
1122 }
1123 rs_num = pf_get_ruleset_number(pr->rule.action);
1124 if (rs_num >= PF_RULESET_MAX) {
1125 error = EINVAL;
1126 break;
1127 }
1128 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1129 error = EINVAL;
1130 break;
1131 }
1132 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1133 error = EBUSY;
1134 break;
1135 }
1136 if (pr->pool_ticket != ticket_pabuf) {
1137 error = EBUSY;
1138 break;
1139 }
1140 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1141 if (rule == NULL) {
1142 error = ENOMEM;
1143 break;
1144 }
1145 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1146 rule->cuid = p->p_cred->p_ruid;
1147 rule->cpid = p->p_pid;
1148 rule->anchor = NULL;
1149 rule->kif = NULL;
1150 TAILQ_INIT(&rule->rpool.list);
1151 /* initialize refcounting */
1152 rule->states = 0;
1153 rule->src_nodes = 0;
1154 rule->entries.tqe_prev = NULL;
1155 #ifndef INET
1156 if (rule->af == AF_INET) {
1157 pool_put(&pf_rule_pl, rule);
1158 error = EAFNOSUPPORT;
1159 break;
1160 }
1161 #endif /* INET */
1162 #ifndef INET6
1163 if (rule->af == AF_INET6) {
1164 pool_put(&pf_rule_pl, rule);
1165 error = EAFNOSUPPORT;
1166 break;
1167 }
1168 #endif /* INET6 */
1169 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1170 pf_rulequeue);
1171 if (tail)
1172 rule->nr = tail->nr + 1;
1173 else
1174 rule->nr = 0;
1175 if (rule->ifname[0]) {
1176 rule->kif = pfi_kif_get(rule->ifname);
1177 if (rule->kif == NULL) {
1178 pool_put(&pf_rule_pl, rule);
1179 error = EINVAL;
1180 break;
1181 }
1182 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1183 }
1184
1185 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
1186 error = EBUSY;
1187
1188 #ifdef ALTQ
1189 /* set queue IDs */
1190 if (rule->qname[0] != 0) {
1191 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1192 error = EBUSY;
1193 else if (rule->pqname[0] != 0) {
1194 if ((rule->pqid =
1195 pf_qname2qid(rule->pqname)) == 0)
1196 error = EBUSY;
1197 } else
1198 rule->pqid = rule->qid;
1199 }
1200 #endif
1201 if (rule->tagname[0])
1202 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1203 error = EBUSY;
1204 if (rule->match_tagname[0])
1205 if ((rule->match_tag =
1206 pf_tagname2tag(rule->match_tagname)) == 0)
1207 error = EBUSY;
1208 if (rule->rt && !rule->direction)
1209 error = EINVAL;
1210 #if NPFLOG > 0
1211 if (!rule->log)
1212 rule->logif = 0;
1213 if (rule->logif >= PFLOGIFS_MAX)
1214 error = EINVAL;
1215 #endif
1216 if (pf_rtlabel_add(&rule->src.addr) ||
1217 pf_rtlabel_add(&rule->dst.addr))
1218 error = EBUSY;
1219 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1220 error = EINVAL;
1221 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1222 error = EINVAL;
1223 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1224 error = EINVAL;
1225 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1226 error = EINVAL;
1227 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1228 error = EINVAL;
1229 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1230 if (pf_tbladdr_setup(ruleset, &pa->addr))
1231 error = EINVAL;
1232
1233 if (rule->overload_tblname[0]) {
1234 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1235 rule->overload_tblname)) == NULL)
1236 error = EINVAL;
1237 else
1238 rule->overload_tbl->pfrkt_flags |=
1239 PFR_TFLAG_ACTIVE;
1240 }
1241
1242 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1243 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1244 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1245 (rule->rt > PF_FASTROUTE)) &&
1246 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1247 error = EINVAL;
1248
1249 if (error) {
1250 pf_rm_rule(NULL, rule);
1251 break;
1252 }
1253 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1254 rule->evaluations = rule->packets[0] = rule->packets[1] =
1255 rule->bytes[0] = rule->bytes[1] = 0;
1256 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1257 rule, entries);
1258 ruleset->rules[rs_num].inactive.rcount++;
1259 break;
1260 }
1261
1262 case DIOCGETRULES: {
1263 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1264 struct pf_ruleset *ruleset;
1265 struct pf_rule *tail;
1266 int rs_num;
1267
1268 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1269 ruleset = pf_find_ruleset(pr->anchor);
1270 if (ruleset == NULL) {
1271 error = EINVAL;
1272 break;
1273 }
1274 rs_num = pf_get_ruleset_number(pr->rule.action);
1275 if (rs_num >= PF_RULESET_MAX) {
1276 error = EINVAL;
1277 break;
1278 }
1279 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1280 pf_rulequeue);
1281 if (tail)
1282 pr->nr = tail->nr + 1;
1283 else
1284 pr->nr = 0;
1285 pr->ticket = ruleset->rules[rs_num].active.ticket;
1286 break;
1287 }
1288
1289 case DIOCGETRULE: {
1290 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1291 struct pf_ruleset *ruleset;
1292 struct pf_rule *rule;
1293 int rs_num, i;
1294
1295 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1296 ruleset = pf_find_ruleset(pr->anchor);
1297 if (ruleset == NULL) {
1298 error = EINVAL;
1299 break;
1300 }
1301 rs_num = pf_get_ruleset_number(pr->rule.action);
1302 if (rs_num >= PF_RULESET_MAX) {
1303 error = EINVAL;
1304 break;
1305 }
1306 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1307 error = EBUSY;
1308 break;
1309 }
1310 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1311 while ((rule != NULL) && (rule->nr != pr->nr))
1312 rule = TAILQ_NEXT(rule, entries);
1313 if (rule == NULL) {
1314 error = EBUSY;
1315 break;
1316 }
1317 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1318 if (pf_anchor_copyout(ruleset, rule, pr)) {
1319 error = EBUSY;
1320 break;
1321 }
1322 pfi_dynaddr_copyout(&pr->rule.src.addr);
1323 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1324 pf_tbladdr_copyout(&pr->rule.src.addr);
1325 pf_tbladdr_copyout(&pr->rule.dst.addr);
1326 pf_rtlabel_copyout(&pr->rule.src.addr);
1327 pf_rtlabel_copyout(&pr->rule.dst.addr);
1328 for (i = 0; i < PF_SKIP_COUNT; ++i)
1329 if (rule->skip[i].ptr == NULL)
1330 pr->rule.skip[i].nr = -1;
1331 else
1332 pr->rule.skip[i].nr =
1333 rule->skip[i].ptr->nr;
1334
1335 if (pr->action == PF_GET_CLR_CNTR) {
1336 rule->evaluations = 0;
1337 rule->packets[0] = rule->packets[1] = 0;
1338 rule->bytes[0] = rule->bytes[1] = 0;
1339 }
1340 break;
1341 }
1342
1343 case DIOCCHANGERULE: {
1344 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1345 struct pf_ruleset *ruleset;
1346 struct pf_rule *oldrule = NULL, *newrule = NULL;
1347 u_int32_t nr = 0;
1348 int rs_num;
1349
1350 if (!(pcr->action == PF_CHANGE_REMOVE ||
1351 pcr->action == PF_CHANGE_GET_TICKET) &&
1352 pcr->pool_ticket != ticket_pabuf) {
1353 error = EBUSY;
1354 break;
1355 }
1356
1357 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1358 pcr->action > PF_CHANGE_GET_TICKET) {
1359 error = EINVAL;
1360 break;
1361 }
1362 ruleset = pf_find_ruleset(pcr->anchor);
1363 if (ruleset == NULL) {
1364 error = EINVAL;
1365 break;
1366 }
1367 rs_num = pf_get_ruleset_number(pcr->rule.action);
1368 if (rs_num >= PF_RULESET_MAX) {
1369 error = EINVAL;
1370 break;
1371 }
1372
1373 if (pcr->action == PF_CHANGE_GET_TICKET) {
1374 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1375 break;
1376 } else {
1377 if (pcr->ticket !=
1378 ruleset->rules[rs_num].active.ticket) {
1379 error = EINVAL;
1380 break;
1381 }
1382 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1383 error = EINVAL;
1384 break;
1385 }
1386 }
1387
1388 if (pcr->action != PF_CHANGE_REMOVE) {
1389 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1390 if (newrule == NULL) {
1391 error = ENOMEM;
1392 break;
1393 }
1394 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1395 newrule->cuid = p->p_cred->p_ruid;
1396 newrule->cpid = p->p_pid;
1397 TAILQ_INIT(&newrule->rpool.list);
1398 /* initialize refcounting */
1399 newrule->states = 0;
1400 newrule->entries.tqe_prev = NULL;
1401 #ifndef INET
1402 if (newrule->af == AF_INET) {
1403 pool_put(&pf_rule_pl, newrule);
1404 error = EAFNOSUPPORT;
1405 break;
1406 }
1407 #endif /* INET */
1408 #ifndef INET6
1409 if (newrule->af == AF_INET6) {
1410 pool_put(&pf_rule_pl, newrule);
1411 error = EAFNOSUPPORT;
1412 break;
1413 }
1414 #endif /* INET6 */
1415 if (newrule->ifname[0]) {
1416 newrule->kif = pfi_kif_get(newrule->ifname);
1417 if (newrule->kif == NULL) {
1418 pool_put(&pf_rule_pl, newrule);
1419 error = EINVAL;
1420 break;
1421 }
1422 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1423 } else
1424 newrule->kif = NULL;
1425
1426 if (newrule->rtableid > 0 &&
1427 !rtable_exists(newrule->rtableid))
1428 error = EBUSY;
1429
1430 #ifdef ALTQ
1431 /* set queue IDs */
1432 if (newrule->qname[0] != 0) {
1433 if ((newrule->qid =
1434 pf_qname2qid(newrule->qname)) == 0)
1435 error = EBUSY;
1436 else if (newrule->pqname[0] != 0) {
1437 if ((newrule->pqid =
1438 pf_qname2qid(newrule->pqname)) == 0)
1439 error = EBUSY;
1440 } else
1441 newrule->pqid = newrule->qid;
1442 }
1443 #endif /* ALTQ */
1444 if (newrule->tagname[0])
1445 if ((newrule->tag =
1446 pf_tagname2tag(newrule->tagname)) == 0)
1447 error = EBUSY;
1448 if (newrule->match_tagname[0])
1449 if ((newrule->match_tag = pf_tagname2tag(
1450 newrule->match_tagname)) == 0)
1451 error = EBUSY;
1452 if (newrule->rt && !newrule->direction)
1453 error = EINVAL;
1454 #if NPFLOG > 0
1455 if (!newrule->log)
1456 newrule->logif = 0;
1457 if (newrule->logif >= PFLOGIFS_MAX)
1458 error = EINVAL;
1459 #endif
1460 if (pf_rtlabel_add(&newrule->src.addr) ||
1461 pf_rtlabel_add(&newrule->dst.addr))
1462 error = EBUSY;
1463 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1464 error = EINVAL;
1465 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1466 error = EINVAL;
1467 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1468 error = EINVAL;
1469 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1470 error = EINVAL;
1471 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1472 error = EINVAL;
1473 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1474 if (pf_tbladdr_setup(ruleset, &pa->addr))
1475 error = EINVAL;
1476
1477 if (newrule->overload_tblname[0]) {
1478 if ((newrule->overload_tbl = pfr_attach_table(
1479 ruleset, newrule->overload_tblname)) ==
1480 NULL)
1481 error = EINVAL;
1482 else
1483 newrule->overload_tbl->pfrkt_flags |=
1484 PFR_TFLAG_ACTIVE;
1485 }
1486
1487 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1488 if (((((newrule->action == PF_NAT) ||
1489 (newrule->action == PF_RDR) ||
1490 (newrule->action == PF_BINAT) ||
1491 (newrule->rt > PF_FASTROUTE)) &&
1492 !newrule->anchor)) &&
1493 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1494 error = EINVAL;
1495
1496 if (error) {
1497 pf_rm_rule(NULL, newrule);
1498 break;
1499 }
1500 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1501 newrule->evaluations = 0;
1502 newrule->packets[0] = newrule->packets[1] = 0;
1503 newrule->bytes[0] = newrule->bytes[1] = 0;
1504 }
1505 pf_empty_pool(&pf_pabuf);
1506
1507 if (pcr->action == PF_CHANGE_ADD_HEAD)
1508 oldrule = TAILQ_FIRST(
1509 ruleset->rules[rs_num].active.ptr);
1510 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1511 oldrule = TAILQ_LAST(
1512 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1513 else {
1514 oldrule = TAILQ_FIRST(
1515 ruleset->rules[rs_num].active.ptr);
1516 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1517 oldrule = TAILQ_NEXT(oldrule, entries);
1518 if (oldrule == NULL) {
1519 if (newrule != NULL)
1520 pf_rm_rule(NULL, newrule);
1521 error = EINVAL;
1522 break;
1523 }
1524 }
1525
1526 if (pcr->action == PF_CHANGE_REMOVE) {
1527 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1528 ruleset->rules[rs_num].active.rcount--;
1529 } else {
1530 if (oldrule == NULL)
1531 TAILQ_INSERT_TAIL(
1532 ruleset->rules[rs_num].active.ptr,
1533 newrule, entries);
1534 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1535 pcr->action == PF_CHANGE_ADD_BEFORE)
1536 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1537 else
1538 TAILQ_INSERT_AFTER(
1539 ruleset->rules[rs_num].active.ptr,
1540 oldrule, newrule, entries);
1541 ruleset->rules[rs_num].active.rcount++;
1542 }
1543
1544 nr = 0;
1545 TAILQ_FOREACH(oldrule,
1546 ruleset->rules[rs_num].active.ptr, entries)
1547 oldrule->nr = nr++;
1548
1549 ruleset->rules[rs_num].active.ticket++;
1550
1551 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1552 pf_remove_if_empty_ruleset(ruleset);
1553
1554 break;
1555 }
1556
	/*
	 * Flush all states, optionally restricted to one interface
	 * (psk->psk_ifname).  The kill count is returned to userland
	 * in psk->psk_af (field reuse, matching DIOCKILLSTATES below).
	 */
1557 case DIOCCLRSTATES: {
1558 struct pf_state *s, *nexts;
1559 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1560 int killed = 0;
1561
	/* take the successor first: pf_unlink_state() removes s from the tree */
1562 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
1563 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1564
	/* empty ifname matches every state */
1565 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1566 s->kif->pfik_name)) {
1567 #if NPFSYNC
1568 /* don't send out individual delete messages */
1569 s->sync_flags = PFSTATE_NOSYNC;
1570 #endif
1571 pf_unlink_state(s);
1572 killed++;
1573 }
1574 }
1575 psk->psk_af = killed;
1576 #if NPFSYNC
	/* one bulk clear message instead of per-state deletes */
1577 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1578 #endif
1579 break;
1580 }
1581
1582 case DIOCKILLSTATES: {
1583 struct pf_state *s, *nexts;
1584 struct pf_state_key *sk;
1585 struct pf_state_host *src, *dst;
1586 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1587 int killed = 0;
1588
1589 for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
1590 s = nexts) {
1591 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1592 sk = s->state_key;
1593
1594 if (sk->direction == PF_OUT) {
1595 src = &sk->lan;
1596 dst = &sk->ext;
1597 } else {
1598 src = &sk->ext;
1599 dst = &sk->lan;
1600 }
1601 if ((!psk->psk_af || sk->af == psk->psk_af)
1602 && (!psk->psk_proto || psk->psk_proto ==
1603 sk->proto) &&
1604 PF_MATCHA(psk->psk_src.neg,
1605 &psk->psk_src.addr.v.a.addr,
1606 &psk->psk_src.addr.v.a.mask,
1607 &src->addr, sk->af) &&
1608 PF_MATCHA(psk->psk_dst.neg,
1609 &psk->psk_dst.addr.v.a.addr,
1610 &psk->psk_dst.addr.v.a.mask,
1611 &dst->addr, sk->af) &&
1612 (psk->psk_src.port_op == 0 ||
1613 pf_match_port(psk->psk_src.port_op,
1614 psk->psk_src.port[0], psk->psk_src.port[1],
1615 src->port)) &&
1616 (psk->psk_dst.port_op == 0 ||
1617 pf_match_port(psk->psk_dst.port_op,
1618 psk->psk_dst.port[0], psk->psk_dst.port[1],
1619 dst->port)) &&
1620 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1621 s->kif->pfik_name))) {
1622 #if NPFSYNC > 0
1623 /* send immediate delete of state */
1624 pfsync_delete_state(s);
1625 s->sync_flags |= PFSTATE_NOSYNC;
1626 #endif
1627 pf_unlink_state(s);
1628 killed++;
1629 }
1630 }
1631 psk->psk_af = killed;
1632 break;
1633 }
1634
1635 case DIOCADDSTATE: {
1636 struct pfioc_state *ps = (struct pfioc_state *)addr;
1637 struct pfsync_state *sp = (struct pfsync_state *)ps->state;
1638 struct pf_state *s;
1639 struct pf_state_key *sk;
1640 struct pfi_kif *kif;
1641
1642 if (sp->timeout >= PFTM_MAX &&
1643 sp->timeout != PFTM_UNTIL_PACKET) {
1644 error = EINVAL;
1645 break;
1646 }
1647 s = pool_get(&pf_state_pl, PR_NOWAIT);
1648 if (s == NULL) {
1649 error = ENOMEM;
1650 break;
1651 }
1652 bzero(s, sizeof(struct pf_state));
1653 if ((sk = pf_alloc_state_key(s)) == NULL) {
1654 error = ENOMEM;
1655 break;
1656 }
1657 pf_state_import(sp, sk, s);
1658 kif = pfi_kif_get(sp->ifname);
1659 if (kif == NULL) {
1660 pool_put(&pf_state_pl, s);
1661 pool_put(&pf_state_key_pl, sk);
1662 error = ENOENT;
1663 break;
1664 }
1665 if (pf_insert_state(kif, s)) {
1666 pfi_kif_unref(kif, PFI_KIF_REF_NONE);
1667 pool_put(&pf_state_pl, s);
1668 pool_put(&pf_state_key_pl, sk);
1669 error = ENOMEM;
1670 }
1671 break;
1672 }
1673
	/*
	 * Export the ps->nr'th state (in id-tree order) to userland.
	 */
1674 case DIOCGETSTATE: {
1675 struct pfioc_state *ps = (struct pfioc_state *)addr;
1676 struct pf_state *s;
1677 u_int32_t nr;
1678
	/* linear walk to the requested index */
1679 nr = 0;
1680 RB_FOREACH(s, pf_state_tree_id, &tree_id) {
1681 if (nr >= ps->nr)
1682 break;
1683 nr++;
1684 }
	/* s is NULL when fewer than ps->nr + 1 states exist */
1685 if (s == NULL) {
1686 error = EBUSY;
1687 break;
1688 }
1689
1690 pf_state_export((struct pfsync_state *)&ps->state,
1691 s->state_key, s);
1692 break;
1693 }
1694
1695 case DIOCGETSTATES: {
1696 struct pfioc_states *ps = (struct pfioc_states *)addr;
1697 struct pf_state *state;
1698 struct pfsync_state *p, *pstore;
1699 u_int32_t nr = 0;
1700
1701 if (ps->ps_len == 0) {
1702 nr = pf_status.states;
1703 ps->ps_len = sizeof(struct pfsync_state) * nr;
1704 break;
1705 }
1706
1707 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1708
1709 p = ps->ps_states;
1710
1711 state = TAILQ_FIRST(&state_list);
1712 while (state) {
1713 if (state->timeout != PFTM_UNLINKED) {
1714 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1715 break;
1716
1717 pf_state_export(pstore,
1718 state->state_key, state);
1719 error = copyout(pstore, p, sizeof(*p));
1720 if (error) {
1721 free(pstore, M_TEMP);
1722 goto fail;
1723 }
1724 p++;
1725 nr++;
1726 }
1727 state = TAILQ_NEXT(state, entry_list);
1728 }
1729
1730 ps->ps_len = sizeof(struct pfsync_state) * nr;
1731
1732 free(pstore, M_TEMP);
1733 break;
1734 }
1735
	/*
	 * Copy the global pf status block out to userland, then fill in
	 * the legacy per-interface counters for old pfctl binaries.
	 */
1736 case DIOCGETSTATUS: {
1737 struct pf_status *s = (struct pf_status *)addr;
1738 bcopy(&pf_status, s, sizeof(struct pf_status));
1739 pfi_fill_oldstatus(s);
1740 break;
1741 }
1742
	/*
	 * Set (or clear, with an empty name) the "loginterface" whose
	 * per-interface counters are reported in the status block.
	 */
1743 case DIOCSETSTATUSIF: {
1744 struct pfioc_if *pi = (struct pfioc_if *)addr;
1745
	/* empty name clears the status interface */
1746 if (pi->ifname[0] == 0) {
1747 bzero(pf_status.ifname, IFNAMSIZ);
1748 break;
1749 }
	/* interface must currently exist */
1750 if (ifunit(pi->ifname) == NULL) {
1751 error = EINVAL;
1752 break;
1753 }
1754 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1755 break;
1756 }
1757
	/*
	 * Zero the global counters and restart the uptime clock; also
	 * clear the status interface's counters if one is configured.
	 * Note: running/debug/hostid and the state counts are preserved.
	 */
1758 case DIOCCLRSTATUS: {
1759 bzero(pf_status.counters, sizeof(pf_status.counters));
1760 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1761 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1762 pf_status.since = time_second;
1763 if (*pf_status.ifname)
1764 pfi_clr_istats(pf_status.ifname);
1765 break;
1766 }
1767
1768 case DIOCNATLOOK: {
1769 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1770 struct pf_state_key *sk;
1771 struct pf_state *state;
1772 struct pf_state_key_cmp key;
1773 int m = 0, direction = pnl->direction;
1774
1775 key.af = pnl->af;
1776 key.proto = pnl->proto;
1777
1778 if (!pnl->proto ||
1779 PF_AZERO(&pnl->saddr, pnl->af) ||
1780 PF_AZERO(&pnl->daddr, pnl->af) ||
1781 ((pnl->proto == IPPROTO_TCP ||
1782 pnl->proto == IPPROTO_UDP) &&
1783 (!pnl->dport || !pnl->sport)))
1784 error = EINVAL;
1785 else {
1786 /*
1787 * userland gives us source and dest of connection,
1788 * reverse the lookup so we ask for what happens with
1789 * the return traffic, enabling us to find it in the
1790 * state tree.
1791 */
1792 if (direction == PF_IN) {
1793 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1794 key.ext.port = pnl->dport;
1795 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1796 key.gwy.port = pnl->sport;
1797 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1798 } else {
1799 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1800 key.lan.port = pnl->dport;
1801 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1802 key.ext.port = pnl->sport;
1803 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1804 }
1805 if (m > 1)
1806 error = E2BIG; /* more than one state */
1807 else if (state != NULL) {
1808 sk = state->state_key;
1809 if (direction == PF_IN) {
1810 PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
1811 sk->af);
1812 pnl->rsport = sk->lan.port;
1813 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1814 pnl->af);
1815 pnl->rdport = pnl->dport;
1816 } else {
1817 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
1818 sk->af);
1819 pnl->rdport = sk->gwy.port;
1820 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1821 pnl->af);
1822 pnl->rsport = pnl->sport;
1823 }
1824 } else
1825 error = ENOENT;
1826 }
1827 break;
1828 }
1829
	/*
	 * Set one default-rule timeout value; the previous value is
	 * handed back to userland in pt->seconds.
	 */
1830 case DIOCSETTIMEOUT: {
1831 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1832 int old;
1833
	/* validate the timeout index and new value */
1834 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1835 pt->seconds < 0) {
1836 error = EINVAL;
1837 goto fail;
1838 }
1839 old = pf_default_rule.timeout[pt->timeout];
	/* the purge interval may not be zero */
1840 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1841 pt->seconds = 1;
1842 pf_default_rule.timeout[pt->timeout] = pt->seconds;
	/* shorter purge interval: wake the purge thread so it takes effect now */
1843 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1844 wakeup(pf_purge_thread);
	/* return the previous value */
1845 pt->seconds = old;
1846 break;
1847 }
1848
1849 case DIOCGETTIMEOUT: {
1850 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1851
1852 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1853 error = EINVAL;
1854 goto fail;
1855 }
1856 pt->seconds = pf_default_rule.timeout[pt->timeout];
1857 break;
1858 }
1859
1860 case DIOCGETLIMIT: {
1861 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1862
1863 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1864 error = EINVAL;
1865 goto fail;
1866 }
1867 pl->limit = pf_pool_limits[pl->index].limit;
1868 break;
1869 }
1870
	/*
	 * Set a hard limit on one of the pf memory pools (states, src
	 * nodes, ...); the previous limit is returned in pl->limit.
	 */
1871 case DIOCSETLIMIT: {
1872 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1873 int old_limit;
1874
	/* index must be valid and the pool must actually exist */
1875 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1876 pf_pool_limits[pl->index].pp == NULL) {
1877 error = EINVAL;
1878 goto fail;
1879 }
	/* apply to the pool first; EBUSY if the allocator refuses */
1880 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1881 pl->limit, NULL, 0) != 0) {
1882 error = EBUSY;
1883 goto fail;
1884 }
	/* record the new limit and hand the old one back */
1885 old_limit = pf_pool_limits[pl->index].limit;
1886 pf_pool_limits[pl->index].limit = pl->limit;
1887 pl->limit = old_limit;
1888 break;
1889 }
1890
1891 case DIOCSETDEBUG: {
1892 u_int32_t *level = (u_int32_t *)addr;
1893
1894 pf_status.debug = *level;
1895 break;
1896 }
1897
1898 case DIOCCLRRULECTRS: {
1899 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1900 struct pf_ruleset *ruleset = &pf_main_ruleset;
1901 struct pf_rule *rule;
1902
1903 TAILQ_FOREACH(rule,
1904 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1905 rule->evaluations = 0;
1906 rule->packets[0] = rule->packets[1] = 0;
1907 rule->bytes[0] = rule->bytes[1] = 0;
1908 }
1909 break;
1910 }
1911
1912 #ifdef ALTQ
1913 case DIOCSTARTALTQ: {
1914 struct pf_altq *altq;
1915
1916 /* enable all altq interfaces on active list */
1917 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1918 if (altq->qname[0] == 0) {
1919 error = pf_enable_altq(altq);
1920 if (error != 0)
1921 break;
1922 }
1923 }
1924 if (error == 0)
1925 pf_altq_running = 1;
1926 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1927 break;
1928 }
1929
1930 case DIOCSTOPALTQ: {
1931 struct pf_altq *altq;
1932
1933 /* disable all altq interfaces on active list */
1934 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1935 if (altq->qname[0] == 0) {
1936 error = pf_disable_altq(altq);
1937 if (error != 0)
1938 break;
1939 }
1940 }
1941 if (error == 0)
1942 pf_altq_running = 0;
1943 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1944 break;
1945 }
1946
1947 case DIOCADDALTQ: {
1948 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1949 struct pf_altq *altq, *a;
1950
1951 if (pa->ticket != ticket_altqs_inactive) {
1952 error = EBUSY;
1953 break;
1954 }
1955 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
1956 if (altq == NULL) {
1957 error = ENOMEM;
1958 break;
1959 }
1960 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1961
1962 /*
1963 * if this is for a queue, find the discipline and
1964 * copy the necessary fields
1965 */
1966 if (altq->qname[0] != 0) {
1967 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1968 error = EBUSY;
1969 pool_put(&pf_altq_pl, altq);
1970 break;
1971 }
1972 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1973 if (strncmp(a->ifname, altq->ifname,
1974 IFNAMSIZ) == 0 && a->qname[0] == 0) {
1975 altq->altq_disc = a->altq_disc;
1976 break;
1977 }
1978 }
1979 }
1980
1981 error = altq_add(altq);
1982 if (error) {
1983 pool_put(&pf_altq_pl, altq);
1984 break;
1985 }
1986
1987 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1988 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1989 break;
1990 }
1991
1992 case DIOCGETALTQS: {
1993 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1994 struct pf_altq *altq;
1995
1996 pa->nr = 0;
1997 TAILQ_FOREACH(altq, pf_altqs_active, entries)
1998 pa->nr++;
1999 pa->ticket = ticket_altqs_active;
2000 break;
2001 }
2002
2003 case DIOCGETALTQ: {
2004 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2005 struct pf_altq *altq;
2006 u_int32_t nr;
2007
2008 if (pa->ticket != ticket_altqs_active) {
2009 error = EBUSY;
2010 break;
2011 }
2012 nr = 0;
2013 altq = TAILQ_FIRST(pf_altqs_active);
2014 while ((altq != NULL) && (nr < pa->nr)) {
2015 altq = TAILQ_NEXT(altq, entries);
2016 nr++;
2017 }
2018 if (altq == NULL) {
2019 error = EBUSY;
2020 break;
2021 }
2022 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2023 break;
2024 }
2025
2026 case DIOCCHANGEALTQ:
2027 /* CHANGEALTQ not supported yet! */
2028 error = ENODEV;
2029 break;
2030
2031 case DIOCGETQSTATS: {
2032 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2033 struct pf_altq *altq;
2034 u_int32_t nr;
2035 int nbytes;
2036
2037 if (pq->ticket != ticket_altqs_active) {
2038 error = EBUSY;
2039 break;
2040 }
2041 nbytes = pq->nbytes;
2042 nr = 0;
2043 altq = TAILQ_FIRST(pf_altqs_active);
2044 while ((altq != NULL) && (nr < pq->nr)) {
2045 altq = TAILQ_NEXT(altq, entries);
2046 nr++;
2047 }
2048 if (altq == NULL) {
2049 error = EBUSY;
2050 break;
2051 }
2052 error = altq_getqstats(altq, pq->buf, &nbytes);
2053 if (error == 0) {
2054 pq->scheduler = altq->scheduler;
2055 pq->nbytes = nbytes;
2056 }
2057 break;
2058 }
2059 #endif /* ALTQ */
2060
2061 case DIOCBEGINADDRS: {
2062 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2063
2064 pf_empty_pool(&pf_pabuf);
2065 pp->ticket = ++ticket_pabuf;
2066 break;
2067 }
2068
2069 case DIOCADDADDR: {
2070 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2071
2072 if (pp->ticket != ticket_pabuf) {
2073 error = EBUSY;
2074 break;
2075 }
2076 #ifndef INET
2077 if (pp->af == AF_INET) {
2078 error = EAFNOSUPPORT;
2079 break;
2080 }
2081 #endif /* INET */
2082 #ifndef INET6
2083 if (pp->af == AF_INET6) {
2084 error = EAFNOSUPPORT;
2085 break;
2086 }
2087 #endif /* INET6 */
2088 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2089 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2090 pp->addr.addr.type != PF_ADDR_TABLE) {
2091 error = EINVAL;
2092 break;
2093 }
2094 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2095 if (pa == NULL) {
2096 error = ENOMEM;
2097 break;
2098 }
2099 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2100 if (pa->ifname[0]) {
2101 pa->kif = pfi_kif_get(pa->ifname);
2102 if (pa->kif == NULL) {
2103 pool_put(&pf_pooladdr_pl, pa);
2104 error = EINVAL;
2105 break;
2106 }
2107 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2108 }
2109 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2110 pfi_dynaddr_remove(&pa->addr);
2111 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2112 pool_put(&pf_pooladdr_pl, pa);
2113 error = EINVAL;
2114 break;
2115 }
2116 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2117 break;
2118 }
2119
2120 case DIOCGETADDRS: {
2121 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2122
2123 pp->nr = 0;
2124 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2125 pp->r_num, 0, 1, 0);
2126 if (pool == NULL) {
2127 error = EBUSY;
2128 break;
2129 }
2130 TAILQ_FOREACH(pa, &pool->list, entries)
2131 pp->nr++;
2132 break;
2133 }
2134
2135 case DIOCGETADDR: {
2136 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2137 u_int32_t nr = 0;
2138
2139 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2140 pp->r_num, 0, 1, 1);
2141 if (pool == NULL) {
2142 error = EBUSY;
2143 break;
2144 }
2145 pa = TAILQ_FIRST(&pool->list);
2146 while ((pa != NULL) && (nr < pp->nr)) {
2147 pa = TAILQ_NEXT(pa, entries);
2148 nr++;
2149 }
2150 if (pa == NULL) {
2151 error = EBUSY;
2152 break;
2153 }
2154 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2155 pfi_dynaddr_copyout(&pp->addr.addr);
2156 pf_tbladdr_copyout(&pp->addr.addr);
2157 pf_rtlabel_copyout(&pp->addr.addr);
2158 break;
2159 }
2160
2161 case DIOCCHANGEADDR: {
2162 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2163 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2164 struct pf_ruleset *ruleset;
2165
2166 if (pca->action < PF_CHANGE_ADD_HEAD ||
2167 pca->action > PF_CHANGE_REMOVE) {
2168 error = EINVAL;
2169 break;
2170 }
2171 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2172 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2173 pca->addr.addr.type != PF_ADDR_TABLE) {
2174 error = EINVAL;
2175 break;
2176 }
2177
2178 ruleset = pf_find_ruleset(pca->anchor);
2179 if (ruleset == NULL) {
2180 error = EBUSY;
2181 break;
2182 }
2183 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2184 pca->r_num, pca->r_last, 1, 1);
2185 if (pool == NULL) {
2186 error = EBUSY;
2187 break;
2188 }
2189 if (pca->action != PF_CHANGE_REMOVE) {
2190 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2191 if (newpa == NULL) {
2192 error = ENOMEM;
2193 break;
2194 }
2195 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2196 #ifndef INET
2197 if (pca->af == AF_INET) {
2198 pool_put(&pf_pooladdr_pl, newpa);
2199 error = EAFNOSUPPORT;
2200 break;
2201 }
2202 #endif /* INET */
2203 #ifndef INET6
2204 if (pca->af == AF_INET6) {
2205 pool_put(&pf_pooladdr_pl, newpa);
2206 error = EAFNOSUPPORT;
2207 break;
2208 }
2209 #endif /* INET6 */
2210 if (newpa->ifname[0]) {
2211 newpa->kif = pfi_kif_get(newpa->ifname);
2212 if (newpa->kif == NULL) {
2213 pool_put(&pf_pooladdr_pl, newpa);
2214 error = EINVAL;
2215 break;
2216 }
2217 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2218 } else
2219 newpa->kif = NULL;
2220 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2221 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2222 pfi_dynaddr_remove(&newpa->addr);
2223 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2224 pool_put(&pf_pooladdr_pl, newpa);
2225 error = EINVAL;
2226 break;
2227 }
2228 }
2229
2230 if (pca->action == PF_CHANGE_ADD_HEAD)
2231 oldpa = TAILQ_FIRST(&pool->list);
2232 else if (pca->action == PF_CHANGE_ADD_TAIL)
2233 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2234 else {
2235 int i = 0;
2236
2237 oldpa = TAILQ_FIRST(&pool->list);
2238 while ((oldpa != NULL) && (i < pca->nr)) {
2239 oldpa = TAILQ_NEXT(oldpa, entries);
2240 i++;
2241 }
2242 if (oldpa == NULL) {
2243 error = EINVAL;
2244 break;
2245 }
2246 }
2247
2248 if (pca->action == PF_CHANGE_REMOVE) {
2249 TAILQ_REMOVE(&pool->list, oldpa, entries);
2250 pfi_dynaddr_remove(&oldpa->addr);
2251 pf_tbladdr_remove(&oldpa->addr);
2252 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2253 pool_put(&pf_pooladdr_pl, oldpa);
2254 } else {
2255 if (oldpa == NULL)
2256 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2257 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2258 pca->action == PF_CHANGE_ADD_BEFORE)
2259 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2260 else
2261 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2262 newpa, entries);
2263 }
2264
2265 pool->cur = TAILQ_FIRST(&pool->list);
2266 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2267 pca->af);
2268 break;
2269 }
2270
2271 case DIOCGETRULESETS: {
2272 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2273 struct pf_ruleset *ruleset;
2274 struct pf_anchor *anchor;
2275
2276 pr->path[sizeof(pr->path) - 1] = 0;
2277 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2278 error = EINVAL;
2279 break;
2280 }
2281 pr->nr = 0;
2282 if (ruleset->anchor == NULL) {
2283 /* XXX kludge for pf_main_ruleset */
2284 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2285 if (anchor->parent == NULL)
2286 pr->nr++;
2287 } else {
2288 RB_FOREACH(anchor, pf_anchor_node,
2289 &ruleset->anchor->children)
2290 pr->nr++;
2291 }
2292 break;
2293 }
2294
2295 case DIOCGETRULESET: {
2296 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2297 struct pf_ruleset *ruleset;
2298 struct pf_anchor *anchor;
2299 u_int32_t nr = 0;
2300
2301 pr->path[sizeof(pr->path) - 1] = 0;
2302 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2303 error = EINVAL;
2304 break;
2305 }
2306 pr->name[0] = 0;
2307 if (ruleset->anchor == NULL) {
2308 /* XXX kludge for pf_main_ruleset */
2309 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2310 if (anchor->parent == NULL && nr++ == pr->nr) {
2311 strlcpy(pr->name, anchor->name,
2312 sizeof(pr->name));
2313 break;
2314 }
2315 } else {
2316 RB_FOREACH(anchor, pf_anchor_node,
2317 &ruleset->anchor->children)
2318 if (nr++ == pr->nr) {
2319 strlcpy(pr->name, anchor->name,
2320 sizeof(pr->name));
2321 break;
2322 }
2323 }
2324 if (!pr->name[0])
2325 error = EBUSY;
2326 break;
2327 }
2328
	/*
	 * Table-management ioctls.  Each one validates pfrio_esize (the
	 * userland element size) against the kernel's structure size as
	 * an ABI-version check — mismatch yields ENODEV — then forwards
	 * to the corresponding pfr_* routine in pf_table.c with
	 * PFR_FLAG_USERIOCTL set to mark the call as user-initiated.
	 */

	/* Clear (delete) all tables; number deleted returned in pfrio_ndel. */
2329 	case DIOCRCLRTABLES: {
2330 		struct pfioc_table *io = (struct pfioc_table *)addr;
2331
		/* No element array is passed, so esize must be 0. */
2332 		if (io->pfrio_esize != 0) {
2333 			error = ENODEV;
2334 			break;
2335 		}
2336 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2337 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2338 		break;
2339 	}
2340
	/* Add the tables in pfrio_buffer; count added in pfrio_nadd. */
2341 	case DIOCRADDTABLES: {
2342 		struct pfioc_table *io = (struct pfioc_table *)addr;
2343
2344 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2345 			error = ENODEV;
2346 			break;
2347 		}
2348 		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2349 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2350 		break;
2351 	}
2352
	/* Delete the tables in pfrio_buffer; count deleted in pfrio_ndel. */
2353 	case DIOCRDELTABLES: {
2354 		struct pfioc_table *io = (struct pfioc_table *)addr;
2355
2356 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2357 			error = ENODEV;
2358 			break;
2359 		}
2360 		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2361 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2362 		break;
2363 	}
2364
	/* List tables into pfrio_buffer; pfrio_size is in/out (capacity/count). */
2365 	case DIOCRGETTABLES: {
2366 		struct pfioc_table *io = (struct pfioc_table *)addr;
2367
2368 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2369 			error = ENODEV;
2370 			break;
2371 		}
2372 		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2373 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2374 		break;
2375 	}
2376
	/* Like DIOCRGETTABLES but returns per-table statistics (pfr_tstats). */
2377 	case DIOCRGETTSTATS: {
2378 		struct pfioc_table *io = (struct pfioc_table *)addr;
2379
2380 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2381 			error = ENODEV;
2382 			break;
2383 		}
2384 		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2385 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2386 		break;
2387 	}
2388
	/* Zero the statistics of the named tables; count zeroed in pfrio_nzero. */
2389 	case DIOCRCLRTSTATS: {
2390 		struct pfioc_table *io = (struct pfioc_table *)addr;
2391
2392 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2393 			error = ENODEV;
2394 			break;
2395 		}
2396 		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2397 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2398 		break;
2399 	}
2400
	/* Set/clear per-table flags; change/delete counts returned. */
2401 	case DIOCRSETTFLAGS: {
2402 		struct pfioc_table *io = (struct pfioc_table *)addr;
2403
2404 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2405 			error = ENODEV;
2406 			break;
2407 		}
2408 		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2409 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2410 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2411 		break;
2412 	}
2413
	/*
	 * Table-address ioctls.  Same pattern as the table ioctls above:
	 * pfrio_esize is an ABI check (ENODEV on mismatch), then the call
	 * is forwarded to pf_table.c with PFR_FLAG_USERIOCTL.
	 */

	/* Remove all addresses from one table; count removed in pfrio_ndel. */
2414 	case DIOCRCLRADDRS: {
2415 		struct pfioc_table *io = (struct pfioc_table *)addr;
2416
		/* No element array is passed, so esize must be 0. */
2417 		if (io->pfrio_esize != 0) {
2418 			error = ENODEV;
2419 			break;
2420 		}
2421 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2422 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2423 		break;
2424 	}
2425
	/* Add addresses from pfrio_buffer to one table. */
2426 	case DIOCRADDADDRS: {
2427 		struct pfioc_table *io = (struct pfioc_table *)addr;
2428
2429 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2430 			error = ENODEV;
2431 			break;
2432 		}
2433 		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2434 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2435 		    PFR_FLAG_USERIOCTL);
2436 		break;
2437 	}
2438
	/* Delete the listed addresses from one table. */
2439 	case DIOCRDELADDRS: {
2440 		struct pfioc_table *io = (struct pfioc_table *)addr;
2441
2442 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2443 			error = ENODEV;
2444 			break;
2445 		}
2446 		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2447 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2448 		    PFR_FLAG_USERIOCTL);
2449 		break;
2450 	}
2451
	/* Replace a table's contents with the listed addresses (add/del/change
	 * counts are all reported; pfrio_size2 returns the resulting size). */
2452 	case DIOCRSETADDRS: {
2453 		struct pfioc_table *io = (struct pfioc_table *)addr;
2454
2455 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2456 			error = ENODEV;
2457 			break;
2458 		}
2459 		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2460 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2461 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2462 		    PFR_FLAG_USERIOCTL, 0);
2463 		break;
2464 	}
2465
	/* List a table's addresses; pfrio_size is in/out. */
2466 	case DIOCRGETADDRS: {
2467 		struct pfioc_table *io = (struct pfioc_table *)addr;
2468
2469 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2470 			error = ENODEV;
2471 			break;
2472 		}
2473 		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2474 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2475 		break;
2476 	}
2477
	/* List a table's addresses with per-address statistics (pfr_astats). */
2478 	case DIOCRGETASTATS: {
2479 		struct pfioc_table *io = (struct pfioc_table *)addr;
2480
2481 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2482 			error = ENODEV;
2483 			break;
2484 		}
2485 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2486 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2487 		break;
2488 	}
2489
	/* Zero the statistics of the listed addresses. */
2490 	case DIOCRCLRASTATS: {
2491 		struct pfioc_table *io = (struct pfioc_table *)addr;
2492
2493 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2494 			error = ENODEV;
2495 			break;
2496 		}
2497 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2498 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2499 		    PFR_FLAG_USERIOCTL);
2500 		break;
2501 	}
2502
	/* Test which of the listed addresses match the table ("pfctl -T test"). */
2503 	case DIOCRTSTADDRS: {
2504 		struct pfioc_table *io = (struct pfioc_table *)addr;
2505
2506 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2507 			error = ENODEV;
2508 			break;
2509 		}
2510 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2511 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2512 		    PFR_FLAG_USERIOCTL);
2513 		break;
2514 	}
2515
	/* Define an inactive table (part of a DIOCXBEGIN transaction,
	 * authenticated by pfrio_ticket). */
2516 	case DIOCRINADEFINE: {
2517 		struct pfioc_table *io = (struct pfioc_table *)addr;
2518
2519 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2520 			error = ENODEV;
2521 			break;
2522 		}
2523 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2524 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2525 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2526 		break;
2527 	}
2528
	/* OS-fingerprint ioctls: thin pass-throughs to pf_osfp.c. */

	/* Add an OS fingerprint entry. */
2529 	case DIOCOSFPADD: {
2530 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2531 		error = pf_osfp_add(io);
2532 		break;
2533 	}
2534
	/* Look up / iterate OS fingerprint entries. */
2535 	case DIOCOSFPGET: {
2536 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2537 		error = pf_osfp_get(io);
2538 		break;
2539 	}
2540
	/*
	 * DIOCXBEGIN: open a transaction.  For each element of the
	 * user-supplied array, begin an inactive ruleset of the requested
	 * kind (ALTQ queue set, table set, or rule set) and copy the
	 * issued ticket back to userland; the tickets are later presented
	 * to DIOCXCOMMIT / DIOCXROLLBACK.  Any failure aborts the whole
	 * ioctl (goto fail); scratch buffers are freed on every exit path.
	 */
2541 	case DIOCXBEGIN: {
2542 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2543 		struct pfioc_trans_e	*ioe;
2544 		struct pfr_table	*table;
2545 		int			 i;
2546
		/* ABI check: userland element size must match the kernel's. */
2547 		if (io->esize != sizeof(*ioe)) {
2548 			error = ENODEV;
2549 			goto fail;
2550 		}
		/* Heap scratch (one element + one table) reused per iteration. */
2551 		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2552 		    M_TEMP, M_WAITOK);
2553 		table = (struct pfr_table *)malloc(sizeof(*table),
2554 		    M_TEMP, M_WAITOK);
2555 		for (i = 0; i < io->size; i++) {
2556 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2557 				free(table, M_TEMP);
2558 				free(ioe, M_TEMP);
2559 				error = EFAULT;
2560 				goto fail;
2561 			}
2562 			switch (ioe->rs_num) {
2563 #ifdef ALTQ
2564 			case PF_RULESET_ALTQ:
				/* ALTQ is global; anchors are not permitted. */
2565 				if (ioe->anchor[0]) {
2566 					free(table, M_TEMP);
2567 					free(ioe, M_TEMP);
2568 					error = EINVAL;
2569 					goto fail;
2570 				}
2571 				if ((error = pf_begin_altq(&ioe->ticket))) {
2572 					free(table, M_TEMP);
2573 					free(ioe, M_TEMP);
2574 					goto fail;
2575 				}
2576 				break;
2577 #endif /* ALTQ */
2578 			case PF_RULESET_TABLE:
2579 				bzero(table, sizeof(*table));
2580 				strlcpy(table->pfrt_anchor, ioe->anchor,
2581 				    sizeof(table->pfrt_anchor));
2582 				if ((error = pfr_ina_begin(table,
2583 				    &ioe->ticket, NULL, 0))) {
2584 					free(table, M_TEMP);
2585 					free(ioe, M_TEMP);
2586 					goto fail;
2587 				}
2588 				break;
2589 			default:
				/* All other rs_num values are rule rulesets. */
2590 				if ((error = pf_begin_rules(&ioe->ticket,
2591 				    ioe->rs_num, ioe->anchor))) {
2592 					free(table, M_TEMP);
2593 					free(ioe, M_TEMP);
2594 					goto fail;
2595 				}
2596 				break;
2597 			}
			/* Return the issued ticket to userland in place. */
2598 			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2599 				free(table, M_TEMP);
2600 				free(ioe, M_TEMP);
2601 				error = EFAULT;
2602 				goto fail;
2603 			}
2604 		}
2605 		free(table, M_TEMP);
2606 		free(ioe, M_TEMP);
2607 		break;
2608 	}
2609
	/*
	 * DIOCXROLLBACK: abort a transaction opened with DIOCXBEGIN.
	 * Each array element's ticket is used to discard the matching
	 * inactive ruleset.  The "really bad" comments mark failures
	 * after which the inactive state may be left inconsistent.
	 */
2610 	case DIOCXROLLBACK: {
2611 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2612 		struct pfioc_trans_e	*ioe;
2613 		struct pfr_table	*table;
2614 		int			 i;
2615
		/* ABI check, as in DIOCXBEGIN. */
2616 		if (io->esize != sizeof(*ioe)) {
2617 			error = ENODEV;
2618 			goto fail;
2619 		}
2620 		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2621 		    M_TEMP, M_WAITOK);
2622 		table = (struct pfr_table *)malloc(sizeof(*table),
2623 		    M_TEMP, M_WAITOK);
2624 		for (i = 0; i < io->size; i++) {
2625 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2626 				free(table, M_TEMP);
2627 				free(ioe, M_TEMP);
2628 				error = EFAULT;
2629 				goto fail;
2630 			}
2631 			switch (ioe->rs_num) {
2632 #ifdef ALTQ
2633 			case PF_RULESET_ALTQ:
				/* ALTQ is global; anchors are not permitted. */
2634 				if (ioe->anchor[0]) {
2635 					free(table, M_TEMP);
2636 					free(ioe, M_TEMP);
2637 					error = EINVAL;
2638 					goto fail;
2639 				}
2640 				if ((error = pf_rollback_altq(ioe->ticket))) {
2641 					free(table, M_TEMP);
2642 					free(ioe, M_TEMP);
2643 					goto fail; /* really bad */
2644 				}
2645 				break;
2646 #endif /* ALTQ */
2647 			case PF_RULESET_TABLE:
2648 				bzero(table, sizeof(*table));
2649 				strlcpy(table->pfrt_anchor, ioe->anchor,
2650 				    sizeof(table->pfrt_anchor));
2651 				if ((error = pfr_ina_rollback(table,
2652 				    ioe->ticket, NULL, 0))) {
2653 					free(table, M_TEMP);
2654 					free(ioe, M_TEMP);
2655 					goto fail; /* really bad */
2656 				}
2657 				break;
2658 			default:
2659 				if ((error = pf_rollback_rules(ioe->ticket,
2660 				    ioe->rs_num, ioe->anchor))) {
2661 					free(table, M_TEMP);
2662 					free(ioe, M_TEMP);
2663 					goto fail; /* really bad */
2664 				}
2665 				break;
2666 			}
2667 		}
2668 		free(table, M_TEMP);
2669 		free(ioe, M_TEMP);
2670 		break;
2671 	}
2672
	/*
	 * DIOCXCOMMIT: atomically commit a transaction opened with
	 * DIOCXBEGIN.  Two passes over the user array: the first only
	 * validates every ticket (so nothing is changed if any element
	 * would fail, yielding EBUSY/EINVAL); the second performs the
	 * actual commits, where a failure is unrecoverable ("really
	 * bad") because earlier elements have already been committed.
	 */
2673 	case DIOCXCOMMIT: {
2674 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2675 		struct pfioc_trans_e	*ioe;
2676 		struct pfr_table	*table;
2677 		struct pf_ruleset	*rs;
2678 		int			 i;
2679
		/* ABI check, as in DIOCXBEGIN. */
2680 		if (io->esize != sizeof(*ioe)) {
2681 			error = ENODEV;
2682 			goto fail;
2683 		}
2684 		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2685 		    M_TEMP, M_WAITOK);
2686 		table = (struct pfr_table *)malloc(sizeof(*table),
2687 		    M_TEMP, M_WAITOK);
2688 		/* first makes sure everything will succeed */
2689 		for (i = 0; i < io->size; i++) {
2690 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2691 				free(table, M_TEMP);
2692 				free(ioe, M_TEMP);
2693 				error = EFAULT;
2694 				goto fail;
2695 			}
2696 			switch (ioe->rs_num) {
2697 #ifdef ALTQ
2698 			case PF_RULESET_ALTQ:
				/* ALTQ is global; anchors are not permitted. */
2699 				if (ioe->anchor[0]) {
2700 					free(table, M_TEMP);
2701 					free(ioe, M_TEMP);
2702 					error = EINVAL;
2703 					goto fail;
2704 				}
				/* Ticket must match the open inactive ALTQ set. */
2705 				if (!altqs_inactive_open || ioe->ticket !=
2706 				    ticket_altqs_inactive) {
2707 					free(table, M_TEMP);
2708 					free(ioe, M_TEMP);
2709 					error = EBUSY;
2710 					goto fail;
2711 				}
2712 				break;
2713 #endif /* ALTQ */
2714 			case PF_RULESET_TABLE:
				/* Ticket must match the ruleset's open table ticket. */
2715 				rs = pf_find_ruleset(ioe->anchor);
2716 				if (rs == NULL || !rs->topen || ioe->ticket !=
2717 				    rs->tticket) {
2718 					free(table, M_TEMP);
2719 					free(ioe, M_TEMP);
2720 					error = EBUSY;
2721 					goto fail;
2722 				}
2723 				break;
2724 			default:
				/* Range-check rs_num before indexing rs->rules[]. */
2725 				if (ioe->rs_num < 0 || ioe->rs_num >=
2726 				    PF_RULESET_MAX) {
2727 					free(table, M_TEMP);
2728 					free(ioe, M_TEMP);
2729 					error = EINVAL;
2730 					goto fail;
2731 				}
2732 				rs = pf_find_ruleset(ioe->anchor);
2733 				if (rs == NULL ||
2734 				    !rs->rules[ioe->rs_num].inactive.open ||
2735 				    rs->rules[ioe->rs_num].inactive.ticket !=
2736 				    ioe->ticket) {
2737 					free(table, M_TEMP);
2738 					free(ioe, M_TEMP);
2739 					error = EBUSY;
2740 					goto fail;
2741 				}
2742 				break;
2743 			}
2744 		}
2745 		/* now do the commit - no errors should happen here */
2746 		for (i = 0; i < io->size; i++) {
			/* Re-copyin: userland memory may not be cached between passes. */
2747 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2748 				free(table, M_TEMP);
2749 				free(ioe, M_TEMP);
2750 				error = EFAULT;
2751 				goto fail;
2752 			}
2753 			switch (ioe->rs_num) {
2754 #ifdef ALTQ
2755 			case PF_RULESET_ALTQ:
2756 				if ((error = pf_commit_altq(ioe->ticket))) {
2757 					free(table, M_TEMP);
2758 					free(ioe, M_TEMP);
2759 					goto fail; /* really bad */
2760 				}
2761 				break;
2762 #endif /* ALTQ */
2763 			case PF_RULESET_TABLE:
2764 				bzero(table, sizeof(*table));
2765 				strlcpy(table->pfrt_anchor, ioe->anchor,
2766 				    sizeof(table->pfrt_anchor));
2767 				if ((error = pfr_ina_commit(table, ioe->ticket,
2768 				    NULL, NULL, 0))) {
2769 					free(table, M_TEMP);
2770 					free(ioe, M_TEMP);
2771 					goto fail; /* really bad */
2772 				}
2773 				break;
2774 			default:
2775 				if ((error = pf_commit_rules(ioe->ticket,
2776 				    ioe->rs_num, ioe->anchor))) {
2777 					free(table, M_TEMP);
2778 					free(ioe, M_TEMP);
2779 					goto fail; /* really bad */
2780 				}
2781 				break;
2782 			}
2783 		}
2784 		free(table, M_TEMP);
2785 		free(ioe, M_TEMP);
2786 		break;
2787 	}
2788
	/*
	 * DIOCGETSRCNODES: export the source-tracking nodes to userland.
	 * With psn_len == 0 it only reports the required buffer size;
	 * otherwise it fills psn_src_nodes up to psn_len bytes, rewriting
	 * kernel-relative fields (rule pointer -> rule number, absolute
	 * times -> relative seconds) in a bounce buffer before copyout.
	 */
2789 	case DIOCGETSRCNODES: {
2790 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2791 		struct pf_src_node	*n, *p, *pstore;
2792 		u_int32_t		 nr = 0;
2793 		int			 space = psn->psn_len;
2794
		/* Size probe: count nodes and report the bytes needed. */
2795 		if (space == 0) {
2796 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2797 				nr++;
2798 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2799 			break;
2800 		}
2801
		/* Bounce buffer so the tree entry itself is never modified. */
2802 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2803
2804 		p = psn->psn_src_nodes;
2805 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2806 			int	secs = time_second, diff;
2807
			/* Stop once the user buffer is full. */
2808 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2809 				break;
2810
2811 			bcopy(n, pstore, sizeof(*pstore));
2812 			if (n->rule.ptr != NULL)
2813 				pstore->rule.nr = n->rule.ptr->nr;
			/* Convert absolute timestamps to age / time-to-expiry. */
2814 			pstore->creation = secs - pstore->creation;
2815 			if (pstore->expire > secs)
2816 				pstore->expire -= secs;
2817 			else
2818 				pstore->expire = 0;
2819
2820 			/* adjust the connection rate estimate */
2821 			diff = secs - n->conn_rate.last;
2822 			if (diff >= n->conn_rate.seconds)
2823 				pstore->conn_rate.count = 0;
2824 			else
2825 				pstore->conn_rate.count -=
2826 				    n->conn_rate.count * diff /
2827 				    n->conn_rate.seconds;
2828
2829 			error = copyout(pstore, p, sizeof(*p));
2830 			if (error) {
2831 				free(pstore, M_TEMP);
2832 				goto fail;
2833 			}
2834 			p++;
2835 			nr++;
2836 		}
		/* Report how many bytes were actually written. */
2837 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2838
2839 		free(pstore, M_TEMP);
2840 		break;
2841 	}
2842
	/*
	 * DIOCCLRSRCNODES: drop every source-tracking node.  First unlink
	 * all states from their src nodes, then force each node to expire
	 * immediately and purge, finally zeroing the global counter.
	 */
2843 	case DIOCCLRSRCNODES: {
2844 		struct pf_src_node	*n;
2845 		struct pf_state		*state;
2846
		/* Detach every state so no node is held alive by a state ref. */
2847 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2848 			state->src_node = NULL;
2849 			state->nat_src_node = NULL;
2850 		}
		/* expire=1 marks each node already expired; states=0 allows purge. */
2851 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2852 			n->expire = 1;
2853 			n->states = 0;
2854 		}
2855 		pf_purge_expired_src_nodes(1);
2856 		pf_status.src_nodes = 0;
2857 		break;
2858 	}
2859
	/*
	 * DIOCKILLSRCNODES: expire the source nodes whose src/dst
	 * addresses match the (possibly negated, masked) criteria in
	 * psnk.  Matching nodes are detached from their states, marked
	 * expired, and purged.  The kill count is returned to userland
	 * in psnk_af (the field is reused as an output, per the pf ABI).
	 */
2860 	case DIOCKILLSRCNODES: {
2861 		struct pf_src_node	*sn;
2862 		struct pf_state		*s;
2863 		struct pfioc_src_node_kill *psnk = \
2864 		    (struct pfioc_src_node_kill *) addr;
2865 		int			killed = 0;
2866
2867 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			/* Node matches when both its addr and raddr satisfy
			 * the user's (neg, addr, mask) filters for its af. */
2868 			if (PF_MATCHA(psnk->psnk_src.neg, \
2869 				&psnk->psnk_src.addr.v.a.addr, \
2870 				&psnk->psnk_src.addr.v.a.mask, \
2871 				&sn->addr, sn->af) &&
2872 			    PF_MATCHA(psnk->psnk_dst.neg, \
2873 				&psnk->psnk_dst.addr.v.a.addr, \
2874 				&psnk->psnk_dst.addr.v.a.mask, \
2875 				&sn->raddr, sn->af)) {
2876 				/* Handle state to src_node linkage */
2877 				if (sn->states != 0) {
2878 					RB_FOREACH(s, pf_state_tree_id,
2879 					    &tree_id) {
2880 						if (s->src_node == sn)
2881 							s->src_node = NULL;
2882 						if (s->nat_src_node == sn)
2883 							s->nat_src_node = NULL;
2884 					}
2885 					sn->states = 0;
2886 				}
				/* expire=1: purge on the next expiry pass below. */
2887 				sn->expire = 1;
2888 				killed++;
2889 			}
2890 		}
2891
2892 		if (killed > 0)
2893 			pf_purge_expired_src_nodes(1);
2894
2895 		psnk->psnk_af = killed;
2896 		break;
2897 	}
2898
	/*
	 * DIOCSETHOSTID: set the pfsync host id; 0 requests a random one.
	 */
2899 	case DIOCSETHOSTID: {
2900 		u_int32_t	*hostid = (u_int32_t *)addr;
2901
2902 		if (*hostid == 0)
2903 			pf_status.hostid = arc4random();
2904 		else
2905 			pf_status.hostid = *hostid;
2906 		break;
2907 	}
2908
	/* DIOCOSFPFLUSH: discard all OS fingerprint entries. */
2909 	case DIOCOSFPFLUSH:
2910 		pf_osfp_flush();
2911 		break;
2912
	/* Interface ioctls: forwarded to the pf_if.c (pfi_*) layer. */

	/* List pf's known interfaces matching pfiio_name; esize is an ABI check. */
2913 	case DIOCIGETIFACES: {
2914 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2915
2916 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2917 			error = ENODEV;
2918 			break;
2919 		}
2920 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2921 		    &io->pfiio_size);
2922 		break;
2923 	}
2924
	/* Set per-interface flags (e.g. skip) on the named interface(s). */
2925 	case DIOCSETIFFLAG: {
2926 		struct pfioc_iface	*io = (struct pfioc_iface *)addr;
2927
2928 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2929 		break;
2930 	}
2931
	/* Clear per-interface flags on the named interface(s). */
2932 	case DIOCCLRIFFLAG: {
2933 		struct pfioc_iface	*io = (struct pfioc_iface *)addr;
2934
2935 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2936 		break;
2937 	}
2938
	/* Unknown ioctl command. */
2939 	default:
2940 		error = ENODEV;
2941 		break;
2942 	}
	/*
	 * Common exit: restore the interrupt priority level raised at
	 * entry and release the pf consistency lock, which was taken for
	 * writing on FWRITE opens and for reading otherwise.
	 */
2943 fail:
2944 	splx(s);
2945 	if (flags & FWRITE)
2946 		rw_exit_write(&pf_consistency_lock);
2947 	else
2948 		rw_exit_read(&pf_consistency_lock);
2949 	return (error);
2950 }
2951