1 /*	$NetBSD: pf_ioctl.c,v 1.2 2004/06/22 14:17:07 itojun Exp $	*/
2 /* $OpenBSD: pf_ioctl.c,v 1.112 2004/03/22 04:54:18 mcbride Exp $ */
3
4 /*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define NPFSYNC 0
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #endif
70
71 #include <net/if.h>
72 #include <net/if_types.h>
73 #include <net/route.h>
74
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip_var.h>
80 #include <netinet/ip_icmp.h>
81
82 #ifdef __OpenBSD__
83 #include <dev/rndvar.h>
84 #endif
85 #include <net/pfvar.h>
86
87 #if NPFSYNC > 0
88 #include <net/if_pfsync.h>
89 #endif /* NPFSYNC > 0 */
90
91 #ifdef INET6
92 #include <netinet/ip6.h>
93 #include <netinet/in_pcb.h>
94 #endif /* INET6 */
95
96 #ifdef ALTQ
97 #include <altq/altq.h>
98 #endif
99
100 void pfattach(int);
101 int pfopen(dev_t, int, int, struct proc *);
102 int pfclose(dev_t, int, int, struct proc *);
103 struct pf_pool *pf_get_pool(char *, char *, u_int32_t,
104 u_int8_t, u_int8_t, u_int8_t, u_int8_t, u_int8_t);
105 int pf_get_ruleset_number(u_int8_t);
106 void pf_init_ruleset(struct pf_ruleset *);
107 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
108 void pf_empty_pool(struct pf_palist *);
109 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
110 #ifdef ALTQ
111 int pf_begin_altq(u_int32_t *);
112 int pf_rollback_altq(u_int32_t);
113 int pf_commit_altq(u_int32_t);
114 #endif /* ALTQ */
115 int pf_begin_rules(u_int32_t *, int, char *, char *);
116 int pf_rollback_rules(u_int32_t, int, char *, char *);
117 int pf_commit_rules(u_int32_t, int, char *, char *);
118
119 #ifdef __NetBSD__
120 const struct cdevsw pf_cdevsw = {
121 pfopen, pfclose, noread, nowrite, pfioctl,
122 nostop, notty, nopoll, nommap, nokqfilter,
123 };
124
125 static int pf_pfil_attach(void);
126 static int pf_pfil_detach(void);
127
128 POOL_INIT(pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
129 &pool_allocator_nointr);
130 POOL_INIT(pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
131 "pfsrctrpl", NULL);
132 POOL_INIT(pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl", NULL);
133 POOL_INIT(pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl", NULL);
134 POOL_INIT(pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
135 "pfpooladdrpl", NULL);
136 #endif
137
138 #ifdef __OpenBSD__
139 extern struct timeout pf_expire_to;
140 #else
141 extern struct callout pf_expire_to;
142 #endif
143
144 struct pf_rule pf_default_rule;
145
146 #define TAGID_MAX 50000
147 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
148 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
149
150 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
151 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
152 #endif
153 static u_int16_t tagname2tag(struct pf_tags *, char *);
154 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
155 static void tag_unref(struct pf_tags *, u_int16_t);
156
157 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
158
159 #ifdef __NetBSD__
160 struct pfil_head pf_ioctl_head;
161 struct pfil_head pf_newif_head;
162 extern struct pfil_head if_pfil;
163 #endif
164
165 void
166 pfattach(int num)
167 {
168 u_int32_t *timeout = pf_default_rule.timeout;
169
170 #ifdef __NetBSD__
171 pfil_head_register(&pf_ioctl_head);
172 pfil_head_register(&pf_newif_head);
173 #endif
174
175 #ifdef __OpenBSD__
176 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
177 &pool_allocator_nointr);
178 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
179 "pfsrctrpl", NULL);
180 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
181 NULL);
182 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
183 NULL);
184 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
185 "pfpooladdrpl", NULL);
186 #endif
187 pfr_initialize();
188 pfi_initialize();
189 pf_osfp_initialize();
190
191 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
192 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
193
194 RB_INIT(&tree_src_tracking);
195 TAILQ_INIT(&pf_anchors);
196 pf_init_ruleset(&pf_main_ruleset);
197 TAILQ_INIT(&pf_altqs[0]);
198 TAILQ_INIT(&pf_altqs[1]);
199 TAILQ_INIT(&pf_pabuf);
200 pf_altqs_active = &pf_altqs[0];
201 pf_altqs_inactive = &pf_altqs[1];
202 TAILQ_INIT(&state_updates);
203
204 /* default rule should never be garbage collected */
205 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
206 pf_default_rule.action = PF_PASS;
207 pf_default_rule.nr = -1;
208
209 /* initialize default timeouts */
210 timeout[PFTM_TCP_FIRST_PACKET] = 120; /* First TCP packet */
211 timeout[PFTM_TCP_OPENING] = 30; /* No response yet */
212 timeout[PFTM_TCP_ESTABLISHED] = 24*60*60; /* Established */
213 timeout[PFTM_TCP_CLOSING] = 15 * 60; /* Half closed */
214 timeout[PFTM_TCP_FIN_WAIT] = 45; /* Got both FINs */
215 timeout[PFTM_TCP_CLOSED] = 90; /* Got a RST */
216 timeout[PFTM_UDP_FIRST_PACKET] = 60; /* First UDP packet */
217 timeout[PFTM_UDP_SINGLE] = 30; /* Unidirectional */
218 timeout[PFTM_UDP_MULTIPLE] = 60; /* Bidirectional */
219 timeout[PFTM_ICMP_FIRST_PACKET] = 20; /* First ICMP packet */
220 timeout[PFTM_ICMP_ERROR_REPLY] = 10; /* Got error response */
221 timeout[PFTM_OTHER_FIRST_PACKET] = 60; /* First packet */
222 timeout[PFTM_OTHER_SINGLE] = 30; /* Unidirectional */
223 timeout[PFTM_OTHER_MULTIPLE] = 60; /* Bidirectional */
224 timeout[PFTM_FRAG] = 30; /* Fragment expire */
225 timeout[PFTM_INTERVAL] = 10; /* Expire interval */
226 timeout[PFTM_SRC_NODE] = 0; /* Source tracking */
227
228 #ifdef __OpenBSD__
229 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
230 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
231 #else
232 callout_init(&pf_expire_to);
233 callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
234 pf_purge_timeout, &pf_expire_to);
235 #endif
236
237 pf_normalize_init();
238 bzero(&pf_status, sizeof(pf_status));
239 pf_status.debug = PF_DEBUG_URGENT;
240
241 /* XXX do our best to avoid a conflict */
242 pf_status.hostid = arc4random();
243 }
244
245 int
246 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
247 {
248 if (minor(dev) >= 1)
249 return (ENXIO);
250 return (0);
251 }
252
253 int
254 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
255 {
256 if (minor(dev) >= 1)
257 return (ENXIO);
258 return (0);
259 }
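/*
 * Userland usage sketch (not part of this file): control commands are
 * issued by opening the pf device node, conventionally /dev/pf, and
 * calling ioctl(2) on it.  A minimal status query, with error handling
 * omitted, looks roughly like:
 *
 *        int fd = open("/dev/pf", O_RDWR);
 *        struct pf_status st;
 *
 *        if (ioctl(fd, DIOCGETSTATUS, &st) == 0)
 *                printf("pf running: %u\n", st.running);
 *
 * (A real program also needs <fcntl.h>, <sys/ioctl.h> and <net/pfvar.h>.)
 * Read-only commands such as DIOCGETSTATUS are also accepted on a
 * descriptor opened without FWRITE; see the permission checks at the
 * top of pfioctl() below.
 */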
260
261 struct pf_pool *
262 pf_get_pool(char *anchorname, char *rulesetname, u_int32_t ticket,
263 u_int8_t rule_action, u_int8_t rule_number, u_int8_t r_last,
264 u_int8_t active, u_int8_t check_ticket)
265 {
266 struct pf_ruleset *ruleset;
267 struct pf_rule *rule;
268 int rs_num;
269
270 ruleset = pf_find_ruleset(anchorname, rulesetname);
271 if (ruleset == NULL)
272 return (NULL);
273 rs_num = pf_get_ruleset_number(rule_action);
274 if (rs_num >= PF_RULESET_MAX)
275 return (NULL);
276 if (active) {
277 if (check_ticket && ticket !=
278 ruleset->rules[rs_num].active.ticket)
279 return (NULL);
280 if (r_last)
281 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
282 pf_rulequeue);
283 else
284 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
285 } else {
286 if (check_ticket && ticket !=
287 ruleset->rules[rs_num].inactive.ticket)
288 return (NULL);
289 if (r_last)
290 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
291 pf_rulequeue);
292 else
293 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
294 }
295 if (!r_last) {
296 while ((rule != NULL) && (rule->nr != rule_number))
297 rule = TAILQ_NEXT(rule, entries);
298 }
299 if (rule == NULL)
300 return (NULL);
301
302 return (&rule->rpool);
303 }
304
305 int
306 pf_get_ruleset_number(u_int8_t action)
307 {
308 switch (action) {
309 case PF_SCRUB:
310 return (PF_RULESET_SCRUB);
311 break;
312 case PF_PASS:
313 case PF_DROP:
314 return (PF_RULESET_FILTER);
315 break;
316 case PF_NAT:
317 case PF_NONAT:
318 return (PF_RULESET_NAT);
319 break;
320 case PF_BINAT:
321 case PF_NOBINAT:
322 return (PF_RULESET_BINAT);
323 break;
324 case PF_RDR:
325 case PF_NORDR:
326 return (PF_RULESET_RDR);
327 break;
328 default:
329 return (PF_RULESET_MAX);
330 break;
331 }
332 }
333
334 void
335 pf_init_ruleset(struct pf_ruleset *ruleset)
336 {
337 int i;
338
339 memset(ruleset, 0, sizeof(struct pf_ruleset));
340 for (i = 0; i < PF_RULESET_MAX; i++) {
341 TAILQ_INIT(&ruleset->rules[i].queues[0]);
342 TAILQ_INIT(&ruleset->rules[i].queues[1]);
343 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
344 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
345 }
346 }
347
348 struct pf_anchor *
349 pf_find_anchor(const char *anchorname)
350 {
351 struct pf_anchor *anchor;
352 int n = -1;
353
354 anchor = TAILQ_FIRST(&pf_anchors);
355 while (anchor != NULL && (n = strcmp(anchor->name, anchorname)) < 0)
356 anchor = TAILQ_NEXT(anchor, entries);
357 if (n == 0)
358 return (anchor);
359 else
360 return (NULL);
361 }
362
363 struct pf_ruleset *
364 pf_find_ruleset(char *anchorname, char *rulesetname)
365 {
366 struct pf_anchor *anchor;
367 struct pf_ruleset *ruleset;
368
369 if (!anchorname[0] && !rulesetname[0])
370 return (&pf_main_ruleset);
371 if (!anchorname[0] || !rulesetname[0])
372 return (NULL);
373 anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
374 rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
375 anchor = pf_find_anchor(anchorname);
376 if (anchor == NULL)
377 return (NULL);
378 ruleset = TAILQ_FIRST(&anchor->rulesets);
379 while (ruleset != NULL && strcmp(ruleset->name, rulesetname) < 0)
380 ruleset = TAILQ_NEXT(ruleset, entries);
381 if (ruleset != NULL && !strcmp(ruleset->name, rulesetname))
382 return (ruleset);
383 else
384 return (NULL);
385 }
386
387 struct pf_ruleset *
388 pf_find_or_create_ruleset(char anchorname[PF_ANCHOR_NAME_SIZE],
389 char rulesetname[PF_RULESET_NAME_SIZE])
390 {
391 struct pf_anchor *anchor, *a;
392 struct pf_ruleset *ruleset, *r;
393
394 if (!anchorname[0] && !rulesetname[0])
395 return (&pf_main_ruleset);
396 if (!anchorname[0] || !rulesetname[0])
397 return (NULL);
398 anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
399 rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
400 a = TAILQ_FIRST(&pf_anchors);
401 while (a != NULL && strcmp(a->name, anchorname) < 0)
402 a = TAILQ_NEXT(a, entries);
403 if (a != NULL && !strcmp(a->name, anchorname))
404 anchor = a;
405 else {
406 anchor = (struct pf_anchor *)malloc(sizeof(struct pf_anchor),
407 M_TEMP, M_NOWAIT);
408 if (anchor == NULL)
409 return (NULL);
410 memset(anchor, 0, sizeof(struct pf_anchor));
411 bcopy(anchorname, anchor->name, sizeof(anchor->name));
412 TAILQ_INIT(&anchor->rulesets);
413 if (a != NULL)
414 TAILQ_INSERT_BEFORE(a, anchor, entries);
415 else
416 TAILQ_INSERT_TAIL(&pf_anchors, anchor, entries);
417 }
418 r = TAILQ_FIRST(&anchor->rulesets);
419 while (r != NULL && strcmp(r->name, rulesetname) < 0)
420 r = TAILQ_NEXT(r, entries);
421 if (r != NULL && !strcmp(r->name, rulesetname))
422 return (r);
423 ruleset = (struct pf_ruleset *)malloc(sizeof(struct pf_ruleset),
424 M_TEMP, M_NOWAIT);
425 if (ruleset != NULL) {
426 pf_init_ruleset(ruleset);
427 bcopy(rulesetname, ruleset->name, sizeof(ruleset->name));
428 ruleset->anchor = anchor;
429 if (r != NULL)
430 TAILQ_INSERT_BEFORE(r, ruleset, entries);
431 else
432 TAILQ_INSERT_TAIL(&anchor->rulesets, ruleset, entries);
433 }
434 return (ruleset);
435 }
436
437 void
438 pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
439 {
440 struct pf_anchor *anchor;
441 int i;
442
443 if (ruleset == NULL || ruleset->anchor == NULL || ruleset->tables > 0 ||
444 ruleset->topen)
445 return;
446 for (i = 0; i < PF_RULESET_MAX; ++i)
447 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
448 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
449 ruleset->rules[i].inactive.open)
450 return;
451
452 anchor = ruleset->anchor;
453 TAILQ_REMOVE(&anchor->rulesets, ruleset, entries);
454 free(ruleset, M_TEMP);
455
456 if (TAILQ_EMPTY(&anchor->rulesets)) {
457 TAILQ_REMOVE(&pf_anchors, anchor, entries);
458 free(anchor, M_TEMP);
459 pf_update_anchor_rules();
460 }
461 }
462
463 void
464 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
465 {
466 struct pf_pooladdr *mv_pool_pa;
467
468 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
469 TAILQ_REMOVE(poola, mv_pool_pa, entries);
470 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
471 }
472 }
473
474 void
475 pf_empty_pool(struct pf_palist *poola)
476 {
477 struct pf_pooladdr *empty_pool_pa;
478
479 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
480 pfi_dynaddr_remove(&empty_pool_pa->addr);
481 pf_tbladdr_remove(&empty_pool_pa->addr);
482 pfi_detach_rule(empty_pool_pa->kif);
483 TAILQ_REMOVE(poola, empty_pool_pa, entries);
484 pool_put(&pf_pooladdr_pl, empty_pool_pa);
485 }
486 }
487
488 void
489 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
490 {
491 if (rulequeue != NULL) {
492 if (rule->states <= 0) {
493 /*
494 * XXX - we need to remove the table *before* detaching
495 * the rule to make sure the table code does not delete
496 * the anchor under our feet.
497 */
498 pf_tbladdr_remove(&rule->src.addr);
499 pf_tbladdr_remove(&rule->dst.addr);
500 }
501 TAILQ_REMOVE(rulequeue, rule, entries);
502 rule->entries.tqe_prev = NULL;
503 rule->nr = -1;
504 }
505
506 if (rule->states > 0 || rule->src_nodes > 0 ||
507 rule->entries.tqe_prev != NULL)
508 return;
509 pf_tag_unref(rule->tag);
510 pf_tag_unref(rule->match_tag);
511 #ifdef ALTQ
512 if (rule->pqid != rule->qid)
513 pf_qid_unref(rule->pqid);
514 pf_qid_unref(rule->qid);
515 #endif
516 pfi_dynaddr_remove(&rule->src.addr);
517 pfi_dynaddr_remove(&rule->dst.addr);
518 if (rulequeue == NULL) {
519 pf_tbladdr_remove(&rule->src.addr);
520 pf_tbladdr_remove(&rule->dst.addr);
521 }
522 pfi_detach_rule(rule->kif);
523 pf_empty_pool(&rule->rpool.list);
524 pool_put(&pf_rule_pl, rule);
525 }
526
527 static u_int16_t
528 tagname2tag(struct pf_tags *head, char *tagname)
529 {
530 struct pf_tagname *tag, *p = NULL;
531 u_int16_t new_tagid = 1;
532
533 TAILQ_FOREACH(tag, head, entries)
534 if (strcmp(tagname, tag->name) == 0) {
535 tag->ref++;
536 return (tag->tag);
537 }
538
539 /*
540 * to avoid fragmentation, we do a linear search from the beginning
541 * and take the first free slot we find. if there is none or the list
542 * is empty, append a new entry at the end.
543 */
544
545 /* new entry */
546 if (!TAILQ_EMPTY(head))
547 for (p = TAILQ_FIRST(head); p != NULL &&
548 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
549 new_tagid = p->tag + 1;
550
551 if (new_tagid > TAGID_MAX)
552 return (0);
553
554 /* allocate and fill new struct pf_tagname */
555 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
556 M_TEMP, M_NOWAIT);
557 if (tag == NULL)
558 return (0);
559 bzero(tag, sizeof(struct pf_tagname));
560 strlcpy(tag->name, tagname, sizeof(tag->name));
561 tag->tag = new_tagid;
562 tag->ref++;
563
564 if (p != NULL) /* insert new entry before p */
565 TAILQ_INSERT_BEFORE(p, tag, entries);
566 else /* either list empty or no free slot in between */
567 TAILQ_INSERT_TAIL(head, tag, entries);
568
569 return (tag->tag);
570 }
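/*
 * Illustration of the id allocation above: with existing tags 1, 2 and 4
 * the scan stops at the entry holding 4 with new_tagid == 3, so the new
 * name is inserted before that entry and reuses id 3.  With a dense run
 * 1..n the loop walks off the end and the entry is appended with id n+1,
 * failing only once TAGID_MAX would be exceeded.
 */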
571
572 static void
573 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
574 {
575 struct pf_tagname *tag;
576
577 TAILQ_FOREACH(tag, head, entries)
578 if (tag->tag == tagid) {
579 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
580 return;
581 }
582 }
583
584 static void
585 tag_unref(struct pf_tags *head, u_int16_t tag)
586 {
587 struct pf_tagname *p, *next;
588
589 if (tag == 0)
590 return;
591
592 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
593 next = TAILQ_NEXT(p, entries);
594 if (tag == p->tag) {
595 if (--p->ref == 0) {
596 TAILQ_REMOVE(head, p, entries);
597 free(p, M_TEMP);
598 }
599 break;
600 }
601 }
602 }
603
604 u_int16_t
605 pf_tagname2tag(char *tagname)
606 {
607 return (tagname2tag(&pf_tags, tagname));
608 }
609
610 void
611 pf_tag2tagname(u_int16_t tagid, char *p)
612 {
613 return (tag2tagname(&pf_tags, tagid, p));
614 }
615
616 void
617 pf_tag_unref(u_int16_t tag)
618 {
619 return (tag_unref(&pf_tags, tag));
620 }
621
622 #ifdef ALTQ
623 u_int32_t
624 pf_qname2qid(char *qname)
625 {
626 return ((u_int32_t)tagname2tag(&pf_qids, qname));
627 }
628
629 void
630 pf_qid2qname(u_int32_t qid, char *p)
631 {
632 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
633 }
634
635 void
636 pf_qid_unref(u_int32_t qid)
637 {
638 return (tag_unref(&pf_qids, (u_int16_t)qid));
639 }
640
641 int
642 pf_begin_altq(u_int32_t *ticket)
643 {
644 struct pf_altq *altq;
645 int error = 0;
646
647 /* Purge the old altq list */
648 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
649 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
650 if (altq->qname[0] == 0) {
651 /* detach and destroy the discipline */
652 error = altq_remove(altq);
653 } else
654 pf_qid_unref(altq->qid);
655 pool_put(&pf_altq_pl, altq);
656 }
657 if (error)
658 return (error);
659 *ticket = ++ticket_altqs_inactive;
660 altqs_inactive_open = 1;
661 return (0);
662 }
663
664 int
665 pf_rollback_altq(u_int32_t ticket)
666 {
667 struct pf_altq *altq;
668 int error = 0;
669
670 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
671 return (0);
672 /* Purge the old altq list */
673 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
674 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
675 if (altq->qname[0] == 0) {
676 /* detach and destroy the discipline */
677 error = altq_remove(altq);
678 } else
679 pf_qid_unref(altq->qid);
680 pool_put(&pf_altq_pl, altq);
681 }
682 altqs_inactive_open = 0;
683 return (error);
684 }
685
686 int
687 pf_commit_altq(u_int32_t ticket)
688 {
689 struct pf_altqqueue *old_altqs;
690 struct pf_altq *altq;
691 int s, err, error = 0;
692
693 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
694 return (EBUSY);
695
696 /* swap altqs, keep the old. */
697 s = splsoftnet();
698 old_altqs = pf_altqs_active;
699 pf_altqs_active = pf_altqs_inactive;
700 pf_altqs_inactive = old_altqs;
701 ticket_altqs_active = ticket_altqs_inactive;
702
703 /* Attach new disciplines */
704 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
705 if (altq->qname[0] == 0) {
706 /* attach the discipline */
707 error = altq_pfattach(altq);
708 if (error) {
709 splx(s);
710 return (error);
711 }
712 }
713 }
714
715 /* Purge the old altq list */
716 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
717 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
718 if (altq->qname[0] == 0) {
719 /* detach and destroy the discipline */
720 err = altq_pfdetach(altq);
721 if (err != 0 && error == 0)
722 error = err;
723 err = altq_remove(altq);
724 if (err != 0 && error == 0)
725 error = err;
726 } else
727 pf_qid_unref(altq->qid);
728 pool_put(&pf_altq_pl, altq);
729 }
730 splx(s);
731
732 altqs_inactive_open = 0;
733 return (error);
734 }
735 #endif /* ALTQ */
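/*
 * The ALTQ helpers above follow the same two-list/ticket transaction
 * scheme as the rule helpers below: DIOCBEGINALTQS opens the inactive
 * list, DIOCADDALTQ appends to it, and DIOCCOMMITALTQS swaps it in; see
 * the corresponding cases in pfioctl().
 */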
736
737 int
738 pf_begin_rules(u_int32_t *ticket, int rs_num, char *anchor, char *ruleset)
739 {
740 struct pf_ruleset *rs;
741 struct pf_rule *rule;
742
743 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
744 return (EINVAL);
745 rs = pf_find_or_create_ruleset(anchor, ruleset);
746 if (rs == NULL)
747 return (EINVAL);
748 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
749 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
750 *ticket = ++rs->rules[rs_num].inactive.ticket;
751 rs->rules[rs_num].inactive.open = 1;
752 return (0);
753 }
754
755 int
756 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
757 {
758 struct pf_ruleset *rs;
759 struct pf_rule *rule;
760
761 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
762 return (EINVAL);
763 rs = pf_find_ruleset(anchor, ruleset);
764 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
765 rs->rules[rs_num].inactive.ticket != ticket)
766 return (0);
767 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
768 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
769 rs->rules[rs_num].inactive.open = 0;
770 return (0);
771 }
772
773 int
774 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
775 {
776 struct pf_ruleset *rs;
777 struct pf_rule *rule;
778 struct pf_rulequeue *old_rules;
779 int s;
780
781 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
782 return (EINVAL);
783 rs = pf_find_ruleset(anchor, ruleset);
784 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
785 ticket != rs->rules[rs_num].inactive.ticket)
786 return (EBUSY);
787
788 /* Swap rules, keep the old. */
789 s = splsoftnet();
790 old_rules = rs->rules[rs_num].active.ptr;
791 rs->rules[rs_num].active.ptr =
792 rs->rules[rs_num].inactive.ptr;
793 rs->rules[rs_num].inactive.ptr = old_rules;
794 rs->rules[rs_num].active.ticket =
795 rs->rules[rs_num].inactive.ticket;
796 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
797
798 /* Purge the old rule list. */
799 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
800 pf_rm_rule(old_rules, rule);
801 rs->rules[rs_num].inactive.open = 0;
802 pf_remove_if_empty_ruleset(rs);
803 pf_update_anchor_rules();
804 splx(s);
805 return (0);
806 }
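/*
 * The begin/rollback/commit helpers above implement the ticket-based
 * ruleset transaction driven from userland through DIOCBEGINRULES,
 * DIOCADDRULE and DIOCCOMMITRULES (handled in pfioctl() below).  A
 * hedged sketch of a loader, assuming fd is open on the pf device and
 * r is a prepared struct pf_rule, with error handling omitted:
 *
 *        struct pfioc_rule pr;
 *        struct pfioc_pooladdr pp;
 *
 *        memset(&pr, 0, sizeof(pr));             (empty anchor = main ruleset)
 *        pr.rule.action = PF_PASS;               (selects the filter ruleset)
 *        ioctl(fd, DIOCBEGINRULES, &pr);         (returns pr.ticket)
 *        ioctl(fd, DIOCBEGINADDRS, &pp);         (returns pp.ticket)
 *        pr.pool_ticket = pp.ticket;
 *        pr.rule = r;
 *        ioctl(fd, DIOCADDRULE, &pr);            (repeat for each rule)
 *        ioctl(fd, DIOCCOMMITRULES, &pr);        (swap inactive to active)
 */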
807
808 int
809 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
810 {
811 struct pf_pooladdr *pa = NULL;
812 struct pf_pool *pool = NULL;
813 int s;
814 int error = 0;
815
816 /* XXX keep in sync with switch() below */
817 if (securelevel > 1)
818 switch (cmd) {
819 case DIOCGETRULES:
820 case DIOCGETRULE:
821 case DIOCGETADDRS:
822 case DIOCGETADDR:
823 case DIOCGETSTATE:
824 case DIOCSETSTATUSIF:
825 case DIOCGETSTATUS:
826 case DIOCCLRSTATUS:
827 case DIOCNATLOOK:
828 case DIOCSETDEBUG:
829 case DIOCGETSTATES:
830 case DIOCGETTIMEOUT:
831 case DIOCCLRRULECTRS:
832 case DIOCGETLIMIT:
833 case DIOCGETALTQS:
834 case DIOCGETALTQ:
835 case DIOCGETQSTATS:
836 case DIOCGETANCHORS:
837 case DIOCGETANCHOR:
838 case DIOCGETRULESETS:
839 case DIOCGETRULESET:
840 case DIOCRGETTABLES:
841 case DIOCRGETTSTATS:
842 case DIOCRCLRTSTATS:
843 case DIOCRCLRADDRS:
844 case DIOCRADDADDRS:
845 case DIOCRDELADDRS:
846 case DIOCRSETADDRS:
847 case DIOCRGETADDRS:
848 case DIOCRGETASTATS:
849 case DIOCRCLRASTATS:
850 case DIOCRTSTADDRS:
851 case DIOCOSFPGET:
852 case DIOCGETSRCNODES:
853 case DIOCCLRSRCNODES:
854 case DIOCIGETIFACES:
855 case DIOCICLRISTATS:
856 break;
857 case DIOCRCLRTABLES:
858 case DIOCRADDTABLES:
859 case DIOCRDELTABLES:
860 case DIOCRSETTFLAGS:
861 if (((struct pfioc_table *)addr)->pfrio_flags &
862 PFR_FLAG_DUMMY)
863 break; /* dummy operation ok */
864 return (EPERM);
865 default:
866 return (EPERM);
867 }
868
869 if (!(flags & FWRITE))
870 switch (cmd) {
871 case DIOCGETRULES:
872 case DIOCGETRULE:
873 case DIOCGETADDRS:
874 case DIOCGETADDR:
875 case DIOCGETSTATE:
876 case DIOCGETSTATUS:
877 case DIOCGETSTATES:
878 case DIOCGETTIMEOUT:
879 case DIOCGETLIMIT:
880 case DIOCGETALTQS:
881 case DIOCGETALTQ:
882 case DIOCGETQSTATS:
883 case DIOCGETANCHORS:
884 case DIOCGETANCHOR:
885 case DIOCGETRULESETS:
886 case DIOCGETRULESET:
887 case DIOCRGETTABLES:
888 case DIOCRGETTSTATS:
889 case DIOCRGETADDRS:
890 case DIOCRGETASTATS:
891 case DIOCRTSTADDRS:
892 case DIOCOSFPGET:
893 case DIOCGETSRCNODES:
894 case DIOCIGETIFACES:
895 break;
896 case DIOCRCLRTABLES:
897 case DIOCRADDTABLES:
898 case DIOCRDELTABLES:
899 case DIOCRCLRTSTATS:
900 case DIOCRCLRADDRS:
901 case DIOCRADDADDRS:
902 case DIOCRDELADDRS:
903 case DIOCRSETADDRS:
904 case DIOCRSETTFLAGS:
905 if (((struct pfioc_table *)addr)->pfrio_flags &
906 PFR_FLAG_DUMMY)
907 break; /* dummy operation ok */
908 return (EACCES);
909 default:
910 return (EACCES);
911 }
912
913 switch (cmd) {
914
915 case DIOCSTART:
916 if (pf_status.running)
917 error = EEXIST;
918 else {
919 #ifdef __NetBSD__
920 error = pf_pfil_attach();
921 if (error)
922 break;
923 #endif
924 pf_status.running = 1;
925 pf_status.since = time.tv_sec;
926 if (pf_status.stateid == 0) {
927 pf_status.stateid = time.tv_sec;
928 pf_status.stateid = pf_status.stateid << 32;
929 }
930 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
931 }
932 break;
933
934 case DIOCSTOP:
935 if (!pf_status.running)
936 error = ENOENT;
937 else {
938 #ifdef __NetBSD__
939 error = pf_pfil_detach();
940 if (error)
941 break;
942 #endif
943 pf_status.running = 0;
944 pf_status.since = time.tv_sec;
945 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
946 }
947 break;
948
949 case DIOCBEGINRULES: {
950 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
951
952 error = pf_begin_rules(&pr->ticket, pf_get_ruleset_number(
953 pr->rule.action), pr->anchor, pr->ruleset);
954 break;
955 }
956
957 case DIOCADDRULE: {
958 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
959 struct pf_ruleset *ruleset;
960 struct pf_rule *rule, *tail;
961 struct pf_pooladdr *pa;
962 int rs_num;
963
964 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
965 if (ruleset == NULL) {
966 error = EINVAL;
967 break;
968 }
969 rs_num = pf_get_ruleset_number(pr->rule.action);
970 if (rs_num >= PF_RULESET_MAX) {
971 error = EINVAL;
972 break;
973 }
974 if (pr->rule.anchorname[0] && ruleset != &pf_main_ruleset) {
975 error = EINVAL;
976 break;
977 }
978 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
979 error = EINVAL;
980 break;
981 }
982 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
983 error = EBUSY;
984 break;
985 }
986 if (pr->pool_ticket != ticket_pabuf) {
987 error = EBUSY;
988 break;
989 }
990 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
991 if (rule == NULL) {
992 error = ENOMEM;
993 break;
994 }
995 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
996 rule->anchor = NULL;
997 rule->kif = NULL;
998 TAILQ_INIT(&rule->rpool.list);
999 /* initialize refcounting */
1000 rule->states = 0;
1001 rule->src_nodes = 0;
1002 rule->entries.tqe_prev = NULL;
1003 #ifndef INET
1004 if (rule->af == AF_INET) {
1005 pool_put(&pf_rule_pl, rule);
1006 error = EAFNOSUPPORT;
1007 break;
1008 }
1009 #endif /* INET */
1010 #ifndef INET6
1011 if (rule->af == AF_INET6) {
1012 pool_put(&pf_rule_pl, rule);
1013 error = EAFNOSUPPORT;
1014 break;
1015 }
1016 #endif /* INET6 */
1017 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1018 pf_rulequeue);
1019 if (tail)
1020 rule->nr = tail->nr + 1;
1021 else
1022 rule->nr = 0;
1023 if (rule->ifname[0]) {
1024 rule->kif = pfi_attach_rule(rule->ifname);
1025 if (rule->kif == NULL) {
1026 pool_put(&pf_rule_pl, rule);
1027 error = EINVAL;
1028 break;
1029 }
1030 }
1031
1032 #ifdef ALTQ
1033 /* set queue IDs */
1034 if (rule->qname[0] != 0) {
1035 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1036 error = EBUSY;
1037 else if (rule->pqname[0] != 0) {
1038 if ((rule->pqid =
1039 pf_qname2qid(rule->pqname)) == 0)
1040 error = EBUSY;
1041 } else
1042 rule->pqid = rule->qid;
1043 }
1044 #endif
1045 if (rule->tagname[0])
1046 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1047 error = EBUSY;
1048 if (rule->match_tagname[0])
1049 if ((rule->match_tag =
1050 pf_tagname2tag(rule->match_tagname)) == 0)
1051 error = EBUSY;
1052 if (rule->rt && !rule->direction)
1053 error = EINVAL;
1054 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1055 error = EINVAL;
1056 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1057 error = EINVAL;
1058 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1059 error = EINVAL;
1060 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1061 error = EINVAL;
1062 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1063 if (pf_tbladdr_setup(ruleset, &pa->addr))
1064 error = EINVAL;
1065
1066 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1067 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1068 (rule->action == PF_BINAT)) && !rule->anchorname[0]) ||
1069 (rule->rt > PF_FASTROUTE)) &&
1070 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1071 error = EINVAL;
1072
1073 if (error) {
1074 pf_rm_rule(NULL, rule);
1075 break;
1076 }
1077 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1078 rule->evaluations = rule->packets = rule->bytes = 0;
1079 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1080 rule, entries);
1081 break;
1082 }
1083
1084 case DIOCCOMMITRULES: {
1085 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1086
1087 error = pf_commit_rules(pr->ticket, pf_get_ruleset_number(
1088 pr->rule.action), pr->anchor, pr->ruleset);
1089 break;
1090 }
1091
1092 case DIOCGETRULES: {
1093 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1094 struct pf_ruleset *ruleset;
1095 struct pf_rule *tail;
1096 int rs_num;
1097
1098 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
1099 if (ruleset == NULL) {
1100 error = EINVAL;
1101 break;
1102 }
1103 rs_num = pf_get_ruleset_number(pr->rule.action);
1104 if (rs_num >= PF_RULESET_MAX) {
1105 error = EINVAL;
1106 break;
1107 }
1108 s = splsoftnet();
1109 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1110 pf_rulequeue);
1111 if (tail)
1112 pr->nr = tail->nr + 1;
1113 else
1114 pr->nr = 0;
1115 pr->ticket = ruleset->rules[rs_num].active.ticket;
1116 splx(s);
1117 break;
1118 }
1119
1120 case DIOCGETRULE: {
1121 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1122 struct pf_ruleset *ruleset;
1123 struct pf_rule *rule;
1124 int rs_num, i;
1125
1126 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
1127 if (ruleset == NULL) {
1128 error = EINVAL;
1129 break;
1130 }
1131 rs_num = pf_get_ruleset_number(pr->rule.action);
1132 if (rs_num >= PF_RULESET_MAX) {
1133 error = EINVAL;
1134 break;
1135 }
1136 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1137 error = EBUSY;
1138 break;
1139 }
1140 s = splsoftnet();
1141 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1142 while ((rule != NULL) && (rule->nr != pr->nr))
1143 rule = TAILQ_NEXT(rule, entries);
1144 if (rule == NULL) {
1145 error = EBUSY;
1146 splx(s);
1147 break;
1148 }
1149 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1150 pfi_dynaddr_copyout(&pr->rule.src.addr);
1151 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1152 pf_tbladdr_copyout(&pr->rule.src.addr);
1153 pf_tbladdr_copyout(&pr->rule.dst.addr);
1154 for (i = 0; i < PF_SKIP_COUNT; ++i)
1155 if (rule->skip[i].ptr == NULL)
1156 pr->rule.skip[i].nr = -1;
1157 else
1158 pr->rule.skip[i].nr =
1159 rule->skip[i].ptr->nr;
1160 splx(s);
1161 break;
1162 }
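/*
 * DIOCGETRULES and DIOCGETRULE are used as a pair: the first returns the
 * rule count and a read ticket, the second is then issued once per rule
 * number under that ticket.  Sketch, error handling omitted:
 *
 *        struct pfioc_rule pr;
 *        u_int32_t i, n;
 *
 *        memset(&pr, 0, sizeof(pr));
 *        pr.rule.action = PF_PASS;
 *        ioctl(fd, DIOCGETRULES, &pr);           (pr.nr = count, pr.ticket set)
 *        n = pr.nr;
 *        for (i = 0; i < n; i++) {
 *                pr.nr = i;
 *                ioctl(fd, DIOCGETRULE, &pr);    (pr.rule filled in)
 *        }
 */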
1163
1164 case DIOCCHANGERULE: {
1165 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1166 struct pf_ruleset *ruleset;
1167 struct pf_rule *oldrule = NULL, *newrule = NULL;
1168 u_int32_t nr = 0;
1169 int rs_num;
1170
1171 if (!(pcr->action == PF_CHANGE_REMOVE ||
1172 pcr->action == PF_CHANGE_GET_TICKET) &&
1173 pcr->pool_ticket != ticket_pabuf) {
1174 error = EBUSY;
1175 break;
1176 }
1177
1178 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1179 pcr->action > PF_CHANGE_GET_TICKET) {
1180 error = EINVAL;
1181 break;
1182 }
1183 ruleset = pf_find_ruleset(pcr->anchor, pcr->ruleset);
1184 if (ruleset == NULL) {
1185 error = EINVAL;
1186 break;
1187 }
1188 rs_num = pf_get_ruleset_number(pcr->rule.action);
1189 if (rs_num >= PF_RULESET_MAX) {
1190 error = EINVAL;
1191 break;
1192 }
1193
1194 if (pcr->action == PF_CHANGE_GET_TICKET) {
1195 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1196 break;
1197 } else {
1198 if (pcr->ticket !=
1199 ruleset->rules[rs_num].active.ticket) {
1200 error = EINVAL;
1201 break;
1202 }
1203 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1204 error = EINVAL;
1205 break;
1206 }
1207 }
1208
1209 if (pcr->action != PF_CHANGE_REMOVE) {
1210 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1211 if (newrule == NULL) {
1212 error = ENOMEM;
1213 break;
1214 }
1215 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1216 TAILQ_INIT(&newrule->rpool.list);
1217 /* initialize refcounting */
1218 newrule->states = 0;
1219 newrule->entries.tqe_prev = NULL;
1220 #ifndef INET
1221 if (newrule->af == AF_INET) {
1222 pool_put(&pf_rule_pl, newrule);
1223 error = EAFNOSUPPORT;
1224 break;
1225 }
1226 #endif /* INET */
1227 #ifndef INET6
1228 if (newrule->af == AF_INET6) {
1229 pool_put(&pf_rule_pl, newrule);
1230 error = EAFNOSUPPORT;
1231 break;
1232 }
1233 #endif /* INET6 */
1234 if (newrule->ifname[0]) {
1235 newrule->kif = pfi_attach_rule(newrule->ifname);
1236 if (newrule->kif == NULL) {
1237 pool_put(&pf_rule_pl, newrule);
1238 error = EINVAL;
1239 break;
1240 }
1241 } else
1242 newrule->kif = NULL;
1243
1244 #ifdef ALTQ
1245 /* set queue IDs */
1246 if (newrule->qname[0] != 0) {
1247 if ((newrule->qid =
1248 pf_qname2qid(newrule->qname)) == 0)
1249 error = EBUSY;
1250 else if (newrule->pqname[0] != 0) {
1251 if ((newrule->pqid =
1252 pf_qname2qid(newrule->pqname)) == 0)
1253 error = EBUSY;
1254 } else
1255 newrule->pqid = newrule->qid;
1256 }
1257 #endif
1258 if (newrule->tagname[0])
1259 if ((newrule->tag =
1260 pf_tagname2tag(newrule->tagname)) == 0)
1261 error = EBUSY;
1262 if (newrule->match_tagname[0])
1263 if ((newrule->match_tag = pf_tagname2tag(
1264 newrule->match_tagname)) == 0)
1265 error = EBUSY;
1266
1267 if (newrule->rt && !newrule->direction)
1268 error = EINVAL;
1269 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1270 error = EINVAL;
1271 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1272 error = EINVAL;
1273 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1274 error = EINVAL;
1275 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1276 error = EINVAL;
1277
1278 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1279 if (((((newrule->action == PF_NAT) ||
1280 (newrule->action == PF_RDR) ||
1281 (newrule->action == PF_BINAT) ||
1282 (newrule->rt > PF_FASTROUTE)) &&
1283 !newrule->anchorname[0])) &&
1284 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1285 error = EINVAL;
1286
1287 if (error) {
1288 pf_rm_rule(NULL, newrule);
1289 break;
1290 }
1291 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1292 newrule->evaluations = newrule->packets = 0;
1293 newrule->bytes = 0;
1294 }
1295 pf_empty_pool(&pf_pabuf);
1296
1297 s = splsoftnet();
1298
1299 if (pcr->action == PF_CHANGE_ADD_HEAD)
1300 oldrule = TAILQ_FIRST(
1301 ruleset->rules[rs_num].active.ptr);
1302 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1303 oldrule = TAILQ_LAST(
1304 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1305 else {
1306 oldrule = TAILQ_FIRST(
1307 ruleset->rules[rs_num].active.ptr);
1308 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1309 oldrule = TAILQ_NEXT(oldrule, entries);
1310 if (oldrule == NULL) {
1311 pf_rm_rule(NULL, newrule);
1312 error = EINVAL;
1313 splx(s);
1314 break;
1315 }
1316 }
1317
1318 if (pcr->action == PF_CHANGE_REMOVE)
1319 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1320 else {
1321 if (oldrule == NULL)
1322 TAILQ_INSERT_TAIL(
1323 ruleset->rules[rs_num].active.ptr,
1324 newrule, entries);
1325 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1326 pcr->action == PF_CHANGE_ADD_BEFORE)
1327 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1328 else
1329 TAILQ_INSERT_AFTER(
1330 ruleset->rules[rs_num].active.ptr,
1331 oldrule, newrule, entries);
1332 }
1333
1334 nr = 0;
1335 TAILQ_FOREACH(oldrule,
1336 ruleset->rules[rs_num].active.ptr, entries)
1337 oldrule->nr = nr++;
1338
1339 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1340 pf_remove_if_empty_ruleset(ruleset);
1341 pf_update_anchor_rules();
1342
1343 ruleset->rules[rs_num].active.ticket++;
1344 splx(s);
1345 break;
1346 }
1347
1348 case DIOCCLRSTATES: {
1349 struct pf_state *state;
1350 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1351 int killed = 0;
1352
1353 s = splsoftnet();
1354 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1355 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1356 state->u.s.kif->pfik_name)) {
1357 state->timeout = PFTM_PURGE;
1358 #if NPFSYNC
1359 /* don't send out individual delete messages */
1360 state->sync_flags = PFSTATE_NOSYNC;
1361 #endif
1362 killed++;
1363 }
1364 }
1365 pf_purge_expired_states();
1366 pf_status.states = 0;
1367 psk->psk_af = killed;
1368 #if NPFSYNC
1369 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1370 #endif
1371 splx(s);
1372 break;
1373 }
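/*
 * Example use of DIOCCLRSTATES: flushing every state that was created on
 * one interface.  "fxp0" is only a placeholder name; an empty psk_ifname
 * flushes all states.  Sketch:
 *
 *        struct pfioc_state_kill psk;
 *
 *        memset(&psk, 0, sizeof(psk));
 *        strlcpy(psk.psk_ifname, "fxp0", sizeof(psk.psk_ifname));
 *        ioctl(fd, DIOCCLRSTATES, &psk);         (psk.psk_af = number killed)
 */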
1374
1375 case DIOCKILLSTATES: {
1376 struct pf_state *state;
1377 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1378 int killed = 0;
1379
1380 s = splsoftnet();
1381 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1382 if ((!psk->psk_af || state->af == psk->psk_af)
1383 && (!psk->psk_proto || psk->psk_proto ==
1384 state->proto) &&
1385 PF_MATCHA(psk->psk_src.not,
1386 &psk->psk_src.addr.v.a.addr,
1387 &psk->psk_src.addr.v.a.mask,
1388 &state->lan.addr, state->af) &&
1389 PF_MATCHA(psk->psk_dst.not,
1390 &psk->psk_dst.addr.v.a.addr,
1391 &psk->psk_dst.addr.v.a.mask,
1392 &state->ext.addr, state->af) &&
1393 (psk->psk_src.port_op == 0 ||
1394 pf_match_port(psk->psk_src.port_op,
1395 psk->psk_src.port[0], psk->psk_src.port[1],
1396 state->lan.port)) &&
1397 (psk->psk_dst.port_op == 0 ||
1398 pf_match_port(psk->psk_dst.port_op,
1399 psk->psk_dst.port[0], psk->psk_dst.port[1],
1400 state->ext.port)) &&
1401 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1402 state->u.s.kif->pfik_name))) {
1403 state->timeout = PFTM_PURGE;
1404 killed++;
1405 }
1406 }
1407 pf_purge_expired_states();
1408 splx(s);
1409 psk->psk_af = killed;
1410 break;
1411 }
1412
1413 case DIOCADDSTATE: {
1414 struct pfioc_state *ps = (struct pfioc_state *)addr;
1415 struct pf_state *state;
1416 struct pfi_kif *kif;
1417
1418 if (ps->state.timeout >= PFTM_MAX &&
1419 ps->state.timeout != PFTM_UNTIL_PACKET) {
1420 error = EINVAL;
1421 break;
1422 }
1423 state = pool_get(&pf_state_pl, PR_NOWAIT);
1424 if (state == NULL) {
1425 error = ENOMEM;
1426 break;
1427 }
1428 s = splsoftnet();
1429 kif = pfi_lookup_create(ps->state.u.ifname);
1430 if (kif == NULL) {
1431 pool_put(&pf_state_pl, state);
1432 error = ENOENT;
1433 splx(s);
1434 break;
1435 }
1436 bcopy(&ps->state, state, sizeof(struct pf_state));
1437 bzero(&state->u, sizeof(state->u));
1438 state->rule.ptr = &pf_default_rule;
1439 state->nat_rule.ptr = NULL;
1440 state->anchor.ptr = NULL;
1441 state->rt_kif = NULL;
1442 state->creation = time.tv_sec;
1443 state->pfsync_time = 0;
1444 state->packets[0] = state->packets[1] = 0;
1445 state->bytes[0] = state->bytes[1] = 0;
1446
1447 if (pf_insert_state(kif, state)) {
1448 pfi_maybe_destroy(kif);
1449 pool_put(&pf_state_pl, state);
1450 error = ENOMEM;
1451 }
1452 splx(s);
1453 break;
1454 }
1455
1456 case DIOCGETSTATE: {
1457 struct pfioc_state *ps = (struct pfioc_state *)addr;
1458 struct pf_state *state;
1459 u_int32_t nr;
1460
1461 nr = 0;
1462 s = splsoftnet();
1463 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1464 if (nr >= ps->nr)
1465 break;
1466 nr++;
1467 }
1468 if (state == NULL) {
1469 error = EBUSY;
1470 splx(s);
1471 break;
1472 }
1473 bcopy(state, &ps->state, sizeof(struct pf_state));
1474 ps->state.rule.nr = state->rule.ptr->nr;
1475 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1476 -1 : state->nat_rule.ptr->nr;
1477 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1478 -1 : state->anchor.ptr->nr;
1479 splx(s);
1480 ps->state.expire = pf_state_expires(state);
1481 if (ps->state.expire > time.tv_sec)
1482 ps->state.expire -= time.tv_sec;
1483 else
1484 ps->state.expire = 0;
1485 break;
1486 }
1487
1488 case DIOCGETSTATES: {
1489 struct pfioc_states *ps = (struct pfioc_states *)addr;
1490 struct pf_state *state;
1491 struct pf_state *p, pstore;
1492 struct pfi_kif *kif;
1493 u_int32_t nr = 0;
1494 int space = ps->ps_len;
1495
1496 if (space == 0) {
1497 s = splsoftnet();
1498 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1499 nr += kif->pfik_states;
1500 splx(s);
1501 ps->ps_len = sizeof(struct pf_state) * nr;
1502 return (0);
1503 }
1504
1505 s = splsoftnet();
1506 p = ps->ps_states;
1507 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1508 RB_FOREACH(state, pf_state_tree_ext_gwy,
1509 &kif->pfik_ext_gwy) {
1510 int secs = time.tv_sec;
1511
1512 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1513 break;
1514
1515 bcopy(state, &pstore, sizeof(pstore));
1516 strlcpy(pstore.u.ifname, kif->pfik_name,
1517 sizeof(pstore.u.ifname));
1518 pstore.rule.nr = state->rule.ptr->nr;
1519 pstore.nat_rule.nr = (state->nat_rule.ptr ==
1520 NULL) ? -1 : state->nat_rule.ptr->nr;
1521 pstore.anchor.nr = (state->anchor.ptr ==
1522 NULL) ? -1 : state->anchor.ptr->nr;
1523 pstore.creation = secs - pstore.creation;
1524 pstore.expire = pf_state_expires(state);
1525 if (pstore.expire > secs)
1526 pstore.expire -= secs;
1527 else
1528 pstore.expire = 0;
1529 error = copyout(&pstore, p, sizeof(*p));
1530 if (error) {
1531 splx(s);
1532 goto fail;
1533 }
1534 p++;
1535 nr++;
1536 }
1537 ps->ps_len = sizeof(struct pf_state) * nr;
1538 splx(s);
1539 break;
1540 }
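/*
 * As the space == 0 branch above shows, DIOCGETSTATES supports a sizing
 * pass: called with ps_len == 0 it only reports the space required, so
 * the usual pattern is two calls (sketch, allocation checks omitted):
 *
 *        struct pfioc_states ps;
 *
 *        memset(&ps, 0, sizeof(ps));
 *        ioctl(fd, DIOCGETSTATES, &ps);          (ps.ps_len = bytes needed)
 *        ps.ps_states = malloc(ps.ps_len);
 *        ioctl(fd, DIOCGETSTATES, &ps);          (copies out, updates ps.ps_len)
 *
 * The state count may grow between the calls; the second call simply
 * stops copying when the supplied buffer is full.
 */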
1541
1542 case DIOCGETSTATUS: {
1543 struct pf_status *s = (struct pf_status *)addr;
1544 bcopy(&pf_status, s, sizeof(struct pf_status));
1545 pfi_fill_oldstatus(s);
1546 break;
1547 }
1548
1549 case DIOCSETSTATUSIF: {
1550 struct pfioc_if *pi = (struct pfioc_if *)addr;
1551
1552 if (pi->ifname[0] == 0) {
1553 bzero(pf_status.ifname, IFNAMSIZ);
1554 break;
1555 }
1556 if (ifunit(pi->ifname) == NULL) {
1557 error = EINVAL;
1558 break;
1559 }
1560 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1561 break;
1562 }
1563
1564 case DIOCCLRSTATUS: {
1565 bzero(pf_status.counters, sizeof(pf_status.counters));
1566 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1567 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1568 if (*pf_status.ifname)
1569 pfi_clr_istats(pf_status.ifname, NULL,
1570 PFI_FLAG_INSTANCE);
1571 break;
1572 }
1573
1574 case DIOCNATLOOK: {
1575 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1576 struct pf_state *state;
1577 struct pf_state key;
1578 int m = 0, direction = pnl->direction;
1579
1580 key.af = pnl->af;
1581 key.proto = pnl->proto;
1582
1583 if (!pnl->proto ||
1584 PF_AZERO(&pnl->saddr, pnl->af) ||
1585 PF_AZERO(&pnl->daddr, pnl->af) ||
1586 !pnl->dport || !pnl->sport)
1587 error = EINVAL;
1588 else {
1589 s = splsoftnet();
1590
1591 /*
1592 * userland gives us source and dest of connection,
1593 * reverse the lookup so we ask for what happens with
1594 * the return traffic, enabling us to find it in the
1595 * state tree.
1596 */
1597 if (direction == PF_IN) {
1598 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1599 key.ext.port = pnl->dport;
1600 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1601 key.gwy.port = pnl->sport;
1602 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1603 } else {
1604 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1605 key.lan.port = pnl->dport;
1606 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1607 key.ext.port = pnl->sport;
1608 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1609 }
1610 if (m > 1)
1611 error = E2BIG; /* more than one state */
1612 else if (state != NULL) {
1613 if (direction == PF_IN) {
1614 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1615 state->af);
1616 pnl->rsport = state->lan.port;
1617 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1618 pnl->af);
1619 pnl->rdport = pnl->dport;
1620 } else {
1621 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1622 state->af);
1623 pnl->rdport = state->gwy.port;
1624 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1625 pnl->af);
1626 pnl->rsport = pnl->sport;
1627 }
1628 } else
1629 error = ENOENT;
1630 splx(s);
1631 }
1632 break;
1633 }
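/*
 * DIOCNATLOOK is what transparent proxies use to recover the original
 * destination of a connection that an rdr rule redirected to them.  A
 * hedged sketch for an accepted TCP connection, with addresses and ports
 * already in network byte order and error handling omitted:
 *
 *        struct pfioc_natlook pnl;
 *
 *        memset(&pnl, 0, sizeof(pnl));
 *        pnl.af = AF_INET;
 *        pnl.proto = IPPROTO_TCP;
 *        pnl.direction = PF_OUT;
 *        pnl.saddr.v4 = client_sa.sin_addr;      (peer of the accepted socket)
 *        pnl.sport = client_sa.sin_port;
 *        pnl.daddr.v4 = local_sa.sin_addr;       (our end of the socket)
 *        pnl.dport = local_sa.sin_port;
 *        ioctl(fd, DIOCNATLOOK, &pnl);
 *        (pnl.rdaddr and pnl.rdport now hold the pre-rdr destination)
 *
 * client_sa and local_sa are placeholders for the sockaddr_in results of
 * getpeername(2) and getsockname(2).
 */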
1634
1635 case DIOCSETTIMEOUT: {
1636 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1637 int old;
1638
1639 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1640 pt->seconds < 0) {
1641 error = EINVAL;
1642 goto fail;
1643 }
1644 old = pf_default_rule.timeout[pt->timeout];
1645 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1646 pt->seconds = old;
1647 break;
1648 }
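/*
 * Example: lowering the established-TCP timeout initialized in
 * pfattach() above.  pt.seconds comes back holding the previous value.
 * Sketch:
 *
 *        struct pfioc_tm pt;
 *
 *        memset(&pt, 0, sizeof(pt));
 *        pt.timeout = PFTM_TCP_ESTABLISHED;
 *        pt.seconds = 60 * 60;
 *        ioctl(fd, DIOCSETTIMEOUT, &pt);
 */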
1649
1650 case DIOCGETTIMEOUT: {
1651 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1652
1653 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1654 error = EINVAL;
1655 goto fail;
1656 }
1657 pt->seconds = pf_default_rule.timeout[pt->timeout];
1658 break;
1659 }
1660
1661 case DIOCGETLIMIT: {
1662 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1663
1664 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1665 error = EINVAL;
1666 goto fail;
1667 }
1668 pl->limit = pf_pool_limits[pl->index].limit;
1669 break;
1670 }
1671
1672 case DIOCSETLIMIT: {
1673 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1674 int old_limit;
1675
1676 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1677 pf_pool_limits[pl->index].pp == NULL) {
1678 error = EINVAL;
1679 goto fail;
1680 }
1681 #ifdef __OpenBSD__
1682 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1683 pl->limit, NULL, 0) != 0) {
1684 error = EBUSY;
1685 goto fail;
1686 }
1687 #else
1688 pool_sethardlimit(pf_pool_limits[pl->index].pp,
1689 pl->limit, NULL, 0);
1690 #endif
1691 old_limit = pf_pool_limits[pl->index].limit;
1692 pf_pool_limits[pl->index].limit = pl->limit;
1693 pl->limit = old_limit;
1694 break;
1695 }
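/*
 * Example: raising the hard limit on state entries (the pf.conf
 * "set limit states" knob).  pl.limit comes back holding the old limit.
 * Sketch:
 *
 *        struct pfioc_limit pl;
 *
 *        memset(&pl, 0, sizeof(pl));
 *        pl.index = PF_LIMIT_STATES;
 *        pl.limit = 20000;
 *        ioctl(fd, DIOCSETLIMIT, &pl);
 */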
1696
1697 case DIOCSETDEBUG: {
1698 u_int32_t *level = (u_int32_t *)addr;
1699
1700 pf_status.debug = *level;
1701 break;
1702 }
1703
1704 case DIOCCLRRULECTRS: {
1705 struct pf_ruleset *ruleset = &pf_main_ruleset;
1706 struct pf_rule *rule;
1707
1708 s = splsoftnet();
1709 TAILQ_FOREACH(rule,
1710 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
1711 rule->evaluations = rule->packets =
1712 rule->bytes = 0;
1713 splx(s);
1714 break;
1715 }
1716
1717 #ifdef ALTQ
1718 case DIOCSTARTALTQ: {
1719 struct pf_altq *altq;
1720 struct ifnet *ifp;
1721 struct tb_profile tb;
1722
1723 /* enable all altq interfaces on active list */
1724 s = splsoftnet();
1725 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1726 if (altq->qname[0] == 0) {
1727 if ((ifp = ifunit(altq->ifname)) == NULL) {
1728 error = EINVAL;
1729 break;
1730 }
1731 if (ifp->if_snd.altq_type != ALTQT_NONE)
1732 error = altq_enable(&ifp->if_snd);
1733 if (error != 0)
1734 break;
1735 /* set tokenbucket regulator */
1736 tb.rate = altq->ifbandwidth;
1737 tb.depth = altq->tbrsize;
1738 error = tbr_set(&ifp->if_snd, &tb);
1739 if (error != 0)
1740 break;
1741 }
1742 }
1743 if (error == 0)
1744 pfaltq_running = 1;
1745 splx(s);
1746 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1747 break;
1748 }
1749
1750 case DIOCSTOPALTQ: {
1751 struct pf_altq *altq;
1752 struct ifnet *ifp;
1753 struct tb_profile tb;
1754 int err;
1755
1756 /* disable all altq interfaces on active list */
1757 s = splsoftnet();
1758 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1759 if (altq->qname[0] == 0) {
1760 if ((ifp = ifunit(altq->ifname)) == NULL) {
1761 error = EINVAL;
1762 break;
1763 }
1764 if (ifp->if_snd.altq_type != ALTQT_NONE) {
1765 err = altq_disable(&ifp->if_snd);
1766 if (err != 0 && error == 0)
1767 error = err;
1768 }
1769 /* clear tokenbucket regulator */
1770 tb.rate = 0;
1771 err = tbr_set(&ifp->if_snd, &tb);
1772 if (err != 0 && error == 0)
1773 error = err;
1774 }
1775 }
1776 if (error == 0)
1777 pfaltq_running = 0;
1778 splx(s);
1779 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1780 break;
1781 }
1782
1783 case DIOCBEGINALTQS: {
1784 u_int32_t *ticket = (u_int32_t *)addr;
1785
1786 error = pf_begin_altq(ticket);
1787 break;
1788 }
1789
1790 case DIOCADDALTQ: {
1791 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1792 struct pf_altq *altq, *a;
1793
1794 if (pa->ticket != ticket_altqs_inactive) {
1795 error = EBUSY;
1796 break;
1797 }
1798 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
1799 if (altq == NULL) {
1800 error = ENOMEM;
1801 break;
1802 }
1803 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1804
1805 /*
1806 * if this is for a queue, find the discipline and
1807 * copy the necessary fields
1808 */
1809 if (altq->qname[0] != 0) {
1810 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1811 error = EBUSY;
1812 pool_put(&pf_altq_pl, altq);
1813 break;
1814 }
1815 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1816 if (strncmp(a->ifname, altq->ifname,
1817 IFNAMSIZ) == 0 && a->qname[0] == 0) {
1818 altq->altq_disc = a->altq_disc;
1819 break;
1820 }
1821 }
1822 }
1823
1824 error = altq_add(altq);
1825 if (error) {
1826 pool_put(&pf_altq_pl, altq);
1827 break;
1828 }
1829
1830 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1831 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1832 break;
1833 }
1834
1835 case DIOCCOMMITALTQS: {
1836 u_int32_t ticket = *(u_int32_t *)addr;
1837
1838 error = pf_commit_altq(ticket);
1839 break;
1840 }
1841
1842 case DIOCGETALTQS: {
1843 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1844 struct pf_altq *altq;
1845
1846 pa->nr = 0;
1847 s = splsoftnet();
1848 TAILQ_FOREACH(altq, pf_altqs_active, entries)
1849 pa->nr++;
1850 pa->ticket = ticket_altqs_active;
1851 splx(s);
1852 break;
1853 }
1854
1855 case DIOCGETALTQ: {
1856 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1857 struct pf_altq *altq;
1858 u_int32_t nr;
1859
1860 if (pa->ticket != ticket_altqs_active) {
1861 error = EBUSY;
1862 break;
1863 }
1864 nr = 0;
1865 s = splsoftnet();
1866 altq = TAILQ_FIRST(pf_altqs_active);
1867 while ((altq != NULL) && (nr < pa->nr)) {
1868 altq = TAILQ_NEXT(altq, entries);
1869 nr++;
1870 }
1871 if (altq == NULL) {
1872 error = EBUSY;
1873 splx(s);
1874 break;
1875 }
1876 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1877 splx(s);
1878 break;
1879 }
1880
1881 case DIOCCHANGEALTQ:
1882 /* CHANGEALTQ not supported yet! */
1883 error = ENODEV;
1884 break;
1885
1886 case DIOCGETQSTATS: {
1887 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
1888 struct pf_altq *altq;
1889 u_int32_t nr;
1890 int nbytes;
1891
1892 if (pq->ticket != ticket_altqs_active) {
1893 error = EBUSY;
1894 break;
1895 }
1896 nbytes = pq->nbytes;
1897 nr = 0;
1898 s = splsoftnet();
1899 altq = TAILQ_FIRST(pf_altqs_active);
1900 while ((altq != NULL) && (nr < pq->nr)) {
1901 altq = TAILQ_NEXT(altq, entries);
1902 nr++;
1903 }
1904 if (altq == NULL) {
1905 error = EBUSY;
1906 splx(s);
1907 break;
1908 }
1909 error = altq_getqstats(altq, pq->buf, &nbytes);
1910 splx(s);
1911 if (error == 0) {
1912 pq->scheduler = altq->scheduler;
1913 pq->nbytes = nbytes;
1914 }
1915 break;
1916 }
1917 #endif /* ALTQ */
1918
1919 case DIOCBEGINADDRS: {
1920 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
1921
1922 pf_empty_pool(&pf_pabuf);
1923 pp->ticket = ++ticket_pabuf;
1924 break;
1925 }
1926
1927 case DIOCADDADDR: {
1928 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
1929
1930 #ifndef INET
1931 if (pp->af == AF_INET) {
1932 error = EAFNOSUPPORT;
1933 break;
1934 }
1935 #endif /* INET */
1936 #ifndef INET6
1937 if (pp->af == AF_INET6) {
1938 error = EAFNOSUPPORT;
1939 break;
1940 }
1941 #endif /* INET6 */
1942 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
1943 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
1944 pp->addr.addr.type != PF_ADDR_TABLE) {
1945 error = EINVAL;
1946 break;
1947 }
1948 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
1949 if (pa == NULL) {
1950 error = ENOMEM;
1951 break;
1952 }
1953 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
1954 if (pa->ifname[0]) {
1955 pa->kif = pfi_attach_rule(pa->ifname);
1956 if (pa->kif == NULL) {
1957 pool_put(&pf_pooladdr_pl, pa);
1958 error = EINVAL;
1959 break;
1960 }
1961 }
1962 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
1963 pfi_dynaddr_remove(&pa->addr);
1964 pfi_detach_rule(pa->kif);
1965 pool_put(&pf_pooladdr_pl, pa);
1966 error = EINVAL;
1967 break;
1968 }
1969 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
1970 break;
1971 }
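/*
 * DIOCBEGINADDRS and DIOCADDADDR stage pool addresses in pf_pabuf for the
 * next DIOCADDRULE (or DIOCCHANGERULE); the ticket returned here is what
 * that rule must pass as pool_ticket.  Sketch for one address, details of
 * the pf_pooladdr itself omitted:
 *
 *        struct pfioc_pooladdr pp;
 *
 *        ioctl(fd, DIOCBEGINADDRS, &pp);         (empties pf_pabuf, sets pp.ticket)
 *        memset(&pp.addr, 0, sizeof(pp.addr));
 *        pp.addr.addr.type = PF_ADDR_ADDRMASK;   (fill in address and mask)
 *        ioctl(fd, DIOCADDADDR, &pp);            (repeat per pool entry)
 *        pr.pool_ticket = pp.ticket;             (then DIOCADDRULE as shown earlier)
 */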
1972
1973 case DIOCGETADDRS: {
1974 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
1975
1976 pp->nr = 0;
1977 s = splsoftnet();
1978 pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket,
1979 pp->r_action, pp->r_num, 0, 1, 0);
1980 if (pool == NULL) {
1981 error = EBUSY;
1982 splx(s);
1983 break;
1984 }
1985 TAILQ_FOREACH(pa, &pool->list, entries)
1986 pp->nr++;
1987 splx(s);
1988 break;
1989 }
1990
1991 case DIOCGETADDR: {
1992 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
1993 u_int32_t nr = 0;
1994
1995 s = splsoftnet();
1996 pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket,
1997 pp->r_action, pp->r_num, 0, 1, 1);
1998 if (pool == NULL) {
1999 error = EBUSY;
2000 splx(s);
2001 break;
2002 }
2003 pa = TAILQ_FIRST(&pool->list);
2004 while ((pa != NULL) && (nr < pp->nr)) {
2005 pa = TAILQ_NEXT(pa, entries);
2006 nr++;
2007 }
2008 if (pa == NULL) {
2009 error = EBUSY;
2010 splx(s);
2011 break;
2012 }
2013 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2014 pfi_dynaddr_copyout(&pp->addr.addr);
2015 pf_tbladdr_copyout(&pp->addr.addr);
2016 splx(s);
2017 break;
2018 }
2019
2020 case DIOCCHANGEADDR: {
2021 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2022 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2023 struct pf_ruleset *ruleset;
2024
2025 if (pca->action < PF_CHANGE_ADD_HEAD ||
2026 pca->action > PF_CHANGE_REMOVE) {
2027 error = EINVAL;
2028 break;
2029 }
2030 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2031 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2032 pca->addr.addr.type != PF_ADDR_TABLE) {
2033 error = EINVAL;
2034 break;
2035 }
2036
2037 ruleset = pf_find_ruleset(pca->anchor, pca->ruleset);
2038 if (ruleset == NULL) {
2039 error = EBUSY;
2040 break;
2041 }
2042 pool = pf_get_pool(pca->anchor, pca->ruleset, pca->ticket,
2043 pca->r_action, pca->r_num, pca->r_last, 1, 1);
2044 if (pool == NULL) {
2045 error = EBUSY;
2046 break;
2047 }
2048 if (pca->action != PF_CHANGE_REMOVE) {
2049 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2050 if (newpa == NULL) {
2051 error = ENOMEM;
2052 break;
2053 }
2054 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2055 #ifndef INET
2056 if (pca->af == AF_INET) {
2057 pool_put(&pf_pooladdr_pl, newpa);
2058 error = EAFNOSUPPORT;
2059 break;
2060 }
2061 #endif /* INET */
2062 #ifndef INET6
2063 if (pca->af == AF_INET6) {
2064 pool_put(&pf_pooladdr_pl, newpa);
2065 error = EAFNOSUPPORT;
2066 break;
2067 }
2068 #endif /* INET6 */
2069 if (newpa->ifname[0]) {
2070 newpa->kif = pfi_attach_rule(newpa->ifname);
2071 if (newpa->kif == NULL) {
2072 pool_put(&pf_pooladdr_pl, newpa);
2073 error = EINVAL;
2074 break;
2075 }
2076 } else
2077 newpa->kif = NULL;
2078 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2079 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2080 pfi_dynaddr_remove(&newpa->addr);
2081 pfi_detach_rule(newpa->kif);
2082 pool_put(&pf_pooladdr_pl, newpa);
2083 error = EINVAL;
2084 break;
2085 }
2086 }
2087
2088 s = splsoftnet();
2089
2090 if (pca->action == PF_CHANGE_ADD_HEAD)
2091 oldpa = TAILQ_FIRST(&pool->list);
2092 else if (pca->action == PF_CHANGE_ADD_TAIL)
2093 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2094 else {
2095 int i = 0;
2096
2097 oldpa = TAILQ_FIRST(&pool->list);
2098 while ((oldpa != NULL) && (i < pca->nr)) {
2099 oldpa = TAILQ_NEXT(oldpa, entries);
2100 i++;
2101 }
2102 if (oldpa == NULL) {
2103 error = EINVAL;
2104 splx(s);
2105 break;
2106 }
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_detach_rule(oldpa->kif);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		splx(s);
		break;
	}

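	/*
	 * Anchor and ruleset enumeration: userland first asks for the
	 * number of entries and then fetches them one at a time by index
	 * (DIOCGETANCHOR/DIOCGETRULESET).
	 */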
	case DIOCGETANCHORS: {
		struct pfioc_anchor *pa = (struct pfioc_anchor *)addr;
		struct pf_anchor *anchor;

		pa->nr = 0;
		TAILQ_FOREACH(anchor, &pf_anchors, entries)
			pa->nr++;
		break;
	}

	case DIOCGETANCHOR: {
		struct pfioc_anchor *pa = (struct pfioc_anchor *)addr;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		anchor = TAILQ_FIRST(&pf_anchors);
		while (anchor != NULL && nr < pa->nr) {
			anchor = TAILQ_NEXT(anchor, entries);
			nr++;
		}
		if (anchor == NULL)
			error = EBUSY;
		else
			bcopy(anchor->name, pa->name, sizeof(pa->name));
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_anchor *anchor;
		struct pf_ruleset *ruleset;

		pr->anchor[PF_ANCHOR_NAME_SIZE-1] = 0;
		if ((anchor = pf_find_anchor(pr->anchor)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		TAILQ_FOREACH(ruleset, &anchor->rulesets, entries)
			pr->nr++;
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_anchor *anchor;
		struct pf_ruleset *ruleset;
		u_int32_t nr = 0;

		if ((anchor = pf_find_anchor(pr->anchor)) == NULL) {
			error = EINVAL;
			break;
		}
		ruleset = TAILQ_FIRST(&anchor->rulesets);
		while (ruleset != NULL && nr < pr->nr) {
			ruleset = TAILQ_NEXT(ruleset, entries);
			nr++;
		}
		if (ruleset == NULL)
			error = EBUSY;
		else
			bcopy(ruleset->name, pr->name, sizeof(pr->name));
		break;
	}

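	/*
	 * The DIOCR* table ioctls are thin wrappers around the pfr_*
	 * routines in pf_table.c.  pfrio_esize is checked against the
	 * expected element size so a userland built against a different
	 * structure layout fails early, and PFR_FLAG_USERIOCTL marks the
	 * call as coming from userland.
	 */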
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINABEGIN: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_begin(&io->pfrio_table, &io->pfrio_ticket,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINACOMMIT: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_commit(&io->pfrio_table, io->pfrio_ticket,
		    &io->pfrio_nadd, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		s = splsoftnet();
		error = pf_osfp_add(io);
		splx(s);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		s = splsoftnet();
		error = pf_osfp_get(io);
		splx(s);
		break;
	}

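	/*
	 * DIOCXBEGIN, DIOCXCOMMIT and DIOCXROLLBACK implement atomic
	 * ruleset loads: userland (typically pfctl) begins a transaction
	 * for every ruleset it wants to replace, loads the new rules,
	 * tables and queues against the tickets returned by DIOCXBEGIN,
	 * and then commits or rolls back all of them in a single call.
	 */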
	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e ioe;
		struct pfr_table table;
		int i;

		if (io->esize != sizeof(ioe)) {
			error = ENODEV;
			goto fail;
		}
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe.anchor[0] || ioe.ruleset[0]) {
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe.ticket)))
					goto fail;
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe.anchor,
				    sizeof(table.pfrt_anchor));
				strlcpy(table.pfrt_ruleset, ioe.ruleset,
				    sizeof(table.pfrt_ruleset));
				if ((error = pfr_ina_begin(&table,
				    &ioe.ticket, NULL, 0)))
					goto fail;
				break;
			default:
				if ((error = pf_begin_rules(&ioe.ticket,
				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
					goto fail;
				break;
			}
			if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
				error = EFAULT;
				goto fail;
			}
		}
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e ioe;
		struct pfr_table table;
		int i;

		if (io->esize != sizeof(ioe)) {
			error = ENODEV;
			goto fail;
		}
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe.anchor[0] || ioe.ruleset[0]) {
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe.ticket)))
					goto fail; /* really bad */
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe.anchor,
				    sizeof(table.pfrt_anchor));
				strlcpy(table.pfrt_ruleset, ioe.ruleset,
				    sizeof(table.pfrt_ruleset));
				if ((error = pfr_ina_rollback(&table,
				    ioe.ticket, NULL, 0)))
					goto fail; /* really bad */
				break;
			default:
				if ((error = pf_rollback_rules(ioe.ticket,
				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
					goto fail; /* really bad */
				break;
			}
		}
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e ioe;
		struct pfr_table table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(ioe)) {
			error = ENODEV;
			goto fail;
		}
		/* First pass: make sure everything will succeed. */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe.anchor[0] || ioe.ruleset[0]) {
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe.ticket !=
				    ticket_altqs_inactive) {
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe.anchor, ioe.ruleset);
				if (rs == NULL || !rs->topen || ioe.ticket !=
				    rs->tticket) {
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe.rs_num < 0 || ioe.rs_num >=
				    PF_RULESET_MAX) {
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe.anchor, ioe.ruleset);
				if (rs == NULL ||
				    !rs->rules[ioe.rs_num].inactive.open ||
				    rs->rules[ioe.rs_num].inactive.ticket !=
				    ioe.ticket) {
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Second pass: do the commit; no errors should happen here. */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe.ticket)))
					goto fail; /* really bad */
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe.anchor,
				    sizeof(table.pfrt_anchor));
				strlcpy(table.pfrt_ruleset, ioe.ruleset,
				    sizeof(table.pfrt_ruleset));
				if ((error = pfr_ina_commit(&table, ioe.ticket,
				    NULL, NULL, 0)))
					goto fail; /* really bad */
				break;
			default:
				if ((error = pf_commit_rules(ioe.ticket,
				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
					goto fail; /* really bad */
				break;
			}
		}
		break;
	}

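	/*
	 * Export the source tracking nodes.  With psn_len == 0 only the
	 * required buffer size is returned; otherwise as many nodes as
	 * fit are copied out with their timers converted to relative
	 * seconds.
	 */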
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n;
		struct pf_src_node *p, pstore;
		u_int32_t nr = 0;
		int space = psn->psn_len;

		if (space == 0) {
			s = splsoftnet();
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			splx(s);
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			return (0);
		}

		s = splsoftnet();
		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = time.tv_sec;

			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
				break;

			bcopy(n, &pstore, sizeof(pstore));
			if (n->rule.ptr != NULL)
				pstore.rule.nr = n->rule.ptr->nr;
			pstore.creation = secs - pstore.creation;
			if (pstore.expire > secs)
				pstore.expire -= secs;
			else
				pstore.expire = 0;
			error = copyout(&pstore, p, sizeof(*p));
			if (error) {
				splx(s);
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		splx(s);
		break;
	}

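	/*
	 * Flush all source nodes: detach them from existing states,
	 * mark them expired and let the purge routine free them.
	 */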
	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		s = splsoftnet();
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		splx(s);
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		if (*hostid == 0) {
			error = EINVAL;
			goto fail;
		}
		pf_status.hostid = *hostid;
		break;
	}

	case DIOCOSFPFLUSH:
		s = splsoftnet();
		pf_osfp_flush();
		splx(s);
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_if)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size, io->pfiio_flags);
		break;
	}

	case DIOCICLRISTATS: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
		    io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:

	return (error);
}

#ifdef __NetBSD__
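/*
 * NetBSD glue: pf is hooked into the stack via pfil(9).  The wrappers
 * below run pf_test()/pf_test6() on every packet passing the inet/inet6
 * pfil heads and track interface attach and address-change events for
 * pfi.
 */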
int
pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}

	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp) != PF_PASS)
		return EHOSTUNREACH;
	else
		return (0);
}

int
pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{

	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp) != PF_PASS)
		return EHOSTUNREACH;
	else
		return (0);
}

extern void pfi_kifaddr_update(void *);

int
pfil_if_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	u_long cmd = (u_long)mp;

	switch (cmd) {
	case 0:
		pfi_attach_ifnet(ifp);
		break;
	case SIOCSIFADDR:
	case SIOCGIFALIAS:
	case SIOCDIFADDR:
		pfi_kifaddr_update((struct ifnet *)arg);
		break;
	default:
		panic("unexpected ioctl");
	}

	return 0;
}

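/*
 * Register the pf hooks with pfil(9) and attach all interfaces that
 * already exist; partially installed hooks are backed out if a later
 * registration fails.
 */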
static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int error;
	int i;

	error = pfil_add_hook(pfil_if_wrapper, NULL, PFIL_IFADDR|PFIL_NEWIF,
	    &if_pfil);
	if (error)
		return (error);
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error) {
		pfil_remove_hook(pfil_if_wrapper, NULL, PFIL_IFADDR|PFIL_NEWIF,
		    &if_pfil);
	}
#ifdef INET6
	if (error) {
		return (error);
	}
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error) {
		pfil_remove_hook(pfil_if_wrapper, NULL, PFIL_IFADDR|PFIL_NEWIF,
		    &if_pfil);
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	}
#endif
	if (!error) {
		for (i = 0; i < if_indexlim; i++) {
			if (ifindex2ifnet[i])
				pfi_attach_ifnet(ifindex2ifnet[i]);
		}
	}
	return (error);
}

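/*
 * Undo pf_pfil_attach(): detach all interfaces known to pfi and unhook
 * pf from the interface, inet and inet6 pfil heads.
 */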
int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int i;

	for (i = 0; i < if_indexlim; i++) {
		if (pfi_index2kif[i])
			pfi_detach_ifnet(ifindex2ifnet[i]);
	}
	pfil_remove_hook(pfil_if_wrapper, NULL, PFIL_IFADDR|PFIL_NEWIF,
	    &if_pfil);
	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif
	return (0);
}
#endif
