/*	$NetBSD: pf_ioctl.c,v 1.34 2008/06/22 11:36:33 peter Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.34 2008/06/22 11:36:33 peter Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_pfil_hooks.h"
#endif

#ifndef __NetBSD__
#include "pfsync.h"
#else
#define NPFSYNC 0
#endif /* __NetBSD__ */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

void			 pfattach(int);
#ifdef _LKM
void			 pfdetach(void);
#endif /* _LKM */
#ifndef __NetBSD__
void			 pf_thread_create(void *);
#endif /* !__NetBSD__ */
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, int, char *);
void			 pf_state_export(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
void			 pf_state_import(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);

struct pf_rule		 pf_default_rule;
#ifdef __NetBSD__
krwlock_t		 pf_consistency_lock;
#else
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int		 pf_altq_running;
#endif

#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;
#endif /* __NetBSD__ */

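/*
 * Attach pf: set up the memory pools, rule trees and default
 * settings, then start the kernel thread that purges expired states.
 */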
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

#ifdef __NetBSD__
	rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
#ifdef __NetBSD__
	if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
	    "pfpurge"))
		panic("pfpurge thread");
#else
	kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */
}

#ifdef _LKM
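/*
 * Detach pf (LKM unload): stop the purge thread, unhook the pfil
 * hooks, flush all rules, states, source nodes and tables, and
 * finally tear down the pools and subsystems set up by pfattach().
 */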
void
pfdetach(void)
{
	extern int		 pf_purge_thread_running;
	extern int		 pf_purge_thread_stop;
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';

	pf_purge_thread_stop = 1;
	wakeup(pf_purge_thread);

	/* wait until the kthread exits */
	while (pf_purge_thread_running)
		tsleep(&pf_purge_thread_running, PWAIT, "pfdown", 0);

	(void)pf_pfil_detach();

	pf_status.running = 0;

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC > 0
		state->sync_flags = PFSTATE_NOSYNC;
#endif /* NPFSYNC > 0 */
	}
	pf_purge_expired_states(pf_status.states);
#if NPFSYNC > 0
	pfsync_clear_states(pf_status.hostid, NULL);
#endif /* NPFSYNC > 0 */

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes(0);

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_key_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	rw_destroy(&pf_consistency_lock);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif /* _LKM */

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
#endif /* !__NetBSD__ */

int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

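/*
 * Look up the address pool of a rule, identified by anchor, ruleset
 * (derived from rule_action) and rule number.  With r_last set, the
 * last rule in the queue is used instead of the numbered one; with
 * check_ticket set, the caller's ticket must match the ruleset's.
 */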
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

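/* Move all pool addresses from poola to the tail of poolb. */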
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

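/* Release all pool addresses in poola together with their references. */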
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

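/*
 * Remove a rule from its queue and release it, unless it is still
 * referenced by states or source nodes; in that case the final free
 * happens once the last reference is dropped.  With rulequeue == NULL
 * only the reference counts decide whether the rule is freed.
 */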
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

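/*
 * Map a tag name to a numeric tag, allocating a new entry (with the
 * lowest free id) if the name is not known yet; each call takes a
 * reference on the entry.  Returns 0 on failure.
 */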
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif /* !__NetBSD__ */
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif /* !__NetBSD__ */
}

#ifdef ALTQ
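/*
 * Queue names are mapped to queue ids with the same machinery as
 * tags; pf_qids is simply a second tag list.
 */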
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

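/*
 * Start an ALTQ transaction: purge the inactive altq list and hand
 * out a new ticket for it.
 */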
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

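/*
 * Commit an ALTQ transaction: swap the active and inactive lists,
 * attach (and, if pf_altq_running, enable) the new disciplines and
 * purge the old ones.
 */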
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}

int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been
	 * overridden by a new one.  In that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */

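/*
 * Start a ruleset transaction: flush the inactive rule queue and
 * hand out a new ticket for it.
 */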
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

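/*
 * The following macros feed selected rule fields into an MD5 context;
 * the resulting checksum lets pfsync peers verify that they run
 * identical rulesets.  Multi-byte fields are hashed in network byte
 * order so the checksum is host independent.
 */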
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

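/*
 * Commit a ruleset transaction: swap the active and inactive rule
 * queues under splsoftnet(), recalculate the skip steps and purge
 * the now-inactive old rules.
 */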
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

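/*
 * Flatten a state and its state key into the wire/ioctl
 * representation (struct pfsync_state), converting the creation and
 * expiry times to relative seconds.
 */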
void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;

}

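/*
 * Rebuild a state and state key from the wire/ioctl representation;
 * rule pointers and counters are reset to sane defaults.
 */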
void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	/*
	 * note: do not copy an interface name here; s->kif is still NULL
	 * at this point, and the caller needs sp->ifname intact to look
	 * up the kif (see DIOCADDSTATE).
	 */
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}

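/*
 * Hash all rules of the given ruleset (except scrub rules) into
 * pf_status.pf_chksum and build the ptr_array lookup tables used by
 * pfsync.
 */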
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(void *) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}

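/*
 * The main pf ioctl handler.  Access is checked in two steps: a
 * kauth(9) authorization for privileged commands, then an FWRITE
 * check that restricts read-only file descriptors to the "get"
 * commands.  The consistency lock is then taken shared or exclusive
 * accordingly.
 */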
int
pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pf_consistency_lock);
	else
		rw_enter_read(&pf_consistency_lock);

	s = splsoftnet();
	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 0;
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
#ifdef __NetBSD__
		rule->cuid = kauth_cred_getuid(l->l_cred);
		rule->cpid = l->l_proc->p_pid;
#else
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

#ifndef __NetBSD__
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
#ifdef __NetBSD__
			newrule->cuid = kauth_cred_getuid(l->l_cred);
			newrule->cpid = l->l_proc->p_pid;
#else
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#ifndef __NetBSD__
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
#if NPFSYNC > 0
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = (struct pfsync_state *)ps->state;
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pfi_kif		*kif;

		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_NOWAIT);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s)) == NULL) {
			/* free the state we just allocated, else it leaks */
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
1851 pf_state_import(sp, sk, s);
1852 kif = pfi_kif_get(sp->ifname);
1853 if (kif == NULL) {
1854 pool_put(&pf_state_pl, s);
1855 pool_put(&pf_state_key_pl, sk);
1856 error = ENOENT;
1857 break;
1858 }
1859 if (pf_insert_state(kif, s)) {
1860 pfi_kif_unref(kif, PFI_KIF_REF_NONE);
1861 pool_put(&pf_state_pl, s);
1862 pool_put(&pf_state_key_pl, sk);
1863 error = ENOMEM;
1864 }
1865 break;
1866 }
1867
1868 case DIOCGETSTATE: {
1869 struct pfioc_state *ps = (struct pfioc_state *)addr;
1870 struct pf_state *s;
1871 u_int32_t nr;
1872
1873 nr = 0;
1874 RB_FOREACH(s, pf_state_tree_id, &tree_id) {
1875 if (nr >= ps->nr)
1876 break;
1877 nr++;
1878 }
1879 if (s == NULL) {
1880 error = EBUSY;
1881 break;
1882 }
1883
1884 pf_state_export((struct pfsync_state *)&ps->state,
1885 s->state_key, s);
1886 break;
1887 }
1888
1889 case DIOCGETSTATES: {
1890 struct pfioc_states *ps = (struct pfioc_states *)addr;
1891 struct pf_state *state;
1892 struct pfsync_state *p, *pstore;
1893 u_int32_t nr = 0;
1894
1895 if (ps->ps_len == 0) {
1896 nr = pf_status.states;
1897 ps->ps_len = sizeof(struct pfsync_state) * nr;
1898 break;
1899 }
1900
1901 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1902
1903 p = ps->ps_states;
1904
1905 state = TAILQ_FIRST(&state_list);
1906 while (state) {
1907 if (state->timeout != PFTM_UNLINKED) {
1908 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1909 break;
1910
1911 pf_state_export(pstore,
1912 state->state_key, state);
1913 error = copyout(pstore, p, sizeof(*p));
1914 if (error) {
1915 free(pstore, M_TEMP);
1916 goto fail;
1917 }
1918 p++;
1919 nr++;
1920 }
1921 state = TAILQ_NEXT(state, entry_list);
1922 }
1923
1924 ps->ps_len = sizeof(struct pfsync_state) * nr;
1925
1926 free(pstore, M_TEMP);
1927 break;
1928 }
1929
1930 case DIOCGETSTATUS: {
1931 struct pf_status *s = (struct pf_status *)addr;
1932 bcopy(&pf_status, s, sizeof(struct pf_status));
1933 pfi_fill_oldstatus(s);
1934 break;
1935 }
1936
1937 case DIOCSETSTATUSIF: {
1938 struct pfioc_if *pi = (struct pfioc_if *)addr;
1939
1940 if (pi->ifname[0] == 0) {
1941 bzero(pf_status.ifname, IFNAMSIZ);
1942 break;
1943 }
1944 if (ifunit(pi->ifname) == NULL) {
1945 error = EINVAL;
1946 break;
1947 }
1948 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1949 break;
1950 }
1951
1952 case DIOCCLRSTATUS: {
1953 bzero(pf_status.counters, sizeof(pf_status.counters));
1954 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1955 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1956 pf_status.since = time_second;
1957 if (*pf_status.ifname)
1958 pfi_clr_istats(pf_status.ifname);
1959 break;
1960 }
1961
1962 case DIOCNATLOOK: {
1963 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1964 struct pf_state_key *sk;
1965 struct pf_state *state;
1966 struct pf_state_key_cmp key;
1967 int m = 0, direction = pnl->direction;
1968
1969 key.af = pnl->af;
1970 key.proto = pnl->proto;
1971
1972 if (!pnl->proto ||
1973 PF_AZERO(&pnl->saddr, pnl->af) ||
1974 PF_AZERO(&pnl->daddr, pnl->af) ||
1975 ((pnl->proto == IPPROTO_TCP ||
1976 pnl->proto == IPPROTO_UDP) &&
1977 (!pnl->dport || !pnl->sport)))
1978 error = EINVAL;
1979 else {
1980 /*
1981 * userland gives us source and dest of connection,
1982 * reverse the lookup so we ask for what happens with
1983 * the return traffic, enabling us to find it in the
1984 * state tree.
1985 */
1986 if (direction == PF_IN) {
1987 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1988 key.ext.port = pnl->dport;
1989 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1990 key.gwy.port = pnl->sport;
1991 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1992 } else {
1993 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1994 key.lan.port = pnl->dport;
1995 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1996 key.ext.port = pnl->sport;
1997 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1998 }
1999 if (m > 1)
2000 error = E2BIG; /* more than one state */
2001 else if (state != NULL) {
2002 sk = state->state_key;
2003 if (direction == PF_IN) {
2004 PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
2005 sk->af);
2006 pnl->rsport = sk->lan.port;
2007 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
2008 pnl->af);
2009 pnl->rdport = pnl->dport;
2010 } else {
2011 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
2012 sk->af);
2013 pnl->rdport = sk->gwy.port;
2014 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
2015 pnl->af);
2016 pnl->rsport = pnl->sport;
2017 }
2018 } else
2019 error = ENOENT;
2020 }
2021 break;
2022 }
2023
2024 case DIOCSETTIMEOUT: {
2025 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2026 int old;
2027
2028 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2029 pt->seconds < 0) {
2030 error = EINVAL;
2031 goto fail;
2032 }
2033 old = pf_default_rule.timeout[pt->timeout];
2034 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2035 pt->seconds = 1;
2036 pf_default_rule.timeout[pt->timeout] = pt->seconds;
2037 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
2038 wakeup(pf_purge_thread);
2039 pt->seconds = old;
2040 break;
2041 }
2042
2043 case DIOCGETTIMEOUT: {
2044 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2045
2046 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2047 error = EINVAL;
2048 goto fail;
2049 }
2050 pt->seconds = pf_default_rule.timeout[pt->timeout];
2051 break;
2052 }
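
	/*
	 * Hypothetical userland sketch: setting one timeout, as a
	 * "set timeout" line in pf.conf does.  Note that DIOCSETTIMEOUT
	 * hands the previous value back in pt.seconds.
	 *
	 *	struct pfioc_tm pt;
	 *
	 *	memset(&pt, 0, sizeof(pt));
	 *	pt.timeout = PFTM_TCP_ESTABLISHED;
	 *	pt.seconds = 3600;
	 *	if (ioctl(dev, DIOCSETTIMEOUT, &pt) == -1)
	 *		err(1, "DIOCSETTIMEOUT");
	 *	printf("previous value: %d\n", pt.seconds);
	 */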
2053
2054 case DIOCGETLIMIT: {
2055 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2056
2057 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2058 error = EINVAL;
2059 goto fail;
2060 }
2061 pl->limit = pf_pool_limits[pl->index].limit;
2062 break;
2063 }
2064
2065 case DIOCSETLIMIT: {
2066 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2067 int old_limit;
2068
2069 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2070 pf_pool_limits[pl->index].pp == NULL) {
2071 error = EINVAL;
2072 goto fail;
2073 }
2074 #ifdef __NetBSD__
2075 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2076 pl->limit, NULL, 0);
2077 #else
2078 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2079 pl->limit, NULL, 0) != 0) {
2080 error = EBUSY;
2081 goto fail;
2082 }
2083 #endif /* __NetBSD__ */
2084 old_limit = pf_pool_limits[pl->index].limit;
2085 pf_pool_limits[pl->index].limit = pl->limit;
2086 pl->limit = old_limit;
2087 break;
2088 }
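
	/*
	 * Hypothetical userland sketch: raising the state-table hard
	 * limit ("set limit states" in pf.conf).  The old limit comes
	 * back in pl.limit.
	 *
	 *	struct pfioc_limit pl;
	 *
	 *	memset(&pl, 0, sizeof(pl));
	 *	pl.index = PF_LIMIT_STATES;
	 *	pl.limit = 20000;
	 *	if (ioctl(dev, DIOCSETLIMIT, &pl) == -1)
	 *		err(1, "DIOCSETLIMIT");
	 */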
2089
2090 case DIOCSETDEBUG: {
2091 u_int32_t *level = (u_int32_t *)addr;
2092
2093 pf_status.debug = *level;
2094 break;
2095 }
2096
2097 case DIOCCLRRULECTRS: {
2098 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2099 struct pf_ruleset *ruleset = &pf_main_ruleset;
2100 struct pf_rule *rule;
2101
2102 TAILQ_FOREACH(rule,
2103 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
2104 rule->evaluations = 0;
2105 rule->packets[0] = rule->packets[1] = 0;
2106 rule->bytes[0] = rule->bytes[1] = 0;
2107 }
2108 break;
2109 }
2110
2111 #ifdef ALTQ
2112 case DIOCSTARTALTQ: {
2113 struct pf_altq *altq;
2114
2115 /* enable all altq interfaces on active list */
2116 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2117 if (altq->qname[0] == 0) {
2118 error = pf_enable_altq(altq);
2119 if (error != 0)
2120 break;
2121 }
2122 }
2123 if (error == 0)
2124 pf_altq_running = 1;
2125 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2126 break;
2127 }
2128
2129 case DIOCSTOPALTQ: {
2130 struct pf_altq *altq;
2131
2132 /* disable all altq interfaces on active list */
2133 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2134 if (altq->qname[0] == 0) {
2135 error = pf_disable_altq(altq);
2136 if (error != 0)
2137 break;
2138 }
2139 }
2140 if (error == 0)
2141 pf_altq_running = 0;
2142 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2143 break;
2144 }
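
	/*
	 * Neither command above takes an argument structure, so a
	 * hypothetical userland caller enabling queueing is simply:
	 *
	 *	if (ioctl(dev, DIOCSTARTALTQ) == -1)
	 *		err(1, "DIOCSTARTALTQ");
	 *
	 * DIOCSTOPALTQ is symmetric.
	 */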
2145
2146 case DIOCADDALTQ: {
2147 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2148 struct pf_altq *altq, *a;
2149
2150 if (pa->ticket != ticket_altqs_inactive) {
2151 error = EBUSY;
2152 break;
2153 }
2154 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2155 if (altq == NULL) {
2156 error = ENOMEM;
2157 break;
2158 }
2159 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2160
2161 /*
2162 * if this is for a queue, find the discipline and
2163 * copy the necessary fields
2164 */
2165 if (altq->qname[0] != 0) {
2166 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2167 error = EBUSY;
2168 pool_put(&pf_altq_pl, altq);
2169 break;
2170 }
2171 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2172 if (strncmp(a->ifname, altq->ifname,
2173 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2174 altq->altq_disc = a->altq_disc;
2175 break;
2176 }
2177 }
2178 }
2179
2180 error = altq_add(altq);
2181 if (error) {
2182 pool_put(&pf_altq_pl, altq);
2183 break;
2184 }
2185
2186 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2187 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2188 break;
2189 }
2190
2191 case DIOCGETALTQS: {
2192 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2193 struct pf_altq *altq;
2194
2195 pa->nr = 0;
2196 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2197 pa->nr++;
2198 pa->ticket = ticket_altqs_active;
2199 break;
2200 }
2201
2202 case DIOCGETALTQ: {
2203 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2204 struct pf_altq *altq;
2205 u_int32_t nr;
2206
2207 if (pa->ticket != ticket_altqs_active) {
2208 error = EBUSY;
2209 break;
2210 }
2211 nr = 0;
2212 altq = TAILQ_FIRST(pf_altqs_active);
2213 while ((altq != NULL) && (nr < pa->nr)) {
2214 altq = TAILQ_NEXT(altq, entries);
2215 nr++;
2216 }
2217 if (altq == NULL) {
2218 error = EBUSY;
2219 break;
2220 }
2221 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2222 break;
2223 }
2224
2225 case DIOCCHANGEALTQ:
2226 /* CHANGEALTQ not supported yet! */
2227 error = ENODEV;
2228 break;
2229
2230 case DIOCGETQSTATS: {
2231 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2232 struct pf_altq *altq;
2233 u_int32_t nr;
2234 int nbytes;
2235
2236 if (pq->ticket != ticket_altqs_active) {
2237 error = EBUSY;
2238 break;
2239 }
2240 nbytes = pq->nbytes;
2241 nr = 0;
2242 altq = TAILQ_FIRST(pf_altqs_active);
2243 while ((altq != NULL) && (nr < pq->nr)) {
2244 altq = TAILQ_NEXT(altq, entries);
2245 nr++;
2246 }
2247 if (altq == NULL) {
2248 error = EBUSY;
2249 break;
2250 }
2251 error = altq_getqstats(altq, pq->buf, &nbytes);
2252 if (error == 0) {
2253 pq->scheduler = altq->scheduler;
2254 pq->nbytes = nbytes;
2255 }
2256 break;
2257 }
2258 #endif /* ALTQ */
2259
2260 case DIOCBEGINADDRS: {
2261 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2262
2263 pf_empty_pool(&pf_pabuf);
2264 pp->ticket = ++ticket_pabuf;
2265 break;
2266 }
2267
2268 case DIOCADDADDR: {
2269 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2270
2271 if (pp->ticket != ticket_pabuf) {
2272 error = EBUSY;
2273 break;
2274 }
2275 #ifndef INET
2276 if (pp->af == AF_INET) {
2277 error = EAFNOSUPPORT;
2278 break;
2279 }
2280 #endif /* INET */
2281 #ifndef INET6
2282 if (pp->af == AF_INET6) {
2283 error = EAFNOSUPPORT;
2284 break;
2285 }
2286 #endif /* INET6 */
2287 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2288 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2289 pp->addr.addr.type != PF_ADDR_TABLE) {
2290 error = EINVAL;
2291 break;
2292 }
2293 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2294 if (pa == NULL) {
2295 error = ENOMEM;
2296 break;
2297 }
2298 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2299 if (pa->ifname[0]) {
2300 pa->kif = pfi_kif_get(pa->ifname);
2301 if (pa->kif == NULL) {
2302 pool_put(&pf_pooladdr_pl, pa);
2303 error = EINVAL;
2304 break;
2305 }
2306 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2307 }
2308 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2309 pfi_dynaddr_remove(&pa->addr);
2310 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2311 pool_put(&pf_pooladdr_pl, pa);
2312 error = EINVAL;
2313 break;
2314 }
2315 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2316 break;
2317 }
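
	/*
	 * Hypothetical userland sketch of the two-step pool protocol
	 * above: DIOCBEGINADDRS empties the staging buffer and issues a
	 * ticket, each DIOCADDADDR must then present that ticket, and
	 * the staged addresses are finally consumed by a DIOCADDRULE or
	 * DIOCCHANGERULE carrying the same pool ticket.
	 *
	 *	struct pfioc_pooladdr pp;
	 *
	 *	memset(&pp, 0, sizeof(pp));
	 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
	 *		err(1, "DIOCBEGINADDRS");
	 *	pp.af = AF_INET;
	 *	pp.addr.addr.type = PF_ADDR_ADDRMASK;
	 *	inet_pton(AF_INET, "192.0.2.1", &pp.addr.addr.v.a.addr.v4);
	 *	memset(&pp.addr.addr.v.a.mask, 0xff, sizeof(struct in_addr));
	 *	if (ioctl(dev, DIOCADDADDR, &pp) == -1)
	 *		err(1, "DIOCADDADDR");
	 *	(pp.ticket is then passed as the rule's pool_ticket)
	 */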
2318
2319 case DIOCGETADDRS: {
2320 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2321
2322 pp->nr = 0;
2323 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2324 pp->r_num, 0, 1, 0);
2325 if (pool == NULL) {
2326 error = EBUSY;
2327 break;
2328 }
2329 TAILQ_FOREACH(pa, &pool->list, entries)
2330 pp->nr++;
2331 break;
2332 }
2333
2334 case DIOCGETADDR: {
2335 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2336 u_int32_t nr = 0;
2337
2338 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2339 pp->r_num, 0, 1, 1);
2340 if (pool == NULL) {
2341 error = EBUSY;
2342 break;
2343 }
2344 pa = TAILQ_FIRST(&pool->list);
2345 while ((pa != NULL) && (nr < pp->nr)) {
2346 pa = TAILQ_NEXT(pa, entries);
2347 nr++;
2348 }
2349 if (pa == NULL) {
2350 error = EBUSY;
2351 break;
2352 }
2353 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2354 pfi_dynaddr_copyout(&pp->addr.addr);
2355 pf_tbladdr_copyout(&pp->addr.addr);
2356 pf_rtlabel_copyout(&pp->addr.addr);
2357 break;
2358 }
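
	/*
	 * Hypothetical userland sketch: walking the address pool of a
	 * rule previously fetched with DIOCGETRULES/DIOCGETRULE, which
	 * supply the ticket, action and rule number assumed here as
	 * "rules_ticket", "rule_action" and "rule_nr".
	 *
	 *	struct pfioc_pooladdr pp;
	 *	u_int32_t i, n;
	 *
	 *	memset(&pp, 0, sizeof(pp));
	 *	pp.ticket = rules_ticket;
	 *	pp.r_action = rule_action;
	 *	pp.r_num = rule_nr;
	 *	if (ioctl(dev, DIOCGETADDRS, &pp) == -1)
	 *		err(1, "DIOCGETADDRS");
	 *	for (i = 0, n = pp.nr; i < n; i++) {
	 *		pp.nr = i;
	 *		if (ioctl(dev, DIOCGETADDR, &pp) == -1)
	 *			err(1, "DIOCGETADDR");
	 *		(pp.addr now describes the i-th pool address)
	 *	}
	 */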
2359
2360 case DIOCCHANGEADDR: {
2361 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2362 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2363 struct pf_ruleset *ruleset;
2364
2365 if (pca->action < PF_CHANGE_ADD_HEAD ||
2366 pca->action > PF_CHANGE_REMOVE) {
2367 error = EINVAL;
2368 break;
2369 }
2370 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2371 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2372 pca->addr.addr.type != PF_ADDR_TABLE) {
2373 error = EINVAL;
2374 break;
2375 }
2376
2377 ruleset = pf_find_ruleset(pca->anchor);
2378 if (ruleset == NULL) {
2379 error = EBUSY;
2380 break;
2381 }
2382 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2383 pca->r_num, pca->r_last, 1, 1);
2384 if (pool == NULL) {
2385 error = EBUSY;
2386 break;
2387 }
2388 if (pca->action != PF_CHANGE_REMOVE) {
2389 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2390 if (newpa == NULL) {
2391 error = ENOMEM;
2392 break;
2393 }
2394 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2395 #ifndef INET
2396 if (pca->af == AF_INET) {
2397 pool_put(&pf_pooladdr_pl, newpa);
2398 error = EAFNOSUPPORT;
2399 break;
2400 }
2401 #endif /* INET */
2402 #ifndef INET6
2403 if (pca->af == AF_INET6) {
2404 pool_put(&pf_pooladdr_pl, newpa);
2405 error = EAFNOSUPPORT;
2406 break;
2407 }
2408 #endif /* INET6 */
2409 if (newpa->ifname[0]) {
2410 newpa->kif = pfi_kif_get(newpa->ifname);
2411 if (newpa->kif == NULL) {
2412 pool_put(&pf_pooladdr_pl, newpa);
2413 error = EINVAL;
2414 break;
2415 }
2416 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2417 } else
2418 newpa->kif = NULL;
2419 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2420 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2421 pfi_dynaddr_remove(&newpa->addr);
2422 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2423 pool_put(&pf_pooladdr_pl, newpa);
2424 error = EINVAL;
2425 break;
2426 }
2427 }
2428
2429 if (pca->action == PF_CHANGE_ADD_HEAD)
2430 oldpa = TAILQ_FIRST(&pool->list);
2431 else if (pca->action == PF_CHANGE_ADD_TAIL)
2432 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2433 else {
2434 int i = 0;
2435
2436 oldpa = TAILQ_FIRST(&pool->list);
2437 while ((oldpa != NULL) && (i < pca->nr)) {
2438 oldpa = TAILQ_NEXT(oldpa, entries);
2439 i++;
2440 }
2441 			if (oldpa == NULL) {
				/*
				 * Don't leak newpa when the requested
				 * index does not exist.
				 */
				if (newpa != NULL) {
					pfi_dynaddr_remove(&newpa->addr);
					pfi_kif_unref(newpa->kif,
					    PFI_KIF_REF_RULE);
					pool_put(&pf_pooladdr_pl, newpa);
				}
2442 				error = EINVAL;
2443 				break;
2444 			}
2445 }
2446
2447 if (pca->action == PF_CHANGE_REMOVE) {
2448 TAILQ_REMOVE(&pool->list, oldpa, entries);
2449 pfi_dynaddr_remove(&oldpa->addr);
2450 pf_tbladdr_remove(&oldpa->addr);
2451 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2452 pool_put(&pf_pooladdr_pl, oldpa);
2453 } else {
2454 if (oldpa == NULL)
2455 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2456 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2457 pca->action == PF_CHANGE_ADD_BEFORE)
2458 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2459 else
2460 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2461 newpa, entries);
2462 }
2463
2464 pool->cur = TAILQ_FIRST(&pool->list);
2465 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2466 pca->af);
2467 break;
2468 }
2469
2470 case DIOCGETRULESETS: {
2471 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2472 struct pf_ruleset *ruleset;
2473 struct pf_anchor *anchor;
2474
2475 pr->path[sizeof(pr->path) - 1] = 0;
2476 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2477 error = EINVAL;
2478 break;
2479 }
2480 pr->nr = 0;
2481 if (ruleset->anchor == NULL) {
2482 /* XXX kludge for pf_main_ruleset */
2483 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2484 if (anchor->parent == NULL)
2485 pr->nr++;
2486 } else {
2487 RB_FOREACH(anchor, pf_anchor_node,
2488 &ruleset->anchor->children)
2489 pr->nr++;
2490 }
2491 break;
2492 }
2493
2494 case DIOCGETRULESET: {
2495 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2496 struct pf_ruleset *ruleset;
2497 struct pf_anchor *anchor;
2498 u_int32_t nr = 0;
2499
2500 pr->path[sizeof(pr->path) - 1] = 0;
2501 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2502 error = EINVAL;
2503 break;
2504 }
2505 pr->name[0] = 0;
2506 if (ruleset->anchor == NULL) {
2507 /* XXX kludge for pf_main_ruleset */
2508 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2509 if (anchor->parent == NULL && nr++ == pr->nr) {
2510 strlcpy(pr->name, anchor->name,
2511 sizeof(pr->name));
2512 break;
2513 }
2514 } else {
2515 RB_FOREACH(anchor, pf_anchor_node,
2516 &ruleset->anchor->children)
2517 if (nr++ == pr->nr) {
2518 strlcpy(pr->name, anchor->name,
2519 sizeof(pr->name));
2520 break;
2521 }
2522 }
2523 if (!pr->name[0])
2524 error = EBUSY;
2525 break;
2526 }
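
	/*
	 * Hypothetical userland sketch: enumerating the anchors below a
	 * path with the pair of ioctls above, as "pfctl -s Anchors"
	 * does.  An empty path names the main ruleset.
	 *
	 *	struct pfioc_ruleset pr;
	 *	u_int32_t i, n;
	 *
	 *	memset(&pr, 0, sizeof(pr));
	 *	if (ioctl(dev, DIOCGETRULESETS, &pr) == -1)
	 *		err(1, "DIOCGETRULESETS");
	 *	for (i = 0, n = pr.nr; i < n; i++) {
	 *		pr.nr = i;
	 *		if (ioctl(dev, DIOCGETRULESET, &pr) == -1)
	 *			err(1, "DIOCGETRULESET");
	 *		printf("%s\n", pr.name);
	 *	}
	 */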
2527
2528 case DIOCRCLRTABLES: {
2529 struct pfioc_table *io = (struct pfioc_table *)addr;
2530
2531 if (io->pfrio_esize != 0) {
2532 error = ENODEV;
2533 break;
2534 }
2535 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2536 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2537 break;
2538 }
2539
2540 case DIOCRADDTABLES: {
2541 struct pfioc_table *io = (struct pfioc_table *)addr;
2542
2543 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2544 error = ENODEV;
2545 break;
2546 }
2547 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2548 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2549 break;
2550 }
2551
2552 case DIOCRDELTABLES: {
2553 struct pfioc_table *io = (struct pfioc_table *)addr;
2554
2555 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2556 error = ENODEV;
2557 break;
2558 }
2559 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2560 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2561 break;
2562 }
2563
2564 case DIOCRGETTABLES: {
2565 struct pfioc_table *io = (struct pfioc_table *)addr;
2566
2567 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2568 error = ENODEV;
2569 break;
2570 }
2571 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2572 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2573 break;
2574 }
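
	/*
	 * Hypothetical userland sketch of the sizing convention shared
	 * by the table ioctls: pfrio_size is the buffer capacity on
	 * entry (counted in pfrio_esize-sized entries) and the number
	 * of available entries on return, so a first call with a zero
	 * size yields the count to allocate for.  A careful caller
	 * retries if the table set grew between the two calls.
	 *
	 *	struct pfioc_table io;
	 *	struct pfr_table *tbl;
	 *
	 *	memset(&io, 0, sizeof(io));
	 *	io.pfrio_esize = sizeof(struct pfr_table);
	 *	if (ioctl(dev, DIOCRGETTABLES, &io) == -1)
	 *		err(1, "DIOCRGETTABLES");
	 *	tbl = calloc(io.pfrio_size, sizeof(*tbl));
	 *	io.pfrio_buffer = tbl;
	 *	if (ioctl(dev, DIOCRGETTABLES, &io) == -1)
	 *		err(1, "DIOCRGETTABLES");
	 */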
2575
2576 case DIOCRGETTSTATS: {
2577 struct pfioc_table *io = (struct pfioc_table *)addr;
2578
2579 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2580 error = ENODEV;
2581 break;
2582 }
2583 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2584 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2585 break;
2586 }
2587
2588 case DIOCRCLRTSTATS: {
2589 struct pfioc_table *io = (struct pfioc_table *)addr;
2590
2591 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2592 error = ENODEV;
2593 break;
2594 }
2595 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2596 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2597 break;
2598 }
2599
2600 case DIOCRSETTFLAGS: {
2601 struct pfioc_table *io = (struct pfioc_table *)addr;
2602
2603 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2604 error = ENODEV;
2605 break;
2606 }
2607 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2608 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2609 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2610 break;
2611 }
2612
2613 case DIOCRCLRADDRS: {
2614 struct pfioc_table *io = (struct pfioc_table *)addr;
2615
2616 if (io->pfrio_esize != 0) {
2617 error = ENODEV;
2618 break;
2619 }
2620 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2621 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2622 break;
2623 }
2624
2625 case DIOCRADDADDRS: {
2626 struct pfioc_table *io = (struct pfioc_table *)addr;
2627
2628 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2629 error = ENODEV;
2630 break;
2631 }
2632 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2633 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2634 PFR_FLAG_USERIOCTL);
2635 break;
2636 }
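
	/*
	 * Hypothetical userland sketch: inserting a single host address
	 * into a table, as "pfctl -t badhosts -T add 192.0.2.7" would.
	 *
	 *	struct pfioc_table io;
	 *	struct pfr_addr ad;
	 *
	 *	memset(&io, 0, sizeof(io));
	 *	memset(&ad, 0, sizeof(ad));
	 *	strlcpy(io.pfrio_table.pfrt_name, "badhosts",
	 *	    sizeof(io.pfrio_table.pfrt_name));
	 *	ad.pfra_af = AF_INET;
	 *	ad.pfra_net = 32;
	 *	inet_pton(AF_INET, "192.0.2.7", &ad.pfra_ip4addr);
	 *	io.pfrio_buffer = &ad;
	 *	io.pfrio_esize = sizeof(ad);
	 *	io.pfrio_size = 1;
	 *	if (ioctl(dev, DIOCRADDADDRS, &io) == -1)
	 *		err(1, "DIOCRADDADDRS");
	 *	printf("%d addresses added\n", io.pfrio_nadd);
	 */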
2637
2638 case DIOCRDELADDRS: {
2639 struct pfioc_table *io = (struct pfioc_table *)addr;
2640
2641 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2642 error = ENODEV;
2643 break;
2644 }
2645 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2646 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2647 PFR_FLAG_USERIOCTL);
2648 break;
2649 }
2650
2651 case DIOCRSETADDRS: {
2652 struct pfioc_table *io = (struct pfioc_table *)addr;
2653
2654 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2655 error = ENODEV;
2656 break;
2657 }
2658 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2659 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2660 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2661 PFR_FLAG_USERIOCTL, 0);
2662 break;
2663 }
2664
2665 case DIOCRGETADDRS: {
2666 struct pfioc_table *io = (struct pfioc_table *)addr;
2667
2668 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2669 error = ENODEV;
2670 break;
2671 }
2672 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2673 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2674 break;
2675 }
2676
2677 case DIOCRGETASTATS: {
2678 struct pfioc_table *io = (struct pfioc_table *)addr;
2679
2680 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2681 error = ENODEV;
2682 break;
2683 }
2684 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2685 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2686 break;
2687 }
2688
2689 case DIOCRCLRASTATS: {
2690 struct pfioc_table *io = (struct pfioc_table *)addr;
2691
2692 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2693 error = ENODEV;
2694 break;
2695 }
2696 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2697 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2698 PFR_FLAG_USERIOCTL);
2699 break;
2700 }
2701
2702 case DIOCRTSTADDRS: {
2703 struct pfioc_table *io = (struct pfioc_table *)addr;
2704
2705 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2706 error = ENODEV;
2707 break;
2708 }
2709 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2710 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2711 PFR_FLAG_USERIOCTL);
2712 break;
2713 }
2714
2715 case DIOCRINADEFINE: {
2716 struct pfioc_table *io = (struct pfioc_table *)addr;
2717
2718 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2719 error = ENODEV;
2720 break;
2721 }
2722 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2723 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2724 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2725 break;
2726 }
2727
2728 case DIOCOSFPADD: {
2729 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2730 error = pf_osfp_add(io);
2731 break;
2732 }
2733
2734 case DIOCOSFPGET: {
2735 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2736 error = pf_osfp_get(io);
2737 break;
2738 }
2739
2740 case DIOCXBEGIN: {
2741 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2742 struct pfioc_trans_e *ioe;
2743 struct pfr_table *table;
2744 int i;
2745
2746 if (io->esize != sizeof(*ioe)) {
2747 error = ENODEV;
2748 goto fail;
2749 }
2750 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2751 M_TEMP, M_WAITOK);
2752 table = (struct pfr_table *)malloc(sizeof(*table),
2753 M_TEMP, M_WAITOK);
2754 for (i = 0; i < io->size; i++) {
2755 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2756 free(table, M_TEMP);
2757 free(ioe, M_TEMP);
2758 error = EFAULT;
2759 goto fail;
2760 }
2761 switch (ioe->rs_num) {
2762 #ifdef ALTQ
2763 case PF_RULESET_ALTQ:
2764 if (ioe->anchor[0]) {
2765 free(table, M_TEMP);
2766 free(ioe, M_TEMP);
2767 error = EINVAL;
2768 goto fail;
2769 }
2770 if ((error = pf_begin_altq(&ioe->ticket))) {
2771 free(table, M_TEMP);
2772 free(ioe, M_TEMP);
2773 goto fail;
2774 }
2775 break;
2776 #endif /* ALTQ */
2777 case PF_RULESET_TABLE:
2778 bzero(table, sizeof(*table));
2779 strlcpy(table->pfrt_anchor, ioe->anchor,
2780 sizeof(table->pfrt_anchor));
2781 if ((error = pfr_ina_begin(table,
2782 &ioe->ticket, NULL, 0))) {
2783 free(table, M_TEMP);
2784 free(ioe, M_TEMP);
2785 goto fail;
2786 }
2787 break;
2788 default:
2789 if ((error = pf_begin_rules(&ioe->ticket,
2790 ioe->rs_num, ioe->anchor))) {
2791 free(table, M_TEMP);
2792 free(ioe, M_TEMP);
2793 goto fail;
2794 }
2795 break;
2796 }
2797 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2798 free(table, M_TEMP);
2799 free(ioe, M_TEMP);
2800 error = EFAULT;
2801 goto fail;
2802 }
2803 }
2804 free(table, M_TEMP);
2805 free(ioe, M_TEMP);
2806 break;
2807 }
2808
2809 case DIOCXROLLBACK: {
2810 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2811 struct pfioc_trans_e *ioe;
2812 struct pfr_table *table;
2813 int i;
2814
2815 if (io->esize != sizeof(*ioe)) {
2816 error = ENODEV;
2817 goto fail;
2818 }
2819 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2820 M_TEMP, M_WAITOK);
2821 table = (struct pfr_table *)malloc(sizeof(*table),
2822 M_TEMP, M_WAITOK);
2823 for (i = 0; i < io->size; i++) {
2824 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2825 free(table, M_TEMP);
2826 free(ioe, M_TEMP);
2827 error = EFAULT;
2828 goto fail;
2829 }
2830 switch (ioe->rs_num) {
2831 #ifdef ALTQ
2832 case PF_RULESET_ALTQ:
2833 if (ioe->anchor[0]) {
2834 free(table, M_TEMP);
2835 free(ioe, M_TEMP);
2836 error = EINVAL;
2837 goto fail;
2838 }
2839 if ((error = pf_rollback_altq(ioe->ticket))) {
2840 free(table, M_TEMP);
2841 free(ioe, M_TEMP);
2842 goto fail; /* really bad */
2843 }
2844 break;
2845 #endif /* ALTQ */
2846 case PF_RULESET_TABLE:
2847 bzero(table, sizeof(*table));
2848 strlcpy(table->pfrt_anchor, ioe->anchor,
2849 sizeof(table->pfrt_anchor));
2850 if ((error = pfr_ina_rollback(table,
2851 ioe->ticket, NULL, 0))) {
2852 free(table, M_TEMP);
2853 free(ioe, M_TEMP);
2854 goto fail; /* really bad */
2855 }
2856 break;
2857 default:
2858 if ((error = pf_rollback_rules(ioe->ticket,
2859 ioe->rs_num, ioe->anchor))) {
2860 free(table, M_TEMP);
2861 free(ioe, M_TEMP);
2862 goto fail; /* really bad */
2863 }
2864 break;
2865 }
2866 }
2867 free(table, M_TEMP);
2868 free(ioe, M_TEMP);
2869 break;
2870 }
2871
2872 case DIOCXCOMMIT: {
2873 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2874 struct pfioc_trans_e *ioe;
2875 struct pfr_table *table;
2876 struct pf_ruleset *rs;
2877 int i;
2878
2879 if (io->esize != sizeof(*ioe)) {
2880 error = ENODEV;
2881 goto fail;
2882 }
2883 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2884 M_TEMP, M_WAITOK);
2885 table = (struct pfr_table *)malloc(sizeof(*table),
2886 M_TEMP, M_WAITOK);
2887 		/* first pass: make sure everything will succeed */
2888 for (i = 0; i < io->size; i++) {
2889 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2890 free(table, M_TEMP);
2891 free(ioe, M_TEMP);
2892 error = EFAULT;
2893 goto fail;
2894 }
2895 switch (ioe->rs_num) {
2896 #ifdef ALTQ
2897 case PF_RULESET_ALTQ:
2898 if (ioe->anchor[0]) {
2899 free(table, M_TEMP);
2900 free(ioe, M_TEMP);
2901 error = EINVAL;
2902 goto fail;
2903 }
2904 if (!altqs_inactive_open || ioe->ticket !=
2905 ticket_altqs_inactive) {
2906 free(table, M_TEMP);
2907 free(ioe, M_TEMP);
2908 error = EBUSY;
2909 goto fail;
2910 }
2911 break;
2912 #endif /* ALTQ */
2913 case PF_RULESET_TABLE:
2914 rs = pf_find_ruleset(ioe->anchor);
2915 if (rs == NULL || !rs->topen || ioe->ticket !=
2916 rs->tticket) {
2917 free(table, M_TEMP);
2918 free(ioe, M_TEMP);
2919 error = EBUSY;
2920 goto fail;
2921 }
2922 break;
2923 default:
2924 if (ioe->rs_num < 0 || ioe->rs_num >=
2925 PF_RULESET_MAX) {
2926 free(table, M_TEMP);
2927 free(ioe, M_TEMP);
2928 error = EINVAL;
2929 goto fail;
2930 }
2931 rs = pf_find_ruleset(ioe->anchor);
2932 if (rs == NULL ||
2933 !rs->rules[ioe->rs_num].inactive.open ||
2934 rs->rules[ioe->rs_num].inactive.ticket !=
2935 ioe->ticket) {
2936 free(table, M_TEMP);
2937 free(ioe, M_TEMP);
2938 error = EBUSY;
2939 goto fail;
2940 }
2941 break;
2942 }
2943 }
2944 		/* second pass: do the commit - no errors should happen here */
2945 for (i = 0; i < io->size; i++) {
2946 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2947 free(table, M_TEMP);
2948 free(ioe, M_TEMP);
2949 error = EFAULT;
2950 goto fail;
2951 }
2952 switch (ioe->rs_num) {
2953 #ifdef ALTQ
2954 case PF_RULESET_ALTQ:
2955 if ((error = pf_commit_altq(ioe->ticket))) {
2956 free(table, M_TEMP);
2957 free(ioe, M_TEMP);
2958 goto fail; /* really bad */
2959 }
2960 break;
2961 #endif /* ALTQ */
2962 case PF_RULESET_TABLE:
2963 bzero(table, sizeof(*table));
2964 strlcpy(table->pfrt_anchor, ioe->anchor,
2965 sizeof(table->pfrt_anchor));
2966 if ((error = pfr_ina_commit(table, ioe->ticket,
2967 NULL, NULL, 0))) {
2968 free(table, M_TEMP);
2969 free(ioe, M_TEMP);
2970 goto fail; /* really bad */
2971 }
2972 break;
2973 default:
2974 if ((error = pf_commit_rules(ioe->ticket,
2975 ioe->rs_num, ioe->anchor))) {
2976 free(table, M_TEMP);
2977 free(ioe, M_TEMP);
2978 goto fail; /* really bad */
2979 }
2980 break;
2981 }
2982 }
2983 free(table, M_TEMP);
2984 free(ioe, M_TEMP);
2985 break;
2986 }
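
	/*
	 * Hypothetical userland sketch of the transaction protocol that
	 * pfctl uses when loading a configuration: DIOCXBEGIN opens all
	 * inactive rulesets in one go and hands back a ticket per
	 * element, rules and table contents are then loaded against
	 * those tickets, and DIOCXCOMMIT swaps everything in atomically
	 * (DIOCXROLLBACK discards it instead on error).
	 *
	 *	struct pfioc_trans trans;
	 *	struct pfioc_trans_e te[1];
	 *
	 *	memset(&trans, 0, sizeof(trans));
	 *	memset(te, 0, sizeof(te));
	 *	te[0].rs_num = PF_RULESET_FILTER;
	 *	trans.size = 1;
	 *	trans.esize = sizeof(te[0]);
	 *	trans.array = te;
	 *	if (ioctl(dev, DIOCXBEGIN, &trans) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	(DIOCADDRULE calls using te[0].ticket go here)
	 *	if (ioctl(dev, DIOCXCOMMIT, &trans) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 */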
2987
2988 case DIOCGETSRCNODES: {
2989 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2990 struct pf_src_node *n, *p, *pstore;
2991 u_int32_t nr = 0;
2992 int space = psn->psn_len;
2993
2994 if (space == 0) {
2995 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2996 nr++;
2997 psn->psn_len = sizeof(struct pf_src_node) * nr;
2998 break;
2999 }
3000
3001 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
3002
3003 p = psn->psn_src_nodes;
3004 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3005 int secs = time_second, diff;
3006
3007 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3008 break;
3009
3010 bcopy(n, pstore, sizeof(*pstore));
3011 if (n->rule.ptr != NULL)
3012 pstore->rule.nr = n->rule.ptr->nr;
3013 pstore->creation = secs - pstore->creation;
3014 if (pstore->expire > secs)
3015 pstore->expire -= secs;
3016 else
3017 pstore->expire = 0;
3018
3019 /* adjust the connection rate estimate */
3020 diff = secs - n->conn_rate.last;
3021 if (diff >= n->conn_rate.seconds)
3022 pstore->conn_rate.count = 0;
3023 else
3024 pstore->conn_rate.count -=
3025 n->conn_rate.count * diff /
3026 n->conn_rate.seconds;
3027
3028 error = copyout(pstore, p, sizeof(*p));
3029 if (error) {
3030 free(pstore, M_TEMP);
3031 goto fail;
3032 }
3033 p++;
3034 nr++;
3035 }
3036 psn->psn_len = sizeof(struct pf_src_node) * nr;
3037
3038 free(pstore, M_TEMP);
3039 break;
3040 }
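
	/*
	 * Hypothetical userland sketch of the two-pass sizing dance
	 * above: a first call with psn_len == 0 only reports the space
	 * required, a second call copies the nodes out.
	 *
	 *	struct pfioc_src_nodes psn;
	 *
	 *	memset(&psn, 0, sizeof(psn));
	 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
	 *		err(1, "DIOCGETSRCNODES");
	 *	psn.psn_buf = malloc(psn.psn_len);
	 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
	 *		err(1, "DIOCGETSRCNODES");
	 *	(psn.psn_len / sizeof(struct pf_src_node) entries are now
	 *	 at psn.psn_src_nodes)
	 */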
3041
3042 case DIOCCLRSRCNODES: {
3043 struct pf_src_node *n;
3044 struct pf_state *state;
3045
3046 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3047 state->src_node = NULL;
3048 state->nat_src_node = NULL;
3049 }
3050 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3051 n->expire = 1;
3052 n->states = 0;
3053 }
3054 pf_purge_expired_src_nodes(1);
3055 pf_status.src_nodes = 0;
3056 break;
3057 }
3058
3059 case DIOCKILLSRCNODES: {
3060 struct pf_src_node *sn;
3061 struct pf_state *s;
3062 		struct pfioc_src_node_kill *psnk =
3063 		    (struct pfioc_src_node_kill *)addr;
3064 int killed = 0;
3065
3066 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
3067 			if (PF_MATCHA(psnk->psnk_src.neg,
3068 			    &psnk->psnk_src.addr.v.a.addr,
3069 			    &psnk->psnk_src.addr.v.a.mask,
3070 			    &sn->addr, sn->af) &&
3071 			    PF_MATCHA(psnk->psnk_dst.neg,
3072 			    &psnk->psnk_dst.addr.v.a.addr,
3073 			    &psnk->psnk_dst.addr.v.a.mask,
3074 			    &sn->raddr, sn->af)) {
3075 /* Handle state to src_node linkage */
3076 if (sn->states != 0) {
3077 RB_FOREACH(s, pf_state_tree_id,
3078 &tree_id) {
3079 if (s->src_node == sn)
3080 s->src_node = NULL;
3081 if (s->nat_src_node == sn)
3082 s->nat_src_node = NULL;
3083 }
3084 sn->states = 0;
3085 }
3086 sn->expire = 1;
3087 killed++;
3088 }
3089 }
3090
3091 if (killed > 0)
3092 pf_purge_expired_src_nodes(1);
3093
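		/*
		 * XXX: psnk_af is overloaded here to return the number
		 * of killed source nodes to userland.
		 */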
3094 psnk->psnk_af = killed;
3095 break;
3096 }
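
	/*
	 * Hypothetical userland sketch: expiring every source-tracking
	 * entry created by one host, as "pfctl -K 192.0.2.9" does.  An
	 * all-zero psnk_dst matches any peer; the kill count comes back
	 * in psnk_af, as noted above.
	 *
	 *	struct pfioc_src_node_kill psnk;
	 *
	 *	memset(&psnk, 0, sizeof(psnk));
	 *	inet_pton(AF_INET, "192.0.2.9",
	 *	    &psnk.psnk_src.addr.v.a.addr.v4);
	 *	memset(&psnk.psnk_src.addr.v.a.mask, 0xff,
	 *	    sizeof(struct in_addr));
	 *	if (ioctl(dev, DIOCKILLSRCNODES, &psnk) == -1)
	 *		err(1, "DIOCKILLSRCNODES");
	 *	printf("%u killed\n", psnk.psnk_af);
	 */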
3097
3098 case DIOCSETHOSTID: {
3099 u_int32_t *hostid = (u_int32_t *)addr;
3100
3101 if (*hostid == 0)
3102 pf_status.hostid = arc4random();
3103 else
3104 pf_status.hostid = *hostid;
3105 break;
3106 }
3107
3108 case DIOCOSFPFLUSH:
3109 pf_osfp_flush();
3110 break;
3111
3112 case DIOCIGETIFACES: {
3113 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3114
3115 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3116 error = ENODEV;
3117 break;
3118 }
3119 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
3120 &io->pfiio_size);
3121 break;
3122 }
3123
3124 case DIOCSETIFFLAG: {
3125 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3126
3127 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3128 break;
3129 }
3130
3131 case DIOCCLRIFFLAG: {
3132 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3133
3134 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3135 break;
3136 }
3137
3138 default:
3139 error = ENODEV;
3140 break;
3141 }
3142 fail:
3143 splx(s);
3144 if (flags & FWRITE)
3145 rw_exit_write(&pf_consistency_lock);
3146 else
3147 rw_exit_read(&pf_consistency_lock);
3148 return (error);
3149 }
3150
3151 #ifdef __NetBSD__
3152 #ifdef INET
3153 static int
3154 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3155 {
3156 int error;
3157
3158 	/*
3159 	 * Ensure that mbufs are writable beforehand, as the pf code
3160 	 * assumes it may modify them in place.  A maximal ip hdr
3161 	 * (60 bytes) + a maximal tcp hdr (60 bytes) should be enough.
3162 	 * XXX inefficient
3163 	 */
3164 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
3165 if (error) {
3166 m_freem(*mp);
3167 *mp = NULL;
3168 return error;
3169 }
3170
3171 /*
3172 * If the packet is out-bound, we can't delay checksums
3173 * here. For in-bound, the checksum has already been
3174 * validated.
3175 */
3176 if (dir == PFIL_OUT) {
3177 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
3178 in_delayed_cksum(*mp);
3179 (*mp)->m_pkthdr.csum_flags &=
3180 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
3181 }
3182 }
3183
3184 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3185 != PF_PASS) {
3186 m_freem(*mp);
3187 *mp = NULL;
3188 return EHOSTUNREACH;
3189 }
3190
3191 	/*
3192 	 * pf is not compatible with ipflow fast-forwarding; opt out.
3193 	 */
3194
3195 if (dir == PFIL_IN && *mp) {
3196 (*mp)->m_flags &= ~M_CANFASTFWD;
3197 }
3198
3199 return (0);
3200 }
3201 #endif /* INET */
3202
3203 #ifdef INET6
3204 static int
3205 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3206 {
3207 int error;
3208
3209 	/*
3210 	 * Ensure that mbufs are writable beforehand, as the pf code
3211 	 * assumes it may modify them in place; here the whole chain
3212 	 * is made writable.  XXX inefficient
3213 	 */
3214 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3215 if (error) {
3216 m_freem(*mp);
3217 *mp = NULL;
3218 return error;
3219 }
3220
3221 /*
3222 * If the packet is out-bound, we can't delay checksums
3223 * here. For in-bound, the checksum has already been
3224 * validated.
3225 */
3226 if (dir == PFIL_OUT) {
3227 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3228 in6_delayed_cksum(*mp);
3229 (*mp)->m_pkthdr.csum_flags &=
3230 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3231 }
3232 }
3233
3234 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3235 != PF_PASS) {
3236 m_freem(*mp);
3237 *mp = NULL;
3238 return EHOSTUNREACH;
3239 } else
3240 return (0);
3241 }
3242 #endif /* INET6 */
3243
3244 static int
3245 pf_pfil_attach(void)
3246 {
3247 struct pfil_head *ph_inet;
3248 #ifdef INET6
3249 struct pfil_head *ph_inet6;
3250 #endif /* INET6 */
3251 int error;
3252
3253 if (pf_pfil_attached)
3254 return (EBUSY);
3255
3256 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3257 if (ph_inet)
3258 error = pfil_add_hook((void *)pfil4_wrapper, NULL,
3259 PFIL_IN|PFIL_OUT, ph_inet);
3260 else
3261 error = ENOENT;
3262 if (error)
3263 return (error);
3264
3265 #ifdef INET6
3266 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3267 if (ph_inet6)
3268 error = pfil_add_hook((void *)pfil6_wrapper, NULL,
3269 PFIL_IN|PFIL_OUT, ph_inet6);
3270 else
3271 error = ENOENT;
3272 if (error)
3273 goto bad;
3274 #endif /* INET6 */
3275
3276 pf_pfil_attached = 1;
3277
3278 return (0);
3279
3280 #ifdef INET6
3281 bad:
3282 pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
3283 #endif /* INET6 */
3284
3285 return (error);
3286 }
3287
3288 static int
3289 pf_pfil_detach(void)
3290 {
3291 struct pfil_head *ph_inet;
3292 #ifdef INET6
3293 struct pfil_head *ph_inet6;
3294 #endif /* INET6 */
3295
3296 if (pf_pfil_attached == 0)
3297 return (EBUSY);
3298
3299 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3300 if (ph_inet)
3301 pfil_remove_hook((void *)pfil4_wrapper, NULL,
3302 PFIL_IN|PFIL_OUT, ph_inet);
3303 #ifdef INET6
3304 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3305 if (ph_inet6)
3306 pfil_remove_hook((void *)pfil6_wrapper, NULL,
3307 PFIL_IN|PFIL_OUT, ph_inet6);
3308 #endif /* INET6 */
3309 pf_pfil_attached = 0;
3310
3311 return (0);
3312 }
3313 #endif /* __NetBSD__ */
3314