/*	$NetBSD: npf_conn.c,v 1.5 2014/07/20 14:16:00 joerg Exp $	*/

/*-
 * Copyright (c) 2014 Mindaugas Rasiukevicius <rmind at netbsd org>
 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF connection tracking for stateful filtering and translation.
 *
 * Overview
 *
 *	Connection direction is identified by the direction of its first
 *	packet.  Packets can be incoming or outgoing with respect to an
 *	interface.  To describe a packet in the context of the connection
 *	direction, we use the terms "forwards stream" and "backwards
 *	stream".  All connections have two keys and thus two entries:
 *
 *		npf_conn_t::c_forw_entry for the forwards stream and
 *		npf_conn_t::c_back_entry for the backwards stream.
 *
 *	The keys are formed from the 5-tuple (source/destination address,
 *	source/destination port and the protocol).  Additional matching
 *	is performed on the interface (the common behaviour is equivalent
 *	to a 6-tuple lookup including the interface ID).  Note that the
 *	key may be formed using translated values in the case of NAT.
 *
 *	Connections can serve two purposes: implicit passing and dynamic
 *	NAT.  Connections for the former purpose are created by rules
 *	with the "stateful" attribute and are used for stateful filtering.
 *	Such connections indicate that packets of the backwards stream
 *	should be passed without inspection of the ruleset.  The other
 *	purpose is to associate a dynamic NAT mechanism with a connection.
 *	Such connections are created by NAT policies and are associated
 *	with the NAT translation structure via npf_conn_t::c_nat.  A
 *	single connection can serve both purposes, which is a common
 *	case.  An illustrative sketch of the typical per-packet use of
 *	this interface follows this comment block.
 *
 * Connection life-cycle
 *
 *	Connections are established when a packet matches such a rule or
 *	a NAT policy.  Both keys of the established connection are
 *	inserted into the connection database.  A garbage collection
 *	thread periodically scans all connections and, depending on the
 *	connection properties (e.g. last activity time, protocol),
 *	removes connection entries and expires the actual connections.
 *
 *	Each connection has a reference count.  A reference is acquired
 *	on lookup and should be released by the caller.  It guarantees
 *	that the connection will not be destroyed, although it may be
 *	expired.
 *
 * Synchronisation
 *
 *	The connection database is accessed in a lock-less manner by the
 *	main routines: npf_conn_inspect() and npf_conn_establish().  Since
 *	they are always called from a software interrupt, the database is
 *	protected using passive serialisation.  The main place which can
 *	destroy a connection is npf_conn_worker().  The database itself
 *	can be replaced and destroyed in npf_conn_reload().
 *
 * ALG support
 *
 *	Application-level gateways (ALGs) can override the generic
 *	connection inspection (the npf_alg_conn() call in the
 *	npf_conn_inspect() function) by performing their own lookup using
 *	a different key.  A recursive call to npf_conn_inspect() is not
 *	allowed.  The ALGs ought to use the npf_conn_lookup() function
 *	for this purpose.
 *
 * Lock order
 *
 *	conn_lock ->
 *		[ npf_config_lock -> ]
 *			npf_hashbucket_t::cd_lock ->
 *				npf_conn_t::c_lock
 */
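
/*
 * Illustrative sketch: the rough per-packet flow a packet handler is
 * expected to perform with this interface.  The ruleset inspection and
 * the "stateful" check are simplified assumptions for illustration;
 * only the npf_conn_*() calls are defined in this file (the NAT code
 * additionally uses npf_conn_setnat() to re-key the connection).
 *
 *	con = npf_conn_inspect(npc, di, &error);
 *	if (con && npf_conn_pass(con, &rp)) {
 *		// Implicit pass: skip the ruleset inspection.
 *	} else {
 *		// Inspect the ruleset; on a "stateful" match, create a
 *		// connection and mark it as a "pass" one.
 *		con = npf_conn_establish(npc, di, per_if);
 *		if (con) {
 *			npf_conn_setpass(con, rp);
 *		}
 *	}
 *	...
 *	if (con) {
 *		// Drop the reference; this activates a new connection.
 *		npf_conn_release(con);
 *	}
 */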

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_conn.c,v 1.5 2014/07/20 14:16:00 joerg Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <net/pfil.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/systm.h>

#define __NPF_CONN_PRIVATE
#include "npf_conn.h"
#include "npf_impl.h"

/*
 * Connection flags: PFIL_IN and PFIL_OUT values are reserved for direction.
 */
CTASSERT(PFIL_ALL == (0x001 | 0x002));
#define	CONN_ACTIVE	0x004	/* visible on inspection */
#define	CONN_PASS	0x008	/* perform implicit passing */
#define	CONN_EXPIRE	0x010	/* explicitly expire */
#define	CONN_REMOVED	0x020	/* "forw/back" entries removed */

/*
 * Connection tracking state: disabled (off), enabled (on) or flush request.
 */
enum { CONN_TRACKING_OFF, CONN_TRACKING_ON, CONN_TRACKING_FLUSH };
static volatile int	conn_tracking	__cacheline_aligned;

/* Connection tracking database, connection cache and the lock. */
static npf_conndb_t *	conn_db		__read_mostly;
static pool_cache_t	conn_cache	__read_mostly;
static kmutex_t		conn_lock	__cacheline_aligned;
static kcondvar_t	conn_cv		__cacheline_aligned;

static void	npf_conn_worker(void);
static void	npf_conn_destroy(npf_conn_t *);
/*
 * npf_conn_sys{init,fini}: initialise/destroy connection tracking.
 *
 * The connection database is initialised when connection tracking gets
 * enabled via the npf_conn_tracking() interface.
 */

void
npf_conn_sysinit(void)
{
	conn_cache = pool_cache_init(sizeof(npf_conn_t), coherency_unit,
	    0, 0, "npfconpl", NULL, IPL_NET, NULL, NULL, NULL);
	mutex_init(&conn_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&conn_cv, "npfconcv");
	conn_tracking = CONN_TRACKING_OFF;
	conn_db = NULL;

	npf_worker_register(npf_conn_worker);
}

void
npf_conn_sysfini(void)
{
	/* Disable tracking, flush all connections. */
	npf_conn_tracking(false);
	npf_worker_unregister(npf_conn_worker);

	KASSERT(conn_tracking == CONN_TRACKING_OFF);
	KASSERT(conn_db == NULL);
	pool_cache_destroy(conn_cache);
	mutex_destroy(&conn_lock);
	cv_destroy(&conn_cv);
}

/*
 * npf_conn_reload: perform the reload by flushing the current connection
 * database and replacing it with the new one, or just destroying it.
 *
 * This is the key routine synchronising with all other readers and writers.
 */
static void
npf_conn_reload(npf_conndb_t *ndb, int tracking)
{
	npf_conndb_t *odb;

	/* Must synchronise with G/C thread and connection saving/restoring. */
	mutex_enter(&conn_lock);
	while (conn_tracking == CONN_TRACKING_FLUSH) {
		cv_wait(&conn_cv, &conn_lock);
	}

	/*
	 * Set the flush status.  It disables connection inspection as well
	 * as creation.  There may be some operations in flight; drain them.
	 */
	npf_config_enter();
	conn_tracking = CONN_TRACKING_FLUSH;
	npf_config_sync();
	npf_config_exit();

	/* Notify the worker to G/C all connections. */
	npf_worker_signal();
	while (conn_tracking == CONN_TRACKING_FLUSH) {
		cv_wait(&conn_cv, &conn_lock);
	}

	/* Install the new database and make it visible. */
	odb = atomic_swap_ptr(&conn_db, ndb);
	membar_sync();
	conn_tracking = tracking;

	/* Done.  Destroy the old database, if any. */
	mutex_exit(&conn_lock);
	if (odb) {
		npf_conndb_destroy(odb);
	}
}

/*
 * npf_conn_tracking: enable/disable connection tracking.
 */
void
npf_conn_tracking(bool track)
{
	if (conn_tracking == CONN_TRACKING_OFF && track) {
		/* Disabled -> Enabled. */
		npf_conndb_t *cd = npf_conndb_create();
		npf_conn_reload(cd, CONN_TRACKING_ON);
		return;
	}
	if (conn_tracking == CONN_TRACKING_ON && !track) {
		/* Enabled -> Disabled. */
		npf_conn_reload(NULL, CONN_TRACKING_OFF);
		pool_cache_invalidate(conn_cache);
		return;
	}
}

static bool
npf_conn_trackable_p(const npf_cache_t *npc)
{
	/*
	 * Check if connection tracking is on.  Also, if layers 3 and 4 are
	 * not cached, the protocol is not supported or the packet is invalid.
	 */
	if (conn_tracking != CONN_TRACKING_ON) {
		return false;
	}
	if (!npf_iscached(npc, NPC_IP46) || !npf_iscached(npc, NPC_LAYER4)) {
		return false;
	}
	return true;
}

/*
 * npf_conn_conkey: construct a key for the connection lookup.
 */
bool
npf_conn_conkey(const npf_cache_t *npc, npf_connkey_t *key, const bool forw)
{
	const u_int alen = npc->npc_alen;
	const struct tcphdr *th;
	const struct udphdr *uh;
	u_int keylen, isrc, idst;
	uint16_t id[2];

	switch (npc->npc_proto) {
	case IPPROTO_TCP:
		KASSERT(npf_iscached(npc, NPC_TCP));
		th = npc->npc_l4.tcp;
		id[NPF_SRC] = th->th_sport;
		id[NPF_DST] = th->th_dport;
		break;
	case IPPROTO_UDP:
		KASSERT(npf_iscached(npc, NPC_UDP));
		uh = npc->npc_l4.udp;
		id[NPF_SRC] = uh->uh_sport;
		id[NPF_DST] = uh->uh_dport;
		break;
	case IPPROTO_ICMP:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp *ic = npc->npc_l4.icmp;
			id[NPF_SRC] = ic->icmp_id;
			id[NPF_DST] = ic->icmp_id;
			break;
		}
		return false;
	case IPPROTO_ICMPV6:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp6_hdr *ic6 = npc->npc_l4.icmp6;
			id[NPF_SRC] = ic6->icmp6_id;
			id[NPF_DST] = ic6->icmp6_id;
			break;
		}
		return false;
	default:
		/* Unsupported protocol. */
		return false;
	}

	/*
	 * Finally, construct a key formed out of 32-bit integers.
	 */
	if (__predict_true(forw)) {
		isrc = NPF_SRC, idst = NPF_DST;
	} else {
		isrc = NPF_DST, idst = NPF_SRC;
	}

	key->ck_key[0] = ((uint32_t)npc->npc_proto << 16) | (alen & 0xffff);
	key->ck_key[1] = ((uint32_t)id[isrc] << 16) | id[idst];

	if (__predict_true(alen == sizeof(in_addr_t))) {
		key->ck_key[2] = npc->npc_ips[isrc]->s6_addr32[0];
		key->ck_key[3] = npc->npc_ips[idst]->s6_addr32[0];
		keylen = 4 * sizeof(uint32_t);
	} else {
		const u_int nwords = alen >> 2;
		memcpy(&key->ck_key[2], npc->npc_ips[isrc], alen);
		memcpy(&key->ck_key[2 + nwords], npc->npc_ips[idst], alen);
		keylen = (2 + (nwords * 2)) * sizeof(uint32_t);
	}
	(void)keylen;
	return true;
}
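
/*
 * Worked example of the key layout produced by npf_conn_conkey() above
 * for a "forwards" IPv4 TCP key (a sketch for illustration; the 16-bit
 * IDs are stored in network byte order, as copied from the headers):
 *
 *	ck_key[0] = (IPPROTO_TCP << 16) | 4	// protocol, address length
 *	ck_key[1] = (th_sport << 16) | th_dport	// source/destination IDs
 *	ck_key[2] = source IPv4 address
 *	ck_key[3] = destination IPv4 address
 *
 * For IPv6, each address occupies four 32-bit words, giving a 10-word
 * key.  The "backwards" key is the same with the source and destination
 * values swapped.
 */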

static __inline void
connkey_set_addr(npf_connkey_t *key, const npf_addr_t *naddr, const int di)
{
	const u_int alen = key->ck_key[0] & 0xffff;
	uint32_t *addr = &key->ck_key[2 + ((alen >> 2) * di)];

	KASSERT(alen > 0);
	memcpy(addr, naddr, alen);
}

static __inline void
connkey_set_id(npf_connkey_t *key, const uint16_t id, const int di)
{
	const uint32_t oid = key->ck_key[1];
	const u_int shift = 16 * !di;
	const uint32_t mask = 0xffff0000 >> shift;

	key->ck_key[1] = ((uint32_t)id << shift) | (oid & mask);
}
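
/*
 * Note on the helpers above (assuming NPF_SRC is 0 and NPF_DST is 1, as
 * the key layout implies): connkey_set_addr() overwrites the address
 * words of the given direction, while connkey_set_id() overwrites the
 * upper (source) or the lower (destination) 16 bits of ck_key[1].
 */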

/*
 * npf_conn_lookup: look up an established connection.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_lookup(const npf_cache_t *npc, const int di, bool *forw)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	npf_connkey_t key;
	u_int flags, cifid;
	bool ok, pforw;

	/* Construct a key and look up the connection in the store. */
	if (!npf_conn_conkey(npc, &key, true)) {
		return NULL;
	}
	con = npf_conndb_lookup(conn_db, &key, forw);
	if (con == NULL) {
		return NULL;
	}
	KASSERT(npc->npc_proto == con->c_proto);

	/* Check if the connection is active and not expired. */
	flags = con->c_flags;
	ok = (flags & (CONN_ACTIVE | CONN_EXPIRE)) == CONN_ACTIVE;

	if (__predict_false(!ok)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/*
	 * Match the interface and the direction of the connection entry
	 * against the packet.
	 */
	cifid = con->c_ifid;
	if (__predict_false(cifid && cifid != nbuf->nb_ifid)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}
	pforw = (flags & PFIL_ALL) == di;
	if (__predict_false(*forw != pforw)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/* Update the last activity time. */
	getnanouptime(&con->c_atime);
	return con;
}

/*
 * npf_conn_inspect: look up a connection and inspect the protocol data.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_inspect(npf_cache_t *npc, const int di, int *error)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	bool forw, ok;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Query the ALG, which may look up the connection for us. */
	if ((con = npf_alg_conn(npc, di)) != NULL) {
		/* Note: reference is held. */
		return con;
	}
	if (nbuf_head_mbuf(nbuf) == NULL) {
		*error = ENOMEM;
		return NULL;
	}
	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	/* Main lookup of the connection. */
	if ((con = npf_conn_lookup(npc, di, &forw)) == NULL) {
		return NULL;
	}

	/* Inspect the protocol data and handle state changes. */
	mutex_enter(&con->c_lock);
	ok = npf_state_inspect(npc, &con->c_state, forw);
	mutex_exit(&con->c_lock);

	if (__predict_false(!ok)) {
		/* Invalid: let the rules deal with it. */
		npf_conn_release(con);
		npf_stats_inc(NPF_STAT_INVALID_STATE);
		con = NULL;
	}
	return con;
}

/*
 * npf_conn_establish: create a new connection and insert it into the
 * global list.
 *
 * => Connection is created with the reference held for the caller.
 * => Connection will be activated on the first reference release.
 */
npf_conn_t *
npf_conn_establish(npf_cache_t *npc, int di, bool per_if)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Allocate and initialise the new connection. */
	con = pool_cache_get(conn_cache, PR_NOWAIT);
	if (__predict_false(!con)) {
		return NULL;
	}
	NPF_PRINTF(("NPF: create conn %p\n", con));
	npf_stats_inc(NPF_STAT_SESSION_CREATE);

	/* Set the reference count and flags (they indicate the direction). */
	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	con->c_flags = (di & PFIL_ALL);
	con->c_refcnt = 1;
	con->c_rproc = NULL;
	con->c_nat = NULL;

	/* Initialise the protocol state. */
	if (!npf_state_init(npc, &con->c_state)) {
		goto err;
	}

	KASSERT(npf_iscached(npc, NPC_IP46));
	npf_connkey_t *fw = &con->c_forw_entry;
	npf_connkey_t *bk = &con->c_back_entry;

	/*
	 * Construct the "forwards" and "backwards" keys.  Also, set the
	 * interface ID for this connection (unless it is global).
	 */
	if (!npf_conn_conkey(npc, fw, true)) {
		goto err;
	}
	if (!npf_conn_conkey(npc, bk, false)) {
		goto err;
	}
	fw->ck_backptr = bk->ck_backptr = con;
	con->c_ifid = per_if ? nbuf->nb_ifid : 0;
	con->c_proto = npc->npc_proto;

	/* Set the last activity time for the new connection. */
	getnanouptime(&con->c_atime);

	/*
	 * Insert both keys (entries representing the directions) of the
	 * connection.  At this point, it becomes visible.
	 */
	if (!npf_conndb_insert(conn_db, fw, con)) {
		goto err;
	}
	if (!npf_conndb_insert(conn_db, bk, con)) {
		/* We have hit a duplicate. */
		npf_conndb_remove(conn_db, fw);
		npf_stats_inc(NPF_STAT_RACE_SESSION);
		goto err;
	}

	/* Finally, insert into the connection list. */
	NPF_PRINTF(("NPF: establish conn %p\n", con));
	npf_conndb_enqueue(conn_db, con);
	return con;
err:
	npf_conn_destroy(con);
	return NULL;
}

static void
npf_conn_destroy(npf_conn_t *con)
{
	if (con->c_nat) {
		/* Release any NAT structures. */
		npf_nat_destroy(con->c_nat);
	}
	if (con->c_rproc) {
		/* Release the rule procedure. */
		npf_rproc_release(con->c_rproc);
	}

	/* Destroy the protocol state. */
	npf_state_destroy(&con->c_state);
	mutex_destroy(&con->c_lock);

	/* Free the structure and bump the statistics counter. */
	pool_cache_put(conn_cache, con);
	npf_stats_inc(NPF_STAT_SESSION_DESTROY);
	NPF_PRINTF(("NPF: conn %p destroyed\n", con));
}

/*
 * npf_conn_setnat: associate the NAT entry with the connection, then
 * update and re-insert the connection entry using the translation values.
 */
int
npf_conn_setnat(const npf_cache_t *npc, npf_conn_t *con,
    npf_nat_t *nt, u_int ntype)
{
	static const u_int nat_type_dimap[] = {
		[NPF_NATOUT] = NPF_DST,
		[NPF_NATIN] = NPF_SRC,
	};
	npf_connkey_t key, *bk;
	npf_conn_t *ret __diagused;
	npf_addr_t *taddr;
	in_port_t tport;
	u_int tidx;

	KASSERT(con->c_refcnt > 0);

	npf_nat_gettrans(nt, &taddr, &tport);
	KASSERT(ntype == NPF_NATOUT || ntype == NPF_NATIN);
	tidx = nat_type_dimap[ntype];

	/* Construct the "backwards" key. */
	if (!npf_conn_conkey(npc, &key, false)) {
		return EINVAL;
	}

	/* Acquire the lock and check for races. */
	mutex_enter(&con->c_lock);
	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* The connection has expired. */
		mutex_exit(&con->c_lock);
		return EINVAL;
	}
	if (__predict_false(con->c_nat != NULL)) {
		/* Race with a duplicate packet. */
		mutex_exit(&con->c_lock);
		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Remove the "backwards" entry. */
	ret = npf_conndb_remove(conn_db, &key);
	KASSERT(ret == con);

	/* Set the source/destination IDs to the translation values. */
	bk = &con->c_back_entry;
	connkey_set_addr(bk, taddr, tidx);
	if (tport) {
		connkey_set_id(bk, tport, tidx);
	}

	/* Finally, re-insert the "backwards" entry. */
	if (!npf_conndb_insert(conn_db, bk, con)) {
		/*
		 * Race: we have hit a duplicate; remove the "forwards"
		 * entry and expire our connection, as it is no longer valid.
		 */
		(void)npf_conndb_remove(conn_db, &con->c_forw_entry);
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Associate the NAT entry and release the lock. */
	con->c_nat = nt;
	mutex_exit(&con->c_lock);
	return 0;
}
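
/*
 * Illustrative example of the re-keying performed by npf_conn_setnat()
 * above (the addresses are made up for the sketch).  For an outbound NAT
 * policy (NPF_NATOUT), tidx is NPF_DST, so the translated address/port
 * replace the destination part of the "backwards" key, since the replies
 * arrive addressed to the translated address:
 *
 *	forw:	10.0.0.5:1234 -> 198.51.100.7:80	(unchanged)
 *	back:	198.51.100.7:80 -> 203.0.113.1:4055	(was -> 10.0.0.5:1234)
 *
 * Here 203.0.113.1:4055 stands for the translated address and port
 * returned by npf_nat_gettrans().
 */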

/*
 * npf_conn_expire: explicitly mark the connection as expired.
 */
void
npf_conn_expire(npf_conn_t *con)
{
	/* KASSERT(con->c_refcnt > 0); XXX: npf_nat_freepolicy() */
	atomic_or_uint(&con->c_flags, CONN_EXPIRE);
}

/*
 * npf_conn_pass: return true if the connection is a "pass" one,
 * otherwise false.
 */
bool
npf_conn_pass(const npf_conn_t *con, npf_rproc_t **rp)
{
	KASSERT(con->c_refcnt > 0);
	if (__predict_true(con->c_flags & CONN_PASS)) {
		*rp = con->c_rproc;
		return true;
	}
	return false;
}

/*
 * npf_conn_setpass: mark the connection as a "pass" one and associate
 * the rule procedure with it.
 */
void
npf_conn_setpass(npf_conn_t *con, npf_rproc_t *rp)
{
	KASSERT((con->c_flags & CONN_ACTIVE) == 0);
	KASSERT(con->c_refcnt > 0);
	KASSERT(con->c_rproc == NULL);

	/*
	 * No need for atomic operations, since the connection is not yet
	 * active.  If rproc is set, the caller transfers its reference to
	 * us, which will be released in npf_conn_destroy().
	 */
	con->c_flags |= CONN_PASS;
	con->c_rproc = rp;
}

/*
 * npf_conn_release: release a reference, which might allow the G/C
 * thread to destroy this connection.
 */
void
npf_conn_release(npf_conn_t *con)
{
	if ((con->c_flags & (CONN_ACTIVE | CONN_EXPIRE)) == 0) {
		/* Activate: after this, the connection is globally visible. */
		con->c_flags |= CONN_ACTIVE;
	}
	KASSERT(con->c_refcnt > 0);
	atomic_dec_uint(&con->c_refcnt);
}

/*
 * npf_conn_retnat: return the associated NAT data entry and indicate
 * whether it is a "forwards" or a "backwards" stream.
 */
npf_nat_t *
npf_conn_retnat(npf_conn_t *con, const int di, bool *forw)
{
	KASSERT(con->c_refcnt > 0);
	*forw = (con->c_flags & PFIL_ALL) == di;
	return con->c_nat;
}

/*
 * npf_conn_expired: criterion to check whether the connection has expired.
 */
static inline bool
npf_conn_expired(const npf_conn_t *con, const struct timespec *tsnow)
{
	const int etime = npf_state_etime(&con->c_state, con->c_proto);
	struct timespec tsdiff;

	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* Explicitly marked to be expired. */
		return true;
	}
	timespecsub(tsnow, &con->c_atime, &tsdiff);
	return tsdiff.tv_sec > etime;
}
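
/*
 * Worked example (illustrative; the actual timeout values come from
 * npf_state_etime() and depend on the protocol and state): if the timeout
 * for the current state is, say, 86400 seconds and the last activity was
 * 90000 seconds ago, then tsdiff.tv_sec (90000) > etime (86400) and the
 * connection is reported as expired; the G/C worker below will then
 * remove its entries and destroy it.
 */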

/*
 * npf_conn_worker: G/C to run from a worker thread.
 */
static void
npf_conn_worker(void)
{
	npf_conn_t *con, *prev, *gclist = NULL;
	npf_conndb_t *cd;
	struct timespec tsnow;
	bool flushall;

	mutex_enter(&conn_lock);
	if ((cd = conn_db) == NULL) {
		goto done;
	}
	flushall = (conn_tracking != CONN_TRACKING_ON);
	getnanouptime(&tsnow);

	/*
	 * Scan all connections and check them for expiration.
	 */
	prev = NULL;
	con = npf_conndb_getlist(cd);
	while (con) {
		npf_conn_t *next = con->c_next;

		/* Expired?  Flushing all? */
		if (!npf_conn_expired(con, &tsnow) && !flushall) {
			prev = con;
			con = next;
			continue;
		}

		/* Remove both entries of the connection. */
		mutex_enter(&con->c_lock);
		if ((con->c_flags & CONN_REMOVED) == 0) {
			npf_conn_t *ret __diagused;

			ret = npf_conndb_remove(cd, &con->c_forw_entry);
			KASSERT(ret == con);
			ret = npf_conndb_remove(cd, &con->c_back_entry);
			KASSERT(ret == con);
		}

		/* Flag the removal and expiration. */
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		/* Move to the G/C list. */
		npf_conndb_dequeue(cd, con, prev);
		con->c_next = gclist;
		gclist = con;

		/* Next... */
		con = next;
	}
	npf_conndb_settail(cd, prev);
done:
	/* Ensure it is safe to destroy the connections. */
	if (gclist) {
		npf_config_enter();
		npf_config_sync();
		npf_config_exit();
	}

	/*
	 * Garbage collect all expired connections.
	 * May need to wait for the references to drain.
	 */
	con = gclist;
	while (con) {
		npf_conn_t *next = con->c_next;

		/*
		 * Destroy only if removed and there are no references.
		 * Otherwise, wait for a tiny moment.
		 */
		if (__predict_false(con->c_refcnt)) {
			kpause("npfcongc", false, 1, NULL);
			continue;
		}
		npf_conn_destroy(con);
		con = next;
	}

	if (conn_tracking == CONN_TRACKING_FLUSH) {
		/* Flush was requested - indicate we are done. */
		conn_tracking = CONN_TRACKING_OFF;
		cv_broadcast(&conn_cv);
	}
	mutex_exit(&conn_lock);
}

void
npf_conn_load(npf_conndb_t *cd)
{
	KASSERT(cd != NULL);
	npf_conn_reload(cd, CONN_TRACKING_ON);
}

/*
 * npf_conn_save: construct a list of connections prepared for saving.
 * Note: this is expected to be an expensive operation.
 */
int
npf_conn_save(prop_array_t conlist, prop_array_t nplist)
{
	npf_conn_t *con, *prev;

	/*
	 * Note: acquire conn_lock to prevent the database destruction
	 * and to keep out the G/C thread.
	 */
	mutex_enter(&conn_lock);
	if (!conn_db || conn_tracking != CONN_TRACKING_ON) {
		mutex_exit(&conn_lock);
		return 0;
	}
	prev = NULL;
	con = npf_conndb_getlist(conn_db);
	while (con) {
		npf_conn_t *next = con->c_next;
		prop_data_t d;

		if ((con->c_flags & (CONN_ACTIVE|CONN_EXPIRE)) != CONN_ACTIVE)
			goto skip;

		prop_dictionary_t cdict = prop_dictionary_create();
		prop_dictionary_set_uint32(cdict, "flags", con->c_flags);
		prop_dictionary_set_uint32(cdict, "proto", con->c_proto);
		/* FIXME: interface-id */

		d = prop_data_create_data(&con->c_state, sizeof(npf_state_t));
		prop_dictionary_set_and_rel(cdict, "state", d);

		const uint32_t *fkey = con->c_forw_entry.ck_key;
		d = prop_data_create_data(fkey, NPF_CONN_MAXKEYLEN);
		prop_dictionary_set_and_rel(cdict, "forw-key", d);

		const uint32_t *bkey = con->c_back_entry.ck_key;
		d = prop_data_create_data(bkey, NPF_CONN_MAXKEYLEN);
		prop_dictionary_set_and_rel(cdict, "back-key", d);

		CTASSERT(sizeof(uintptr_t) <= sizeof(uint64_t));
		prop_dictionary_set_uint64(cdict, "id-ptr", (uintptr_t)con);

		if (con->c_nat) {
			npf_nat_save(cdict, nplist, con->c_nat);
		}
		prop_array_add(conlist, cdict);
		prop_object_release(cdict);
skip:
		prev = con;
		con = next;
	}
	npf_conndb_settail(conn_db, prev);
	mutex_exit(&conn_lock);

	return 0;
}
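
/*
 * For reference, a sketch of the dictionary produced per connection by
 * npf_conn_save() above (types as stored by the proplib calls; the NAT
 * entry, if present, is added by npf_nat_save()):
 *
 *	"flags"    : uint32	(connection flags)
 *	"proto"    : uint32	(IP protocol number)
 *	"state"    : data	(npf_state_t blob)
 *	"forw-key" : data	(NPF_CONN_MAXKEYLEN bytes)
 *	"back-key" : data	(NPF_CONN_MAXKEYLEN bytes)
 *	"id-ptr"   : uint64	(connection identifier, the kernel pointer)
 */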

/*
 * npf_conn_restore: fully reconstruct a single connection from a dictionary
 * and insert it into the given database.
 */
int
npf_conn_restore(npf_conndb_t *cd, prop_dictionary_t cdict)
{
	npf_conn_t *con;
	npf_connkey_t *fw, *bk;
	prop_object_t obj;
	const void *d;

	/* Allocate a connection and initialise it (clear first). */
	con = pool_cache_get(conn_cache, PR_WAITOK);
	memset(con, 0, sizeof(npf_conn_t));
	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);

	prop_dictionary_get_uint32(cdict, "proto", &con->c_proto);
	prop_dictionary_get_uint32(cdict, "flags", &con->c_flags);
	con->c_flags &= PFIL_ALL | CONN_ACTIVE | CONN_PASS;
	getnanouptime(&con->c_atime);

	obj = prop_dictionary_get(cdict, "state");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != sizeof(npf_state_t)) {
		goto err;
	}
	memcpy(&con->c_state, d, sizeof(npf_state_t));

	/* Reconstruct the NAT association, if any (otherwise c_nat is NULL). */
	con->c_nat = npf_nat_restore(cdict, con);

	/*
	 * Fetch and copy the keys for each direction.
	 */
	obj = prop_dictionary_get(cdict, "forw-key");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != NPF_CONN_MAXKEYLEN) {
		goto err;
	}
	fw = &con->c_forw_entry;
	memcpy(&fw->ck_key, d, NPF_CONN_MAXKEYLEN);

	obj = prop_dictionary_get(cdict, "back-key");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != NPF_CONN_MAXKEYLEN) {
		goto err;
	}
	bk = &con->c_back_entry;
	memcpy(&bk->ck_key, d, NPF_CONN_MAXKEYLEN);

	fw->ck_backptr = bk->ck_backptr = con;

	/* Insert the entries and the connection itself. */
	if (!npf_conndb_insert(cd, fw, con)) {
		goto err;
	}
	if (!npf_conndb_insert(cd, bk, con)) {
		npf_conndb_remove(cd, fw);
		goto err;
	}
	npf_conndb_enqueue(cd, con);
	return 0;
err:
	npf_conn_destroy(con);
	return EINVAL;
}

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_conn_print(const npf_conn_t *con)
{
	const u_int alen = NPF_CONN_GETALEN(&con->c_forw_entry);
	const uint32_t *fkey = con->c_forw_entry.ck_key;
	const uint32_t *bkey = con->c_back_entry.ck_key;
	const u_int proto = con->c_proto;
	struct timespec tsnow, tsdiff;
	const void *src, *dst;
	int etime;

	getnanouptime(&tsnow);
	timespecsub(&tsnow, &con->c_atime, &tsdiff);
	etime = npf_state_etime(&con->c_state, proto);

	printf("%p:\n\tproto %d flags 0x%x tsdiff %d etime %d\n",
	    con, proto, con->c_flags, (int)tsdiff.tv_sec, etime);

	src = &fkey[2], dst = &fkey[2 + (alen >> 2)];
	printf("\tforw %s:%d", npf_addr_dump(src, alen), ntohs(fkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(fkey[1] & 0xffff));

	src = &bkey[2], dst = &bkey[2 + (alen >> 2)];
	printf("\tback %s:%d", npf_addr_dump(src, alen), ntohs(bkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(bkey[1] & 0xffff));

	npf_state_dump(&con->c_state);
	if (con->c_nat) {
		npf_nat_dump(con->c_nat);
	}
}

#endif