/*	$NetBSD: npf_conn.c,v 1.6 2014/07/23 01:25:34 rmind Exp $	*/

/*-
 * Copyright (c) 2014 Mindaugas Rasiukevicius <rmind at netbsd org>
 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF connection tracking for stateful filtering and translation.
 *
 * Overview
 *
 *	Connection direction is identified by the direction of its first
 *	packet.  Packets can be incoming or outgoing with respect to an
 *	interface.  To describe a packet in the context of the connection
 *	direction, we use the terms "forwards stream" and "backwards
 *	stream".  All connections have two keys and thus two entries:
 *
 *		npf_conn_t::c_forw_entry for the forwards stream and
 *		npf_conn_t::c_back_entry for the backwards stream.
 *
 *	The keys are formed from the 5-tuple (source/destination address,
 *	source/destination port and the protocol).  Additional matching
 *	is performed on the interface, so the common behaviour is
 *	equivalent to a 6-tuple lookup that includes the interface ID.
 *	Note that the key may be formed using translated values in the
 *	case of NAT.
 *
 *	Connections can serve two purposes: implicit passing and/or
 *	dynamic NAT.  Connections for the former purpose are created by
 *	rules with the "stateful" attribute and are used for stateful
 *	filtering.  Such connections indicate that packets of the
 *	backwards stream should be passed without inspection of the
 *	ruleset.  The other purpose is to associate a dynamic NAT
 *	mechanism with a connection.  Such connections are created by
 *	the NAT policies and are linked to the NAT translation structure
 *	via npf_conn_t::c_nat.  A single connection can serve both
 *	purposes, which is a common case.
 *
 * Connection life-cycle
 *
 *	Connections are established when a packet matches a stateful rule
 *	or a NAT policy.  Both keys of the established connection are
 *	inserted into the connection database.  A garbage collection
 *	thread periodically scans all connections and, depending on the
 *	connection properties (e.g. last activity time, protocol), removes
 *	the connection entries and expires the actual connections.
 *
 *	Each connection has a reference count.  The reference is acquired
 *	on lookup and should be released by the caller.  It guarantees that
 *	the connection will not be destroyed, although it may be expired.
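 *
 *	A minimal sketch of the expected calling pattern from a packet
 *	handler (illustrative only; "stateful_rule" and "rp" stand for
 *	hypothetical caller state and are not defined in this module):
 *
 *		con = npf_conn_inspect(npc, di, &error);
 *		if (con == NULL && stateful_rule) {
 *			con = npf_conn_establish(npc, di, true);
 *			if (con != NULL)
 *				npf_conn_setpass(con, rp);
 *		}
 *		...
 *		if (con != NULL)
 *			npf_conn_release(con);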
 *
 * Synchronisation
 *
 *	The connection database is accessed in a lock-less manner by the
 *	main routines: npf_conn_inspect() and npf_conn_establish().  Since
 *	they are always called from a software interrupt, the database is
 *	protected using passive serialisation.  The main place which can
 *	destroy a connection is npf_conn_worker().  The database itself
 *	can be replaced and destroyed in npf_conn_load().
 *
 * ALG support
 *
 *	Application-level gateways (ALGs) can override the generic
 *	connection inspection (the npf_alg_conn() call in the
 *	npf_conn_inspect() function) by performing their own lookup using
 *	a different key.  A recursive call to npf_conn_inspect() is not
 *	allowed.  The ALGs ought to use the npf_conn_lookup() function
 *	for this purpose.
 *
 * Lock order
 *
 *	npf_config_lock ->
 *		conn_lock ->
 *			npf_conn_t::c_lock
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_conn.c,v 1.6 2014/07/23 01:25:34 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <net/pfil.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/systm.h>

#define __NPF_CONN_PRIVATE
#include "npf_conn.h"
#include "npf_impl.h"

/*
 * Connection flags: PFIL_IN and PFIL_OUT values are reserved for direction.
 */
CTASSERT(PFIL_ALL == (0x001 | 0x002));
#define	CONN_ACTIVE	0x004	/* visible on inspection */
#define	CONN_PASS	0x008	/* perform implicit passing */
#define	CONN_EXPIRE	0x010	/* explicitly expire */
#define	CONN_REMOVED	0x020	/* "forw/back" entries removed */

/*
 * Connection tracking state: disabled (off) or enabled (on).
 */
enum { CONN_TRACKING_OFF, CONN_TRACKING_ON };
static volatile int	conn_tracking	__cacheline_aligned;

/* Connection tracking database, connection cache and the lock. */
static npf_conndb_t *	conn_db		__read_mostly;
static pool_cache_t	conn_cache	__read_mostly;
static kmutex_t		conn_lock	__cacheline_aligned;

static void	npf_conn_gc(npf_conndb_t *, bool, bool);
static void	npf_conn_worker(void);
static void	npf_conn_destroy(npf_conn_t *);

/*
 * npf_conn_sys{init,fini}: initialise/destroy connection tracking.
 */

void
npf_conn_sysinit(void)
{
	conn_cache = pool_cache_init(sizeof(npf_conn_t), coherency_unit,
	    0, 0, "npfconpl", NULL, IPL_NET, NULL, NULL, NULL);
	mutex_init(&conn_lock, MUTEX_DEFAULT, IPL_NONE);
	conn_tracking = CONN_TRACKING_OFF;
	conn_db = npf_conndb_create();

	npf_worker_register(npf_conn_worker);
}

void
npf_conn_sysfini(void)
{
	/* Note: the caller should have flushed the connections. */
	KASSERT(conn_tracking == CONN_TRACKING_OFF);
	npf_worker_unregister(npf_conn_worker);

	npf_conndb_destroy(conn_db);
	pool_cache_destroy(conn_cache);
	mutex_destroy(&conn_lock);
}

/*
 * npf_conn_load: perform the load by flushing the current connection
 * database and replacing it with the new one, or just destroying it.
 *
 * => The caller must disable the connection tracking and ensure that
 *    there are no connection database lookups or references in-flight.
 */
void
npf_conn_load(npf_conndb_t *ndb, bool track)
{
	npf_conndb_t *odb = NULL;

	KASSERT(npf_config_locked_p());

	/*
	 * The connection database is in the quiescent state.
	 * Prevent the G/C thread from running and install the new database.
	 */
	mutex_enter(&conn_lock);
	if (ndb) {
		KASSERT(conn_tracking == CONN_TRACKING_OFF);
		odb = conn_db;
		conn_db = ndb;
		membar_sync();
	}
	if (track) {
		/* After this point, lookups start flying in. */
		conn_tracking = CONN_TRACKING_ON;
	}
	mutex_exit(&conn_lock);

	if (odb) {
		/*
		 * Flush all; no sync, since the caller did it for us.
		 * Also, release the pool cache memory.
		 */
		npf_conn_gc(odb, true, false);
		npf_conndb_destroy(odb);
		pool_cache_invalidate(conn_cache);
	}
}

/*
 * npf_conn_tracking: enable/disable connection tracking.
 */
void
npf_conn_tracking(bool track)
{
	KASSERT(npf_config_locked_p());
	conn_tracking = track ? CONN_TRACKING_ON : CONN_TRACKING_OFF;
}

static inline bool
npf_conn_trackable_p(const npf_cache_t *npc)
{
	/*
	 * Check if connection tracking is on.  Also, if layers 3 and 4
	 * are not cached, the protocol is not supported or the packet
	 * is invalid.
	 */
	if (conn_tracking != CONN_TRACKING_ON) {
		return false;
	}
	if (!npf_iscached(npc, NPC_IP46) || !npf_iscached(npc, NPC_LAYER4)) {
		return false;
	}
	return true;
}

/*
 * npf_conn_conkey: construct a key for the connection lookup.
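 *
 * The key layout produced below is noted here for illustration (see
 * also NPF_CONN_GETALEN() and npf_conn_print()):
 *
 *	ck_key[0]	protocol (upper 16 bits) | address length in bytes
 *	ck_key[1]	source ID (upper 16 bits) | destination ID, i.e.
 *			the TCP/UDP ports or the ICMP identifier
 *	ck_key[2..]	source address words, then destination address
 *			words (one word each for IPv4, four each for IPv6)
 *
 * For the "backwards" key (forw == false), the source and destination
 * slots are swapped.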
 */
bool
npf_conn_conkey(const npf_cache_t *npc, npf_connkey_t *key, const bool forw)
{
	const u_int alen = npc->npc_alen;
	const struct tcphdr *th;
	const struct udphdr *uh;
	u_int keylen, isrc, idst;
	uint16_t id[2];

	switch (npc->npc_proto) {
	case IPPROTO_TCP:
		KASSERT(npf_iscached(npc, NPC_TCP));
		th = npc->npc_l4.tcp;
		id[NPF_SRC] = th->th_sport;
		id[NPF_DST] = th->th_dport;
		break;
	case IPPROTO_UDP:
		KASSERT(npf_iscached(npc, NPC_UDP));
		uh = npc->npc_l4.udp;
		id[NPF_SRC] = uh->uh_sport;
		id[NPF_DST] = uh->uh_dport;
		break;
	case IPPROTO_ICMP:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp *ic = npc->npc_l4.icmp;
			id[NPF_SRC] = ic->icmp_id;
			id[NPF_DST] = ic->icmp_id;
			break;
		}
		return false;
	case IPPROTO_ICMPV6:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp6_hdr *ic6 = npc->npc_l4.icmp6;
			id[NPF_SRC] = ic6->icmp6_id;
			id[NPF_DST] = ic6->icmp6_id;
			break;
		}
		return false;
	default:
		/* Unsupported protocol. */
		return false;
	}

	/*
	 * Finally, construct a key formed out of 32-bit integers.
	 */
	if (__predict_true(forw)) {
		isrc = NPF_SRC, idst = NPF_DST;
	} else {
		isrc = NPF_DST, idst = NPF_SRC;
	}

	key->ck_key[0] = ((uint32_t)npc->npc_proto << 16) | (alen & 0xffff);
	key->ck_key[1] = ((uint32_t)id[isrc] << 16) | id[idst];

	if (__predict_true(alen == sizeof(in_addr_t))) {
		key->ck_key[2] = npc->npc_ips[isrc]->s6_addr32[0];
		key->ck_key[3] = npc->npc_ips[idst]->s6_addr32[0];
		keylen = 4 * sizeof(uint32_t);
	} else {
		const u_int nwords = alen >> 2;
		memcpy(&key->ck_key[2], npc->npc_ips[isrc], alen);
		memcpy(&key->ck_key[2 + nwords], npc->npc_ips[idst], alen);
		keylen = (2 + (nwords * 2)) * sizeof(uint32_t);
	}
	(void)keylen;
	return true;
}

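/*
 * connkey_set_addr: rewrite the address of one direction (source or
 * destination) of the key in-place; the address length is taken from
 * the key itself (see the key layout above npf_conn_conkey()).
 */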
static __inline void
connkey_set_addr(npf_connkey_t *key, const npf_addr_t *naddr, const int di)
{
	const u_int alen = key->ck_key[0] & 0xffff;
	uint32_t *addr = &key->ck_key[2 + ((alen >> 2) * di)];

	KASSERT(alen > 0);
	memcpy(addr, naddr, alen);
}

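/*
 * connkey_set_id: rewrite the 16-bit ID (port or ICMP identifier) of one
 * direction of the key, preserving the other half of the word.  For
 * example, assuming NPF_SRC == 0 and NPF_DST == 1 (as the key layout
 * code above implies), di == NPF_SRC gives shift == 16, so the new ID
 * lands in the upper half of ck_key[1] while the mask keeps the
 * destination ID in the lower half.
 */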
static __inline void
connkey_set_id(npf_connkey_t *key, const uint16_t id, const int di)
{
	const uint32_t oid = key->ck_key[1];
	const u_int shift = 16 * !di;
	const uint32_t mask = 0xffff0000 >> shift;

	key->ck_key[1] = ((uint32_t)id << shift) | (oid & mask);
}

/*
 * npf_conn_lookup: look up whether there is an established connection.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_lookup(const npf_cache_t *npc, const int di, bool *forw)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	npf_connkey_t key;
	u_int flags, cifid;
	bool ok, pforw;

	/* Construct a key and look up the connection in the store. */
	if (!npf_conn_conkey(npc, &key, true)) {
		return NULL;
	}
	con = npf_conndb_lookup(conn_db, &key, forw);
	if (con == NULL) {
		return NULL;
	}
	KASSERT(npc->npc_proto == con->c_proto);

	/* Check if the connection is active and not expired. */
	flags = con->c_flags;
	ok = (flags & (CONN_ACTIVE | CONN_EXPIRE)) == CONN_ACTIVE;

	if (__predict_false(!ok)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/*
	 * Match the interface and the direction of the connection entry
	 * against the packet.
	 */
	cifid = con->c_ifid;
	if (__predict_false(cifid && cifid != nbuf->nb_ifid)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}
	pforw = (flags & PFIL_ALL) == di;
	if (__predict_false(*forw != pforw)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/* Update the last activity time. */
	getnanouptime(&con->c_atime);
	return con;
}

/*
 * npf_conn_inspect: look up a connection and inspect the protocol data.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_inspect(npf_cache_t *npc, const int di, int *error)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	bool forw, ok;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Query the ALGs, which may look up the connection for us. */
	if ((con = npf_alg_conn(npc, di)) != NULL) {
		/* Note: reference is held. */
		return con;
	}
	if (nbuf_head_mbuf(nbuf) == NULL) {
		*error = ENOMEM;
		return NULL;
	}
	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	/* Main lookup of the connection. */
	if ((con = npf_conn_lookup(npc, di, &forw)) == NULL) {
		return NULL;
	}

	/* Inspect the protocol data and handle state changes. */
	mutex_enter(&con->c_lock);
	ok = npf_state_inspect(npc, &con->c_state, forw);
	mutex_exit(&con->c_lock);

	if (__predict_false(!ok)) {
		/* Invalid: let the rules deal with it. */
		npf_conn_release(con);
		npf_stats_inc(NPF_STAT_INVALID_STATE);
		con = NULL;
	}
	return con;
}

/*
 * npf_conn_establish: create a new connection, insert into the global list.
 *
 * => Connection is created with the reference held for the caller.
 * => Connection will be activated on the first reference release.
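 *
 * A typical caller (sketch only; "rp" is a hypothetical rule procedure
 * reference held by the caller) would do:
 *
 *	con = npf_conn_establish(npc, di, true);
 *	if (con != NULL) {
 *		npf_conn_setpass(con, rp);
 *		npf_conn_release(con);
 *	}
 *
 * Here npf_conn_setpass() must come before the first release, since the
 * connection only becomes visible to npf_conn_inspect() at that point.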
 */
npf_conn_t *
npf_conn_establish(npf_cache_t *npc, int di, bool per_if)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Allocate and initialise the new connection. */
	con = pool_cache_get(conn_cache, PR_NOWAIT);
	if (__predict_false(!con)) {
		return NULL;
	}
	NPF_PRINTF(("NPF: create conn %p\n", con));
	npf_stats_inc(NPF_STAT_CONN_CREATE);

	/* Reference count and flags (the latter indicate the direction). */
	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	con->c_flags = (di & PFIL_ALL);
	con->c_refcnt = 1;
	con->c_rproc = NULL;
	con->c_nat = NULL;

	/* Initialise the protocol state. */
	if (!npf_state_init(npc, &con->c_state)) {
		goto err;
	}

	KASSERT(npf_iscached(npc, NPC_IP46));
	npf_connkey_t *fw = &con->c_forw_entry;
	npf_connkey_t *bk = &con->c_back_entry;

	/*
	 * Construct the "forwards" and "backwards" keys.  Also, set the
	 * interface ID for this connection (unless it is global).
	 */
	if (!npf_conn_conkey(npc, fw, true)) {
		goto err;
	}
	if (!npf_conn_conkey(npc, bk, false)) {
		goto err;
	}
	fw->ck_backptr = bk->ck_backptr = con;
	con->c_ifid = per_if ? nbuf->nb_ifid : 0;
	con->c_proto = npc->npc_proto;

	/* Set the last activity time for the new connection. */
	getnanouptime(&con->c_atime);

	/*
	 * Insert both keys (entries representing the directions) of the
	 * connection.  At this point, it becomes visible.
	 */
	if (!npf_conndb_insert(conn_db, fw, con)) {
		goto err;
	}
	if (!npf_conndb_insert(conn_db, bk, con)) {
		/* We have hit a duplicate. */
		npf_conndb_remove(conn_db, fw);
		npf_stats_inc(NPF_STAT_RACE_CONN);
		goto err;
	}

	/* Finally, insert into the connection list. */
	NPF_PRINTF(("NPF: establish conn %p\n", con));
	npf_conndb_enqueue(conn_db, con);
	return con;
err:
	npf_conn_destroy(con);
	return NULL;
}

static void
npf_conn_destroy(npf_conn_t *con)
{
	if (con->c_nat) {
		/* Release any NAT structures. */
		npf_nat_destroy(con->c_nat);
	}
	if (con->c_rproc) {
		/* Release the rule procedure. */
		npf_rproc_release(con->c_rproc);
	}

	/* Destroy the state. */
	npf_state_destroy(&con->c_state);
	mutex_destroy(&con->c_lock);

	/* Free the structure, increase the counter. */
	pool_cache_put(conn_cache, con);
	npf_stats_inc(NPF_STAT_CONN_DESTROY);
	NPF_PRINTF(("NPF: conn %p destroyed\n", con));
}

/*
 * npf_conn_setnat: associate a NAT entry with the connection, then update
 * and re-insert the connection entry using the translation values.
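 *
 * For example (following the nat_type_dimap table below): for an
 * outbound policy (NPF_NATOUT) the translated address/port replace the
 * destination slot of the "backwards" key, so that returning packets
 * addressed to the translated address match this connection; for
 * NPF_NATIN it is the source slot that gets rewritten.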
 */
int
npf_conn_setnat(const npf_cache_t *npc, npf_conn_t *con,
    npf_nat_t *nt, u_int ntype)
{
	static const u_int nat_type_dimap[] = {
		[NPF_NATOUT] = NPF_DST,
		[NPF_NATIN] = NPF_SRC,
	};
	npf_connkey_t key, *bk;
	npf_conn_t *ret __diagused;
	npf_addr_t *taddr;
	in_port_t tport;
	u_int tidx;

	KASSERT(con->c_refcnt > 0);

	npf_nat_gettrans(nt, &taddr, &tport);
	KASSERT(ntype == NPF_NATOUT || ntype == NPF_NATIN);
	tidx = nat_type_dimap[ntype];

	/* Construct the "backwards" key. */
	if (!npf_conn_conkey(npc, &key, false)) {
		return EINVAL;
	}

	/* Acquire the lock and check for races. */
	mutex_enter(&con->c_lock);
	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* The connection has expired. */
		mutex_exit(&con->c_lock);
		return EINVAL;
	}
	if (__predict_false(con->c_nat != NULL)) {
		/* Race with a duplicate packet. */
		mutex_exit(&con->c_lock);
		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Remove the "backwards" entry. */
	ret = npf_conndb_remove(conn_db, &key);
	KASSERT(ret == con);

	/* Set the source/destination IDs to the translation values. */
	bk = &con->c_back_entry;
	connkey_set_addr(bk, taddr, tidx);
	if (tport) {
		connkey_set_id(bk, tport, tidx);
	}

	/* Finally, re-insert the "backwards" entry. */
	if (!npf_conndb_insert(conn_db, bk, con)) {
		/*
		 * Race: we have hit a duplicate; remove the "forwards"
		 * entry and expire our connection, as it is no longer valid.
		 */
		(void)npf_conndb_remove(conn_db, &con->c_forw_entry);
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Associate the NAT entry and release the lock. */
	con->c_nat = nt;
	mutex_exit(&con->c_lock);
	return 0;
}

/*
 * npf_conn_expire: explicitly mark the connection as expired.
 */
void
npf_conn_expire(npf_conn_t *con)
{
	/* KASSERT(con->c_refcnt > 0); XXX: npf_nat_freepolicy() */
	atomic_or_uint(&con->c_flags, CONN_EXPIRE);
}

/*
 * npf_conn_pass: return true if the connection is a "pass" one,
 * otherwise false.
 */
bool
npf_conn_pass(const npf_conn_t *con, npf_rproc_t **rp)
{
	KASSERT(con->c_refcnt > 0);
	if (__predict_true(con->c_flags & CONN_PASS)) {
		*rp = con->c_rproc;
		return true;
	}
	return false;
}

/*
 * npf_conn_setpass: mark the connection as a "pass" one and associate
 * the rule procedure with it.
 */
void
npf_conn_setpass(npf_conn_t *con, npf_rproc_t *rp)
{
	KASSERT((con->c_flags & CONN_ACTIVE) == 0);
	KASSERT(con->c_refcnt > 0);
	KASSERT(con->c_rproc == NULL);

	/*
	 * No need for atomic since the connection is not yet active.
	 * If rproc is set, the caller transfers its reference to us,
	 * which will be released on npf_conn_destroy().
	 */
	con->c_flags |= CONN_PASS;
	con->c_rproc = rp;
}

/*
 * npf_conn_release: release a reference, which might allow the G/C thread
 * to destroy this connection.
 */
void
npf_conn_release(npf_conn_t *con)
{
	if ((con->c_flags & (CONN_ACTIVE | CONN_EXPIRE)) == 0) {
		/* Activate: after this, the connection is globally visible. */
		con->c_flags |= CONN_ACTIVE;
	}
	KASSERT(con->c_refcnt > 0);
	atomic_dec_uint(&con->c_refcnt);
}

/*
 * npf_conn_retnat: return the associated NAT data entry and indicate
 * whether it is a "forwards" or "backwards" stream.
 */
npf_nat_t *
npf_conn_retnat(npf_conn_t *con, const int di, bool *forw)
{
	KASSERT(con->c_refcnt > 0);
	*forw = (con->c_flags & PFIL_ALL) == di;
	return con->c_nat;
}

/*
 * npf_conn_expired: criterion to check if the connection is expired.
 */
static inline bool
npf_conn_expired(const npf_conn_t *con, const struct timespec *tsnow)
{
	const int etime = npf_state_etime(&con->c_state, con->c_proto);
	struct timespec tsdiff;

	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* Explicitly marked to be expired. */
		return true;
	}
	timespecsub(tsnow, &con->c_atime, &tsdiff);
	return tsdiff.tv_sec > etime;
}

/*
 * npf_conn_gc: garbage collect the expired connections.
 *
 * => Must run in a single-threaded manner.
 * => If it is a flush request, then destroy all connections.
 * => If 'sync' is true, then perform passive serialisation.
 */
static void
npf_conn_gc(npf_conndb_t *cd, bool flush, bool sync)
{
	npf_conn_t *con, *prev, *gclist = NULL;
	struct timespec tsnow;

	getnanouptime(&tsnow);

	/*
	 * Scan all connections and check them for expiration.
	 */
	prev = NULL;
	con = npf_conndb_getlist(cd);
	while (con) {
		npf_conn_t *next = con->c_next;

		/* Expired?  Flushing all? */
		if (!npf_conn_expired(con, &tsnow) && !flush) {
			prev = con;
			con = next;
			continue;
		}

		/* Remove both entries of the connection. */
		mutex_enter(&con->c_lock);
		if ((con->c_flags & CONN_REMOVED) == 0) {
			npf_conn_t *ret __diagused;

			ret = npf_conndb_remove(cd, &con->c_forw_entry);
			KASSERT(ret == con);
			ret = npf_conndb_remove(cd, &con->c_back_entry);
			KASSERT(ret == con);
		}

		/* Flag the removal and expiration. */
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		/* Move to the G/C list. */
		npf_conndb_dequeue(cd, con, prev);
		con->c_next = gclist;
		gclist = con;

		/* Next.. */
		con = next;
	}
	npf_conndb_settail(cd, prev);

	/*
	 * Ensure it is safe to destroy the connections.
	 * Note: drop the conn_lock (see the lock order).
	 */
	if (sync) {
		mutex_exit(&conn_lock);
		if (gclist) {
			npf_config_enter();
			npf_config_sync();
			npf_config_exit();
		}
	}

	/*
	 * Garbage collect all expired connections.
	 * May need to wait for the references to drain.
	 */
	con = gclist;
	while (con) {
		npf_conn_t *next = con->c_next;

		/*
		 * Destroy only if removed and no references.
		 * Otherwise, wait for a tiny moment.
		 */
		if (__predict_false(con->c_refcnt)) {
			kpause("npfcongc", false, 1, NULL);
			continue;
		}
		npf_conn_destroy(con);
		con = next;
	}
}

/*
 * npf_conn_worker: G/C to run from a worker thread.
 */
static void
npf_conn_worker(void)
{
	mutex_enter(&conn_lock);
	/* Note: the conn_lock will be released (sync == true). */
	npf_conn_gc(conn_db, false, true);
}

/*
 * npf_conn_export: construct a list of connections prepared for saving.
 * Note: this is expected to be an expensive operation.
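 *
 * Each exported connection becomes a dictionary with the following keys
 * (mirrored by npf_conn_import() below): "flags", "proto", "state",
 * "forw-key", "back-key" and, if a NAT entry is associated, whatever
 * npf_nat_export() adds for it.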
 */
int
npf_conn_export(prop_array_t conlist)
{
	npf_conn_t *con, *prev;

	/*
	 * Note: acquire conn_lock to prevent the database from being
	 * destroyed and to keep the G/C thread away.
	 */
	mutex_enter(&conn_lock);
	if (conn_tracking != CONN_TRACKING_ON) {
		mutex_exit(&conn_lock);
		return 0;
	}
	prev = NULL;
	con = npf_conndb_getlist(conn_db);
	while (con) {
		npf_conn_t *next = con->c_next;
		prop_data_t d;

		if ((con->c_flags & (CONN_ACTIVE|CONN_EXPIRE)) != CONN_ACTIVE)
			goto skip;

		prop_dictionary_t cdict = prop_dictionary_create();
		prop_dictionary_set_uint32(cdict, "flags", con->c_flags);
		prop_dictionary_set_uint32(cdict, "proto", con->c_proto);
		/* FIXME: interface-id */

		d = prop_data_create_data(&con->c_state, sizeof(npf_state_t));
		prop_dictionary_set_and_rel(cdict, "state", d);

		const uint32_t *fkey = con->c_forw_entry.ck_key;
		d = prop_data_create_data(fkey, NPF_CONN_MAXKEYLEN);
		prop_dictionary_set_and_rel(cdict, "forw-key", d);

		const uint32_t *bkey = con->c_back_entry.ck_key;
		d = prop_data_create_data(bkey, NPF_CONN_MAXKEYLEN);
		prop_dictionary_set_and_rel(cdict, "back-key", d);

		if (con->c_nat) {
			npf_nat_export(cdict, con->c_nat);
		}
		prop_array_add(conlist, cdict);
		prop_object_release(cdict);
skip:
		prev = con;
		con = next;
	}
	npf_conndb_settail(conn_db, prev);
	mutex_exit(&conn_lock);
	return 0;
}

/*
 * npf_conn_import: fully reconstruct a single connection from a
 * dictionary and insert it into the given database.
 */
int
npf_conn_import(npf_conndb_t *cd, prop_dictionary_t cdict,
    npf_ruleset_t *natlist)
{
	npf_conn_t *con;
	npf_connkey_t *fw, *bk;
	prop_object_t obj;
	const void *d;

	/* Allocate a connection and initialise it (clear first). */
	con = pool_cache_get(conn_cache, PR_WAITOK);
	memset(con, 0, sizeof(npf_conn_t));
	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);

	prop_dictionary_get_uint32(cdict, "proto", &con->c_proto);
	prop_dictionary_get_uint32(cdict, "flags", &con->c_flags);
	con->c_flags &= PFIL_ALL | CONN_ACTIVE | CONN_PASS;
	getnanouptime(&con->c_atime);

	obj = prop_dictionary_get(cdict, "state");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != sizeof(npf_state_t)) {
		goto err;
	}
	memcpy(&con->c_state, d, sizeof(npf_state_t));

	/* Reconstruct the NAT association, if any (NULL otherwise). */
	con->c_nat = npf_nat_import(cdict, natlist, con);

	/*
	 * Fetch and copy the keys for each direction.
	 */
	obj = prop_dictionary_get(cdict, "forw-key");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != NPF_CONN_MAXKEYLEN) {
		goto err;
	}
	fw = &con->c_forw_entry;
	memcpy(&fw->ck_key, d, NPF_CONN_MAXKEYLEN);

	obj = prop_dictionary_get(cdict, "back-key");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != NPF_CONN_MAXKEYLEN) {
		goto err;
	}
	bk = &con->c_back_entry;
	memcpy(&bk->ck_key, d, NPF_CONN_MAXKEYLEN);

	fw->ck_backptr = bk->ck_backptr = con;

	/* Insert the entries and the connection itself. */
	if (!npf_conndb_insert(cd, fw, con)) {
		goto err;
	}
	if (!npf_conndb_insert(cd, bk, con)) {
		npf_conndb_remove(cd, fw);
		goto err;
	}
	npf_conndb_enqueue(cd, con);
	return 0;
err:
	npf_conn_destroy(con);
	return EINVAL;
}

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_conn_print(const npf_conn_t *con)
{
	const u_int alen = NPF_CONN_GETALEN(&con->c_forw_entry);
	const uint32_t *fkey = con->c_forw_entry.ck_key;
	const uint32_t *bkey = con->c_back_entry.ck_key;
	const u_int proto = con->c_proto;
	struct timespec tsnow, tsdiff;
	const void *src, *dst;
	int etime;

	getnanouptime(&tsnow);
	timespecsub(&tsnow, &con->c_atime, &tsdiff);
	etime = npf_state_etime(&con->c_state, proto);

	printf("%p:\n\tproto %d flags 0x%x tsdiff %d etime %d\n",
	    con, proto, con->c_flags, (int)tsdiff.tv_sec, etime);

	src = &fkey[2], dst = &fkey[2 + (alen >> 2)];
	printf("\tforw %s:%d", npf_addr_dump(src, alen), ntohs(fkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(fkey[1] & 0xffff));

	src = &bkey[2], dst = &bkey[2 + (alen >> 2)];
	printf("\tback %s:%d", npf_addr_dump(src, alen), ntohs(bkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(bkey[1] & 0xffff));

	npf_state_dump(&con->c_state);
	if (con->c_nat) {
		npf_nat_dump(con->c_nat);
	}
}

#endif