/*	$NetBSD: npf_conn.c,v 1.21 2016/12/10 22:09:49 christos Exp $	*/

/*-
 * Copyright (c) 2014-2015 Mindaugas Rasiukevicius <rmind at netbsd org>
 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF connection tracking for stateful filtering and translation.
 *
 * Overview
 *
 *	Connection direction is identified by the direction of its first
 *	packet.  Packets can be incoming or outgoing with respect to an
 *	interface.  To describe a packet in the context of the connection
 *	direction we will use the terms "forwards stream" and "backwards
 *	stream".  All connections have two keys and thus two entries:
 *
 *		npf_conn_t::c_forw_entry for the forwards stream and
 *		npf_conn_t::c_back_entry for the backwards stream.
 *
 *	The keys are formed from the 5-tuple (source/destination address,
 *	source/destination port and the protocol).  Additional matching
 *	is performed on the interface (the common behaviour is equivalent
 *	to a 6-tuple lookup which includes the interface ID).  Note that
 *	the key may be formed using translated values in the case of NAT.
 *
 *	Connections can serve two purposes: implicit passing or
 *	accommodating dynamic NAT.  Connections for the former purpose are
 *	created by rules with the "stateful" attribute and are used for
 *	stateful filtering.  Such connections indicate that packets of
 *	the backwards stream should be passed without inspection of the
 *	ruleset.  The other purpose is to associate a dynamic NAT mechanism
 *	with a connection.  Such connections are created by the NAT policies
 *	and have a relationship with the NAT translation structure via
 *	npf_conn_t::c_nat.  A single connection can serve both purposes,
 *	which is a common case.
 *
 * Connection life-cycle
 *
 *	Connections are established when a packet matches a stateful rule
 *	or a NAT policy.  Both keys of the established connection are
 *	inserted into the connection database.  A garbage collection thread
 *	periodically scans all connections and, depending on connection
 *	properties (e.g. last activity time, protocol), removes connection
 *	entries and expires the actual connections.
 *
 *	Each connection has a reference count.  The reference is acquired
 *	on lookup and should be released by the caller.  It guarantees that
 *	the connection will not be destroyed, although it may be expired.
 *
 * Synchronisation
 *
 *	The connection database is accessed in a lock-less manner by the
 *	main routines: npf_conn_inspect() and npf_conn_establish().  Since
 *	they are always called from a software interrupt, the database is
 *	protected using passive serialisation.  The main place which can
 *	destroy a connection is npf_conn_worker().  The database itself
 *	can be replaced and destroyed in npf_conn_load().
 *
 * ALG support
 *
 *	Application-level gateways (ALGs) can override generic connection
 *	inspection (the npf_alg_conn() call in the npf_conn_inspect()
 *	function) by performing their own lookup using a different key.
 *	A recursive call to npf_conn_inspect() is not allowed.  The ALGs
 *	ought to use the npf_conn_lookup() function for this purpose.
 *
 * Lock order
 *
 *	npf_config_lock ->
 *		conn_lock ->
 *			npf_conn_t::c_lock
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_conn.c,v 1.21 2016/12/10 22:09:49 christos Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <net/pfil.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/systm.h>

#define	__NPF_CONN_PRIVATE
#include "npf_conn.h"
#include "npf_impl.h"

/*
 * Connection flags: PFIL_IN and PFIL_OUT values are reserved for direction.
 */
CTASSERT(PFIL_ALL == (0x001 | 0x002));
#define	CONN_ACTIVE	0x004	/* visible on inspection */
#define	CONN_PASS	0x008	/* perform implicit passing */
#define	CONN_EXPIRE	0x010	/* explicitly expire */
#define	CONN_REMOVED	0x020	/* "forw/back" entries removed */

/*
 * Connection tracking state: disabled (off) or enabled (on).
 */
enum { CONN_TRACKING_OFF, CONN_TRACKING_ON };
static volatile int		conn_tracking	__cacheline_aligned;

/* Connection tracking database, connection cache and the lock. */
static npf_conndb_t *		conn_db		__read_mostly;
static pool_cache_t		conn_cache	__read_mostly;
static kmutex_t			conn_lock	__cacheline_aligned;

static void	npf_conn_worker(void);
static void	npf_conn_destroy(npf_conn_t *);

/*
 * npf_conn_sys{init,fini}: initialise/destroy connection tracking.
 */

void
npf_conn_sysinit(void)
{
	conn_cache = pool_cache_init(sizeof(npf_conn_t), coherency_unit,
	    0, 0, "npfconpl", NULL, IPL_NET, NULL, NULL, NULL);
	mutex_init(&conn_lock, MUTEX_DEFAULT, IPL_NONE);
	conn_tracking = CONN_TRACKING_OFF;
	conn_db = npf_conndb_create();

	npf_worker_register(npf_conn_worker);
}

void
npf_conn_sysfini(void)
{
	/* Note: the caller should have flushed the connections. */
	KASSERT(conn_tracking == CONN_TRACKING_OFF);
	npf_worker_unregister(npf_conn_worker);

	npf_conndb_destroy(conn_db);
	pool_cache_destroy(conn_cache);
	mutex_destroy(&conn_lock);
}

/*
 * npf_conn_load: install the new connection database (if supplied),
 * then flush and destroy the old one; optionally enable tracking.
 *
 * => The caller must disable the connection tracking and ensure that
 *    there are no connection database lookups or references in-flight.
 */
void
npf_conn_load(npf_conndb_t *ndb, bool track)
{
	npf_conndb_t *odb = NULL;

	KASSERT(npf_config_locked_p());

	/*
	 * The connection database is in the quiescent state.
	 * Prevent the G/C thread from running and install a new database.
	 */
	mutex_enter(&conn_lock);
	if (ndb) {
		KASSERT(conn_tracking == CONN_TRACKING_OFF);
		odb = conn_db;
		conn_db = ndb;
		membar_sync();
	}
	if (track) {
		/* After this point lookups start flying in. */
		conn_tracking = CONN_TRACKING_ON;
	}
	mutex_exit(&conn_lock);

	if (odb) {
		/*
		 * Flush all; no sync is needed since the caller did it
		 * for us.  Also, release the pool cache memory.
		 */
		npf_conn_gc(odb, true, false);
		npf_conndb_destroy(odb);
		pool_cache_invalidate(conn_cache);
	}
}

/*
 * npf_conn_tracking: enable/disable connection tracking.
 */
void
npf_conn_tracking(bool track)
{
	KASSERT(npf_config_locked_p());
	conn_tracking = track ? CONN_TRACKING_ON : CONN_TRACKING_OFF;
}

static inline bool
npf_conn_trackable_p(const npf_cache_t *npc)
{
	/*
	 * Check if connection tracking is on.  Also, if layer 3 and 4
	 * are not cached, the protocol is either not supported or the
	 * packet is invalid.
	 */
	if (conn_tracking != CONN_TRACKING_ON) {
		return false;
	}
	if (!npf_iscached(npc, NPC_IP46) || !npf_iscached(npc, NPC_LAYER4)) {
		return false;
	}
	return true;
}

static uint32_t
connkey_setkey(npf_connkey_t *key, uint16_t proto, const void *ipv,
    const uint16_t *id, uint16_t alen, bool forw)
{
	uint32_t isrc, idst, *k = key->ck_key;
	const npf_addr_t * const *ips = ipv;

	if (__predict_true(forw)) {
		isrc = NPF_SRC, idst = NPF_DST;
	} else {
		isrc = NPF_DST, idst = NPF_SRC;
	}

	/*
	 * Construct a key formed out of 32-bit integers.  The key layout:
	 *
	 * Field: | proto | alen | src-id | dst-id | src-addr | dst-addr |
	 *        +-------+------+--------+--------+----------+----------+
	 * Bits:  |  16   |  16  |   16   |   16   |  32-128  |  32-128  |
	 *
	 * The source and destination are inverted if the key is for the
	 * backwards stream (forw == false).  The address length depends
	 * on the 'alen' field; it is a length in bytes, either 4 or 16.
	 */
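	/*
	 * For example, with alen = 4 (IPv4) the key occupies k[0]..k[3],
	 * i.e. 16 bytes, while with alen = 16 (IPv6) it occupies
	 * k[0]..k[9], i.e. 40 bytes -- matching the lengths returned below.
	 */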

	k[0] = ((uint32_t)proto << 16) | (alen & 0xffff);
	k[1] = ((uint32_t)id[isrc] << 16) | id[idst];

	if (__predict_true(alen == sizeof(in_addr_t))) {
		k[2] = ips[isrc]->s6_addr32[0];
		k[3] = ips[idst]->s6_addr32[0];
		return 4 * sizeof(uint32_t);
	} else {
		const u_int nwords = alen >> 2;
		memcpy(&k[2], ips[isrc], alen);
		memcpy(&k[2 + nwords], ips[idst], alen);
		return (2 + (nwords * 2)) * sizeof(uint32_t);
	}
}

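/*
 * connkey_getkey: decompose a connection key into the protocol, address
 * length, identifier (port) pair and address pair, as stored in the key.
 */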
static void
connkey_getkey(const npf_connkey_t *key, uint16_t *proto, npf_addr_t *ips,
    uint16_t *id, uint16_t *alen)
{
	const uint32_t *k = key->ck_key;

	*proto = k[0] >> 16;
	*alen = k[0] & 0xffff;
	id[NPF_SRC] = k[1] >> 16;
	id[NPF_DST] = k[1] & 0xffff;

	switch (*alen) {
	case sizeof(struct in6_addr):
	case sizeof(struct in_addr):
		memcpy(&ips[NPF_SRC], &k[2], *alen);
		memcpy(&ips[NPF_DST], &k[2 + ((unsigned)*alen >> 2)], *alen);
		return;
	default:
		KASSERT(0);
	}
}

/*
 * npf_conn_conkey: construct a key for the connection lookup.
 *
 * => Returns the key length in bytes or zero on failure.
 */
unsigned
npf_conn_conkey(const npf_cache_t *npc, npf_connkey_t *key, const bool forw)
{
	const uint16_t alen = npc->npc_alen;
	const struct tcphdr *th;
	const struct udphdr *uh;
	uint16_t id[2];

	switch (npc->npc_proto) {
	case IPPROTO_TCP:
		KASSERT(npf_iscached(npc, NPC_TCP));
		th = npc->npc_l4.tcp;
		id[NPF_SRC] = th->th_sport;
		id[NPF_DST] = th->th_dport;
		break;
	case IPPROTO_UDP:
		KASSERT(npf_iscached(npc, NPC_UDP));
		uh = npc->npc_l4.udp;
		id[NPF_SRC] = uh->uh_sport;
		id[NPF_DST] = uh->uh_dport;
		break;
	case IPPROTO_ICMP:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp *ic = npc->npc_l4.icmp;
			id[NPF_SRC] = ic->icmp_id;
			id[NPF_DST] = ic->icmp_id;
			break;
		}
		return 0;
	case IPPROTO_ICMPV6:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			const struct icmp6_hdr *ic6 = npc->npc_l4.icmp6;
			id[NPF_SRC] = ic6->icmp6_id;
			id[NPF_DST] = ic6->icmp6_id;
			break;
		}
		return 0;
	default:
		/* Unsupported protocol. */
		return 0;
	}

	return connkey_setkey(key, npc->npc_proto, npc->npc_ips, id, alen,
	    forw);
}

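/*
 * connkey_set_addr: rewrite the source (di = NPF_SRC) or destination
 * (di = NPF_DST) address stored in the key, e.g. with a NAT translation
 * address.  The address length is taken from the key itself.
 */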
static __inline void
connkey_set_addr(npf_connkey_t *key, const npf_addr_t *naddr, const int di)
{
	const u_int alen = key->ck_key[0] & 0xffff;
	uint32_t *addr = &key->ck_key[2 + ((alen >> 2) * di)];

	KASSERT(alen > 0);
	memcpy(addr, naddr, alen);
}

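/*
 * connkey_set_id: rewrite the 16-bit source (di = NPF_SRC) or destination
 * (di = NPF_DST) identifier, i.e. the port, stored in the key, while
 * preserving the other identifier.
 */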
static __inline void
connkey_set_id(npf_connkey_t *key, const uint16_t id, const int di)
{
	const uint32_t oid = key->ck_key[1];
	const u_int shift = 16 * !di;
	const uint32_t mask = 0xffff0000 >> shift;

	key->ck_key[1] = ((uint32_t)id << shift) | (oid & mask);
}

/*
 * npf_conn_ok: check if the connection is active and has the right direction.
 */
static bool
npf_conn_ok(npf_conn_t *con, const int di, bool forw)
{
	uint32_t flags = con->c_flags;

	/* Check if connection is active and not expired. */
	bool ok = (flags & (CONN_ACTIVE | CONN_EXPIRE)) == CONN_ACTIVE;
	if (__predict_false(!ok)) {
		return false;
	}

	/* Check if the direction is consistent. */
	bool pforw = (flags & PFIL_ALL) == di;
	if (__predict_false(forw != pforw)) {
		return false;
	}
	return true;
}

/*
 * npf_conn_lookup: look up an established connection, if any.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_lookup(const npf_cache_t *npc, const int di, bool *forw)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	npf_connkey_t key;
	u_int cifid;

	/* Construct a key and look up a connection in the store. */
	if (!npf_conn_conkey(npc, &key, true)) {
		return NULL;
	}
	con = npf_conndb_lookup(conn_db, &key, forw);
	if (con == NULL) {
		return NULL;
	}
	KASSERT(npc->npc_proto == con->c_proto);

	/* Check if connection is active and not expired. */
	if (!npf_conn_ok(con, di, *forw)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/*
	 * Match the interface and the direction of the connection entry
	 * and the packet.
	 */
	cifid = con->c_ifid;
	if (__predict_false(cifid && cifid != nbuf->nb_ifid)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/* Update the last activity time. */
	getnanouptime(&con->c_atime);
	return con;
}

/*
 * npf_conn_inspect: lookup a connection and inspect the protocol data.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_inspect(npf_cache_t *npc, const int di, int *error)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	bool forw, ok;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Query the ALGs, which may look up the connection for us. */
	if ((con = npf_alg_conn(npc, di)) != NULL) {
		/* Note: reference is held. */
		return con;
	}
	if (nbuf_head_mbuf(nbuf) == NULL) {
		*error = ENOMEM;
		return NULL;
	}
	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	/* Main lookup of the connection. */
	if ((con = npf_conn_lookup(npc, di, &forw)) == NULL) {
		return NULL;
	}

	/* Inspect the protocol data and handle state changes. */
	mutex_enter(&con->c_lock);
	ok = npf_state_inspect(npc, &con->c_state, forw);
	mutex_exit(&con->c_lock);

	/* If invalid state: let the rules deal with it. */
	if (__predict_false(!ok)) {
		npf_conn_release(con);
		npf_stats_inc(NPF_STAT_INVALID_STATE);
		return NULL;
	}

	/*
	 * If this is a multi-end state, then specially tag the packet
	 * so it will be just passed-through on other interfaces.
	 */
	if (con->c_ifid == 0 && nbuf_add_tag(nbuf, NPF_NTAG_PASS) != 0) {
		npf_conn_release(con);
		*error = ENOMEM;
		return NULL;
	}
	return con;
}

/*
 * npf_conn_establish: create a new connection, insert into the global list.
 *
 * => Connection is created with the reference held for the caller.
 * => Connection will be activated on the first reference release.
 */
npf_conn_t *
npf_conn_establish(npf_cache_t *npc, int di, bool per_if)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	int error = 0;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Allocate and initialise the new connection. */
	con = pool_cache_get(conn_cache, PR_NOWAIT);
	if (__predict_false(!con)) {
		return NULL;
	}
	NPF_PRINTF(("NPF: create conn %p\n", con));
	npf_stats_inc(NPF_STAT_CONN_CREATE);

	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	con->c_flags = (di & PFIL_ALL);
	con->c_refcnt = 0;
	con->c_rproc = NULL;
	con->c_nat = NULL;

	/* Initialise the protocol state. */
	if (!npf_state_init(npc, &con->c_state)) {
		npf_conn_destroy(con);
		return NULL;
	}

	KASSERT(npf_iscached(npc, NPC_IP46));
	npf_connkey_t *fw = &con->c_forw_entry;
	npf_connkey_t *bk = &con->c_back_entry;

	/*
	 * Construct the "forwards" and "backwards" keys.  Also, set the
	 * interface ID for this connection (unless it is global).
	 */
	if (!npf_conn_conkey(npc, fw, true) ||
	    !npf_conn_conkey(npc, bk, false)) {
		npf_conn_destroy(con);
		return NULL;
	}
	fw->ck_backptr = bk->ck_backptr = con;
	con->c_ifid = per_if ? nbuf->nb_ifid : 0;
	con->c_proto = npc->npc_proto;

	/*
	 * Set the last activity time for the new connection and acquire
	 * a reference for the caller before we make it visible.
	 */
	getnanouptime(&con->c_atime);
	con->c_refcnt = 1;

	/*
	 * Insert both keys (entries representing the directions) of the
	 * connection.  At this point it becomes visible, but we activate
	 * the connection later.
	 */
	mutex_enter(&con->c_lock);
	if (!npf_conndb_insert(conn_db, fw, con)) {
		error = EISCONN;
		goto err;
	}
	if (!npf_conndb_insert(conn_db, bk, con)) {
		npf_conn_t *ret __diagused;
		ret = npf_conndb_remove(conn_db, fw);
		KASSERT(ret == con);
		error = EISCONN;
		goto err;
	}
err:
	/*
	 * If we have hit a duplicate: mark the connection as expired
	 * and let the G/C thread take care of it.  We cannot do it
	 * here since there might be references acquired already.
	 */
	if (error) {
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		atomic_dec_uint(&con->c_refcnt);
		npf_stats_inc(NPF_STAT_RACE_CONN);
	} else {
		NPF_PRINTF(("NPF: establish conn %p\n", con));
	}

	/* Finally, insert into the connection list. */
	npf_conndb_enqueue(conn_db, con);
	mutex_exit(&con->c_lock);

	return error ? NULL : con;
}

static void
npf_conn_destroy(npf_conn_t *con)
{
	KASSERT(con->c_refcnt == 0);

	if (con->c_nat) {
		/* Release any NAT structures. */
		npf_nat_destroy(con->c_nat);
	}
	if (con->c_rproc) {
		/* Release the rule procedure. */
		npf_rproc_release(con->c_rproc);
	}

	/* Destroy the state. */
	npf_state_destroy(&con->c_state);
	mutex_destroy(&con->c_lock);

	/* Free the structure, increase the counter. */
	pool_cache_put(conn_cache, con);
	npf_stats_inc(NPF_STAT_CONN_DESTROY);
	NPF_PRINTF(("NPF: conn %p destroyed\n", con));
}

/*
 * npf_conn_setnat: associate NAT entry with the connection, update and
 * re-insert connection entry using the translation values.
 *
 * => The caller must be holding a reference.
 */
int
npf_conn_setnat(const npf_cache_t *npc, npf_conn_t *con,
    npf_nat_t *nt, u_int ntype)
{
	static const u_int nat_type_dimap[] = {
		[NPF_NATOUT] = NPF_DST,
		[NPF_NATIN] = NPF_SRC,
	};
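	/*
	 * Note: for NPF_NATOUT the source of the forwards stream is
	 * translated, so in the "backwards" key it is the destination
	 * which has to be rewritten; conversely for NPF_NATIN.
	 */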
	npf_connkey_t key, *bk;
	npf_conn_t *ret __diagused;
	npf_addr_t *taddr;
	in_port_t tport;
	u_int tidx;

	KASSERT(con->c_refcnt > 0);

	npf_nat_gettrans(nt, &taddr, &tport);
	KASSERT(ntype == NPF_NATOUT || ntype == NPF_NATIN);
	tidx = nat_type_dimap[ntype];

	/* Construct a "backwards" key. */
	if (!npf_conn_conkey(npc, &key, false)) {
		return EINVAL;
	}

	/* Acquire the lock and check for the races. */
	mutex_enter(&con->c_lock);
	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* The connection got expired. */
		mutex_exit(&con->c_lock);
		return EINVAL;
	}
	KASSERT((con->c_flags & CONN_REMOVED) == 0);

	if (__predict_false(con->c_nat != NULL)) {
		/* Race with a duplicate packet. */
		mutex_exit(&con->c_lock);
		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Remove the "backwards" entry. */
	ret = npf_conndb_remove(conn_db, &con->c_back_entry);
	KASSERT(ret == con);

	/* Set the source/destination IDs to the translation values. */
	bk = &con->c_back_entry;
	connkey_set_addr(bk, taddr, tidx);
	if (tport) {
		connkey_set_id(bk, tport, tidx);
	}

	/* Finally, re-insert the "backwards" entry. */
	if (!npf_conndb_insert(conn_db, bk, con)) {
		/*
		 * Race: we have hit the duplicate, remove the "forwards"
		 * entry and expire our connection; it is no longer valid.
		 */
		ret = npf_conndb_remove(conn_db, &con->c_forw_entry);
		KASSERT(ret == con);

		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Associate the NAT entry and release the lock. */
	con->c_nat = nt;
	mutex_exit(&con->c_lock);
	return 0;
}

/*
 * npf_conn_expire: explicitly mark connection as expired.
 */
void
npf_conn_expire(npf_conn_t *con)
{
	/* KASSERT(con->c_refcnt > 0); XXX: npf_nat_freepolicy() */
	atomic_or_uint(&con->c_flags, CONN_EXPIRE);
}

/*
 * npf_conn_pass: return true if connection is "pass" one, otherwise false.
 */
bool
npf_conn_pass(const npf_conn_t *con, npf_rproc_t **rp)
{
	KASSERT(con->c_refcnt > 0);
	if (__predict_true(con->c_flags & CONN_PASS)) {
		*rp = con->c_rproc;
		return true;
	}
	return false;
}

/*
 * npf_conn_setpass: mark connection as a "pass" one and associate the
 * rule procedure with it.
 */
void
npf_conn_setpass(npf_conn_t *con, npf_rproc_t *rp)
{
	KASSERT((con->c_flags & CONN_ACTIVE) == 0);
	KASSERT(con->c_refcnt > 0);
	KASSERT(con->c_rproc == NULL);

	/*
	 * No need for atomic since the connection is not yet active.
	 * If rproc is set, the caller transfers its reference to us,
	 * which will be released on npf_conn_destroy().
	 */
	atomic_or_uint(&con->c_flags, CONN_PASS);
	con->c_rproc = rp;
}

/*
 * npf_conn_release: release a reference, which might allow G/C thread
 * to destroy this connection.
 */
void
npf_conn_release(npf_conn_t *con)
{
	if ((con->c_flags & (CONN_ACTIVE | CONN_EXPIRE)) == 0) {
		/* Activate: after this, connection is globally visible. */
		atomic_or_uint(&con->c_flags, CONN_ACTIVE);
	}
	KASSERT(con->c_refcnt > 0);
	atomic_dec_uint(&con->c_refcnt);
}

/*
 * npf_conn_getnat: return associated NAT data entry and indicate
 * whether it is a "forwards" or "backwards" stream.
 */
npf_nat_t *
npf_conn_getnat(npf_conn_t *con, const int di, bool *forw)
{
	KASSERT(con->c_refcnt > 0);
	*forw = (con->c_flags & PFIL_ALL) == di;
	return con->c_nat;
}

/*
 * npf_conn_expired: criterion to check if connection is expired.
 */
static inline bool
npf_conn_expired(const npf_conn_t *con, const struct timespec *tsnow)
{
	const int etime = npf_state_etime(&con->c_state, con->c_proto);
	struct timespec tsdiff;

	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* Explicitly marked to be expired. */
		return true;
	}
	timespecsub(tsnow, &con->c_atime, &tsdiff);
	return tsdiff.tv_sec > etime;
}

/*
 * npf_conn_gc: garbage collect the expired connections.
 *
 * => Must run in a single-threaded manner.
 * => If it is a flush request, then destroy all connections.
 * => If 'sync' is true, then perform passive serialisation.
 */
void
npf_conn_gc(npf_conndb_t *cd, bool flush, bool sync)
{
	npf_conn_t *con, *prev, *gclist = NULL;
	struct timespec tsnow;

	getnanouptime(&tsnow);

	/*
	 * Scan all connections and check them for expiration.
	 */
	prev = NULL;
	con = npf_conndb_getlist(cd);
	while (con) {
		npf_conn_t *next = con->c_next;

		/* Expired?  Flushing all? */
		if (!npf_conn_expired(con, &tsnow) && !flush) {
			prev = con;
			con = next;
			continue;
		}

		/* Remove both entries of the connection. */
		mutex_enter(&con->c_lock);
		if ((con->c_flags & CONN_REMOVED) == 0) {
			npf_conn_t *ret __diagused;

			ret = npf_conndb_remove(cd, &con->c_forw_entry);
			KASSERT(ret == con);
			ret = npf_conndb_remove(cd, &con->c_back_entry);
			KASSERT(ret == con);
		}

		/* Flag the removal and expiration. */
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		/* Move to the G/C list. */
		npf_conndb_dequeue(cd, con, prev);
		con->c_next = gclist;
		gclist = con;

		/* Next.. */
		con = next;
	}
	npf_conndb_settail(cd, prev);

	/*
	 * Ensure it is safe to destroy the connections.
	 * Note: drop the conn_lock (see the lock order).
	 */
	if (sync) {
		mutex_exit(&conn_lock);
		if (gclist) {
			npf_config_enter();
			npf_config_sync();
			npf_config_exit();
		}
	}

	/*
	 * Garbage collect all expired connections.
	 * May need to wait for the references to drain.
	 */
	con = gclist;
	while (con) {
		npf_conn_t *next = con->c_next;

		/*
		 * Destroy only if removed and no references.
		 * Otherwise, wait for a tiny moment.
		 */
		if (__predict_false(con->c_refcnt)) {
			kpause("npfcongc", false, 1, NULL);
			continue;
		}
		npf_conn_destroy(con);
		con = next;
	}
}

/*
 * npf_conn_worker: G/C to run from a worker thread.
 */
static void
npf_conn_worker(void)
{
	mutex_enter(&conn_lock);
	/* Note: the conn_lock will be released (sync == true). */
	npf_conn_gc(conn_db, false, true);
}

/*
 * npf_conndb_export: construct a list of connections prepared for saving.
 * Note: this is expected to be an expensive operation.
 */
int
npf_conndb_export(prop_array_t conlist)
{
	npf_conn_t *con, *prev;

	/*
	 * Note: acquire conn_lock to prevent the database from being
	 * destroyed and to keep the G/C thread away.
	 */
	mutex_enter(&conn_lock);
	if (conn_tracking != CONN_TRACKING_ON) {
		mutex_exit(&conn_lock);
		return 0;
	}
	prev = NULL;
	con = npf_conndb_getlist(conn_db);
	while (con) {
		npf_conn_t *next = con->c_next;
		prop_dictionary_t cdict;

		if ((cdict = npf_conn_export(con)) != NULL) {
			prop_array_add(conlist, cdict);
			prop_object_release(cdict);
		}
		prev = con;
		con = next;
	}
	npf_conndb_settail(conn_db, prev);
	mutex_exit(&conn_lock);
	return 0;
}

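/*
 * npf_connkey_export: serialise a connection key into a proplib
 * dictionary (protocol, ports/IDs and addresses).
 */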
static prop_dictionary_t
npf_connkey_export(const npf_connkey_t *key)
{
	uint16_t id[2], alen, proto;
	npf_addr_t ips[2];
	prop_data_t d;
	prop_dictionary_t kdict = prop_dictionary_create();

	connkey_getkey(key, &proto, ips, id, &alen);

	prop_dictionary_set_uint16(kdict, "proto", proto);

	prop_dictionary_set_uint16(kdict, "sport", id[NPF_SRC]);
	prop_dictionary_set_uint16(kdict, "dport", id[NPF_DST]);

	d = prop_data_create_data(&ips[NPF_SRC], alen);
	prop_dictionary_set_and_rel(kdict, "saddr", d);

	d = prop_data_create_data(&ips[NPF_DST], alen);
	prop_dictionary_set_and_rel(kdict, "daddr", d);

	return kdict;
}

/*
 * npf_conn_export: serialise a single connection.
 */
prop_dictionary_t
npf_conn_export(const npf_conn_t *con)
{
	prop_dictionary_t cdict, kdict;
	prop_data_t d;

	if ((con->c_flags & (CONN_ACTIVE|CONN_EXPIRE)) != CONN_ACTIVE) {
		return NULL;
	}
	cdict = prop_dictionary_create();
	prop_dictionary_set_uint32(cdict, "flags", con->c_flags);
	prop_dictionary_set_uint32(cdict, "proto", con->c_proto);
	if (con->c_ifid) {
		const char *ifname = npf_ifmap_getname(con->c_ifid);
		prop_dictionary_set_cstring(cdict, "ifname", ifname);
	}

	d = prop_data_create_data(&con->c_state, sizeof(npf_state_t));
	prop_dictionary_set_and_rel(cdict, "state", d);

	kdict = npf_connkey_export(&con->c_forw_entry);
	prop_dictionary_set_and_rel(cdict, "forw-key", kdict);

	kdict = npf_connkey_export(&con->c_back_entry);
	prop_dictionary_set_and_rel(cdict, "back-key", kdict);

	if (con->c_nat) {
		npf_nat_export(cdict, con->c_nat);
	}
	return cdict;
}

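/*
 * npf_connkey_import: reconstruct a connection key from the proplib
 * dictionary.  Returns the key length in bytes or zero on failure.
 */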
static uint32_t
npf_connkey_import(prop_dictionary_t kdict, npf_connkey_t *key)
{
	uint16_t proto;
	prop_object_t sobj, dobj;
	uint16_t id[2];
	npf_addr_t const * ips[2];

	if (!prop_dictionary_get_uint16(kdict, "proto", &proto))
		return 0;

	if (!prop_dictionary_get_uint16(kdict, "sport", &id[NPF_SRC]))
		return 0;

	if (!prop_dictionary_get_uint16(kdict, "dport", &id[NPF_DST]))
		return 0;

	sobj = prop_dictionary_get(kdict, "saddr");
	if ((ips[NPF_SRC] = prop_data_data_nocopy(sobj)) == NULL)
		return 0;

	dobj = prop_dictionary_get(kdict, "daddr");
	if ((ips[NPF_DST] = prop_data_data_nocopy(dobj)) == NULL)
		return 0;

	uint16_t alen = prop_data_size(sobj);
	if (alen != prop_data_size(dobj))
		return 0;

	return connkey_setkey(key, proto, ips, id, alen, true);
}

/*
 * npf_conn_import: fully reconstruct a single connection from a
 * dictionary and insert into the given database.
 */
int
npf_conn_import(npf_conndb_t *cd, prop_dictionary_t cdict,
    npf_ruleset_t *natlist)
{
	npf_conn_t *con;
	npf_connkey_t *fw, *bk;
	prop_object_t obj;
	const char *ifname;
	const void *d;

	/* Allocate a connection and initialise it (clear first). */
	con = pool_cache_get(conn_cache, PR_WAITOK);
	memset(con, 0, sizeof(npf_conn_t));
	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	npf_stats_inc(NPF_STAT_CONN_CREATE);

	prop_dictionary_get_uint32(cdict, "proto", &con->c_proto);
	prop_dictionary_get_uint32(cdict, "flags", &con->c_flags);
	con->c_flags &= PFIL_ALL | CONN_ACTIVE | CONN_PASS;
	getnanouptime(&con->c_atime);

	if (prop_dictionary_get_cstring_nocopy(cdict, "ifname", &ifname) &&
	    (con->c_ifid = npf_ifmap_register(ifname)) == 0) {
		goto err;
	}

	obj = prop_dictionary_get(cdict, "state");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != sizeof(npf_state_t)) {
		goto err;
	}
	memcpy(&con->c_state, d, sizeof(npf_state_t));

	/* Reconstruct NAT association, if any. */
	if ((obj = prop_dictionary_get(cdict, "nat")) != NULL &&
	    (con->c_nat = npf_nat_import(obj, natlist, con)) == NULL) {
		goto err;
	}

	/*
	 * Fetch and copy the keys for each direction.
	 */
	obj = prop_dictionary_get(cdict, "forw-key");
	fw = &con->c_forw_entry;
	if (obj == NULL || !npf_connkey_import(obj, fw)) {
		goto err;
	}

	obj = prop_dictionary_get(cdict, "back-key");
	bk = &con->c_back_entry;
	if (obj == NULL || !npf_connkey_import(obj, bk)) {
		goto err;
	}

	fw->ck_backptr = bk->ck_backptr = con;

	/* Insert the entries and the connection itself. */
	if (!npf_conndb_insert(cd, fw, con)) {
		goto err;
	}
	if (!npf_conndb_insert(cd, bk, con)) {
		npf_conndb_remove(cd, fw);
		goto err;
	}

	NPF_PRINTF(("NPF: imported conn %p\n", con));
	npf_conndb_enqueue(cd, con);
	return 0;
err:
	npf_conn_destroy(con);
	return EINVAL;
}

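/*
 * npf_conn_find: look up a connection using the key and the direction
 * supplied in the dictionary and, if found and valid, export it.
 */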
int
npf_conn_find(prop_dictionary_t idict, prop_dictionary_t *odict)
{
	npf_connkey_t key;
	npf_conn_t *con;
	uint16_t dir;
	bool forw;
	prop_dictionary_t kdict;

	if ((kdict = prop_dictionary_get(idict, "key")) == NULL)
		return EINVAL;

	if (!npf_connkey_import(kdict, &key))
		return EINVAL;

	if (!prop_dictionary_get_uint16(idict, "direction", &dir))
		return EINVAL;

	con = npf_conndb_lookup(conn_db, &key, &forw);
	if (con == NULL) {
		return ESRCH;
	}

	if (!npf_conn_ok(con, dir, true)) {
		atomic_dec_uint(&con->c_refcnt);
		return ESRCH;
	}

	*odict = npf_conn_export(con);
	if (*odict == NULL) {
		atomic_dec_uint(&con->c_refcnt);
		return ENOSPC;
	}
	atomic_dec_uint(&con->c_refcnt);

	return 0;
}

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_conn_print(const npf_conn_t *con)
{
	const u_int alen = NPF_CONN_GETALEN(&con->c_forw_entry);
	const uint32_t *fkey = con->c_forw_entry.ck_key;
	const uint32_t *bkey = con->c_back_entry.ck_key;
	const u_int proto = con->c_proto;
	struct timespec tsnow, tsdiff;
	const void *src, *dst;
	int etime;

	getnanouptime(&tsnow);
	timespecsub(&tsnow, &con->c_atime, &tsdiff);
	etime = npf_state_etime(&con->c_state, proto);

	printf("%p:\n\tproto %d flags 0x%x tsdiff %d etime %d\n",
	    con, proto, con->c_flags, (int)tsdiff.tv_sec, etime);

	src = &fkey[2], dst = &fkey[2 + (alen >> 2)];
	printf("\tforw %s:%d", npf_addr_dump(src, alen), ntohs(fkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(fkey[1] & 0xffff));

	src = &bkey[2], dst = &bkey[2 + (alen >> 2)];
	printf("\tback %s:%d", npf_addr_dump(src, alen), ntohs(bkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(bkey[1] & 0xffff));

	npf_state_dump(&con->c_state);
	if (con->c_nat) {
		npf_nat_dump(con->c_nat);
	}
}

#endif