1 /*	$NetBSD: npf_conn.c,v 1.22 2016/12/26 23:05:06 christos Exp $	*/
2
3 /*-
4 * Copyright (c) 2014-2015 Mindaugas Rasiukevicius <rmind at netbsd org>
5 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This material is based upon work partially supported by The
9 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * NPF connection tracking for stateful filtering and translation.
35 *
36 * Overview
37 *
38 * Connection direction is identified by the direction of its first
39 * packet. Packets can be incoming or outgoing with respect to an
40 * interface. To describe the packet in the context of connection
41 * direction we will use the terms "forwards stream" and "backwards
42 * stream". All connections have two keys and thus two entries:
43 *
44 * npf_conn_t::c_forw_entry for the forwards stream and
45 * npf_conn_t::c_back_entry for the backwards stream.
46 *
47 * The keys are formed from the 5-tuple (source/destination address,
48 * source/destination port and the protocol). Additional matching
49  * is performed on the interface (the common behaviour is equivalent
50  * to a 6-tuple lookup that includes the interface ID).  Note that the
51  * key may be formed using translated values in the case of NAT.
52 *
53  * Connections can serve two purposes: implicit passing or
54  * dynamic NAT.  Connections for the former purpose are created
55  * by rules with the "stateful" attribute and are used for
56  * stateful filtering.  Such connections indicate that packets
57  * of the backwards stream should be passed without inspecting
58  * the ruleset.  The other purpose is to associate a dynamic
59  * NAT mechanism with a connection.  Such connections are
60  * created by the NAT policies and are linked to the NAT
61  * translation structure via npf_conn_t::c_nat.  A single
62  * connection can serve both purposes, which is a common case.
63 *
64 * Connection life-cycle
65 *
66  *	Connections are established when a packet matches a stateful rule
67  *	or a NAT policy.  Both keys of the established connection are inserted
68 * into the connection database. A garbage collection thread
69 * periodically scans all connections and depending on connection
70 * properties (e.g. last activity time, protocol) removes connection
71 * entries and expires the actual connections.
72 *
73 * Each connection has a reference count. The reference is acquired
74 * on lookup and should be released by the caller. It guarantees that
75 * the connection will not be destroyed, although it may be expired.
76 *
77 * Synchronisation
78 *
79 * Connection database is accessed in a lock-less manner by the main
80 * routines: npf_conn_inspect() and npf_conn_establish(). Since they
81 * are always called from a software interrupt, the database is
82 * protected using passive serialisation. The main place which can
83 * destroy a connection is npf_conn_worker(). The database itself
84 * can be replaced and destroyed in npf_conn_reload().
85 *
86 * ALG support
87 *
88 * Application-level gateways (ALGs) can override generic connection
89 * inspection (npf_alg_conn() call in npf_conn_inspect() function) by
90  *	performing their own lookup using a different key.  A recursive
91  *	call to npf_conn_inspect() is not allowed.  The ALGs ought to use the
92 * npf_conn_lookup() function for this purpose.
93 *
94 * Lock order
95 *
96 * npf_config_lock ->
97 * conn_lock ->
98 * npf_conn_t::c_lock
99 */
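/*
 * Usage sketch
 *
 *	The following is an illustrative sketch only (not part of this
 *	file) of how a packet-processing path is expected to use this
 *	module; "stateful", "per_if" and "rp" are placeholders for
 *	decisions made by the ruleset inspection code:
 *
 *		con = npf_conn_inspect(npc, di, &error);
 *		if (con == NULL && stateful) {
 *			con = npf_conn_establish(npc, di, per_if);
 *			if (con != NULL) {
 *				npf_conn_setpass(con, rp);
 *			}
 *		}
 *		...
 *		if (con != NULL) {
 *			npf_conn_release(con);
 *		}
 */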
100
101 #ifdef _KERNEL
102 #include <sys/cdefs.h>
103 __KERNEL_RCSID(0, "$NetBSD: npf_conn.c,v 1.22 2016/12/26 23:05:06 christos Exp $");
104
105 #include <sys/param.h>
106 #include <sys/types.h>
107
108 #include <netinet/in.h>
109 #include <netinet/tcp.h>
110
111 #include <sys/atomic.h>
112 #include <sys/condvar.h>
113 #include <sys/kmem.h>
114 #include <sys/kthread.h>
115 #include <sys/mutex.h>
116 #include <net/pfil.h>
117 #include <sys/pool.h>
118 #include <sys/queue.h>
119 #include <sys/systm.h>
120 #endif
121
122 #define __NPF_CONN_PRIVATE
123 #include "npf_conn.h"
124 #include "npf_impl.h"
125
126 /*
127 * Connection flags: PFIL_IN and PFIL_OUT values are reserved for direction.
128 */
129 CTASSERT(PFIL_ALL == (0x001 | 0x002));
130 #define CONN_ACTIVE 0x004 /* visible on inspection */
131 #define CONN_PASS 0x008 /* perform implicit passing */
132 #define CONN_EXPIRE 0x010 /* explicitly expire */
133 #define CONN_REMOVED 0x020 /* "forw/back" entries removed */
134
135 enum { CONN_TRACKING_OFF, CONN_TRACKING_ON };
136
137 static void npf_conn_destroy(npf_t *, npf_conn_t *);
138
139 /*
140  * npf_conn_{init,fini}: initialise/destroy connection tracking.
141 */
142
143 void
144 npf_conn_init(npf_t *npf, int flags)
145 {
146 npf->conn_cache = pool_cache_init(sizeof(npf_conn_t), coherency_unit,
147 0, 0, "npfconpl", NULL, IPL_NET, NULL, NULL, NULL);
148 mutex_init(&npf->conn_lock, MUTEX_DEFAULT, IPL_NONE);
149 npf->conn_tracking = CONN_TRACKING_OFF;
150 npf->conn_db = npf_conndb_create();
151
152 if ((flags & NPF_NO_GC) == 0) {
153 npf_worker_register(npf, npf_conn_worker);
154 }
155 }
156
157 void
158 npf_conn_fini(npf_t *npf)
159 {
160 /* Note: the caller should have flushed the connections. */
161 KASSERT(npf->conn_tracking == CONN_TRACKING_OFF);
162 npf_worker_unregister(npf, npf_conn_worker);
163
164 npf_conndb_destroy(npf->conn_db);
165 pool_cache_destroy(npf->conn_cache);
166 mutex_destroy(&npf->conn_lock);
167 }
168
169 /*
170  * npf_conn_load: perform the load by flushing the current connection
171  * database and replacing it with the new one, destroying the old one.
172 *
173 * => The caller must disable the connection tracking and ensure that
174 * there are no connection database lookups or references in-flight.
175 */
176 void
177 npf_conn_load(npf_t *npf, npf_conndb_t *ndb, bool track)
178 {
179 npf_conndb_t *odb = NULL;
180
181 KASSERT(npf_config_locked_p(npf));
182
183 /*
184 * The connection database is in the quiescent state.
185 * Prevent G/C thread from running and install a new database.
186 */
187 mutex_enter(&npf->conn_lock);
188 if (ndb) {
189 KASSERT(npf->conn_tracking == CONN_TRACKING_OFF);
190 odb = npf->conn_db;
191 npf->conn_db = ndb;
192 membar_sync();
193 }
194 if (track) {
195 /* After this point lookups start flying in. */
196 npf->conn_tracking = CONN_TRACKING_ON;
197 }
198 mutex_exit(&npf->conn_lock);
199
200 if (odb) {
201 /*
202 * Flush all, no sync since the caller did it for us.
203 * Also, release the pool cache memory.
204 */
205 npf_conn_gc(npf, odb, true, false);
206 npf_conndb_destroy(odb);
207 pool_cache_invalidate(npf->conn_cache);
208 }
209 }
210
211 /*
212 * npf_conn_tracking: enable/disable connection tracking.
213 */
214 void
215 npf_conn_tracking(npf_t *npf, bool track)
216 {
217 KASSERT(npf_config_locked_p(npf));
218 npf->conn_tracking = track ? CONN_TRACKING_ON : CONN_TRACKING_OFF;
219 }
220
221 static inline bool
222 npf_conn_trackable_p(const npf_cache_t *npc)
223 {
224 const npf_t *npf = npc->npc_ctx;
225
226 /*
227 	 * Check if connection tracking is on.  Also, if layers 3 and 4 are
228 	 * not cached, the protocol is not supported or the packet is invalid.
229 */
230 if (npf->conn_tracking != CONN_TRACKING_ON) {
231 return false;
232 }
233 if (!npf_iscached(npc, NPC_IP46) || !npf_iscached(npc, NPC_LAYER4)) {
234 return false;
235 }
236 return true;
237 }
238
239 static uint32_t
240 connkey_setkey(npf_connkey_t *key, uint16_t proto, const void *ipv,
241 const uint16_t *id, unsigned alen, bool forw)
242 {
243 uint32_t isrc, idst, *k = key->ck_key;
244 const npf_addr_t * const *ips = ipv;
245
246 if (__predict_true(forw)) {
247 isrc = NPF_SRC, idst = NPF_DST;
248 } else {
249 isrc = NPF_DST, idst = NPF_SRC;
250 }
251
252 /*
253 * Construct a key formed out of 32-bit integers. The key layout:
254 *
255 * Field: | proto | alen | src-id | dst-id | src-addr | dst-addr |
256 * +--------+--------+--------+--------+----------+----------+
257 * Bits: | 16 | 16 | 16 | 16 | 32-128 | 32-128 |
258 *
259 	 * The source and destination are inverted if the key is for the
260 * backwards stream (forw == false). The address length depends
261 * on the 'alen' field; it is a length in bytes, either 4 or 16.
262 */
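	/*
	 * Illustrative example (not derived from a real packet): a
	 * forwards key for a TCP connection from 10.0.0.1:12345 to
	 * 10.0.0.2:80 over IPv4 (alen = 4) would be packed into four
	 * 32-bit words:
	 *
	 *	k[0] = (IPPROTO_TCP << 16) | 4
	 *	k[1] = (sport << 16) | dport	(network byte order)
	 *	k[2] = 10.0.0.1			(one 32-bit word)
	 *	k[3] = 10.0.0.2			(one 32-bit word)
	 *
	 * and the function would return 16, i.e. 4 * sizeof(uint32_t).
	 */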
263
264 k[0] = ((uint32_t)proto << 16) | (alen & 0xffff);
265 k[1] = ((uint32_t)id[isrc] << 16) | id[idst];
266
267 if (__predict_true(alen == sizeof(in_addr_t))) {
268 k[2] = ips[isrc]->word32[0];
269 k[3] = ips[idst]->word32[0];
270 return 4 * sizeof(uint32_t);
271 } else {
272 const u_int nwords = alen >> 2;
273 memcpy(&k[2], ips[isrc], alen);
274 memcpy(&k[2 + nwords], ips[idst], alen);
275 return (2 + (nwords * 2)) * sizeof(uint32_t);
276 }
277 }
278
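/*
 * connkey_getkey: unpack a connection key into the protocol, address
 * length, IDs and IP addresses, i.e. the inverse of connkey_setkey().
 */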
279 static void
280 connkey_getkey(const npf_connkey_t *key, uint16_t *proto, npf_addr_t *ips,
281 uint16_t *id, uint16_t *alen)
282 {
283 const uint32_t *k = key->ck_key;
284
285 *proto = k[0] >> 16;
286 *alen = k[0] & 0xffff;
287 id[NPF_SRC] = k[1] >> 16;
288 id[NPF_DST] = k[1] & 0xffff;
289
290 switch (*alen) {
291 case sizeof(struct in6_addr):
292 case sizeof(struct in_addr):
293 memcpy(&ips[NPF_SRC], &k[2], *alen);
294 memcpy(&ips[NPF_DST], &k[2 + ((unsigned)*alen >> 2)], *alen);
295 return;
296 default:
297 KASSERT(0);
298 }
299 }
300
301 /*
302 * npf_conn_conkey: construct a key for the connection lookup.
303 *
304 * => Returns the key length in bytes or zero on failure.
305 */
306 unsigned
307 npf_conn_conkey(const npf_cache_t *npc, npf_connkey_t *key, const bool forw)
308 {
309 const u_int proto = npc->npc_proto;
310 const u_int alen = npc->npc_alen;
311 const struct tcphdr *th;
312 const struct udphdr *uh;
313 uint16_t id[2];
314
315 switch (proto) {
316 case IPPROTO_TCP:
317 KASSERT(npf_iscached(npc, NPC_TCP));
318 th = npc->npc_l4.tcp;
319 id[NPF_SRC] = th->th_sport;
320 id[NPF_DST] = th->th_dport;
321 break;
322 case IPPROTO_UDP:
323 KASSERT(npf_iscached(npc, NPC_UDP));
324 uh = npc->npc_l4.udp;
325 id[NPF_SRC] = uh->uh_sport;
326 id[NPF_DST] = uh->uh_dport;
327 break;
328 case IPPROTO_ICMP:
329 if (npf_iscached(npc, NPC_ICMP_ID)) {
330 const struct icmp *ic = npc->npc_l4.icmp;
331 id[NPF_SRC] = ic->icmp_id;
332 id[NPF_DST] = ic->icmp_id;
333 break;
334 }
335 return 0;
336 case IPPROTO_ICMPV6:
337 if (npf_iscached(npc, NPC_ICMP_ID)) {
338 const struct icmp6_hdr *ic6 = npc->npc_l4.icmp6;
339 id[NPF_SRC] = ic6->icmp6_id;
340 id[NPF_DST] = ic6->icmp6_id;
341 break;
342 }
343 return 0;
344 default:
345 /* Unsupported protocol. */
346 return 0;
347 }
348 return connkey_setkey(key, proto, npc->npc_ips, id, alen, forw);
349 }
350
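/*
 * connkey_set_addr: rewrite the source (di == NPF_SRC) or destination
 * (di == NPF_DST) address of the key in-place, e.g. with a translated
 * address.
 */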
351 static __inline void
352 connkey_set_addr(npf_connkey_t *key, const npf_addr_t *naddr, const int di)
353 {
354 const u_int alen = key->ck_key[0] & 0xffff;
355 uint32_t *addr = &key->ck_key[2 + ((alen >> 2) * di)];
356
357 KASSERT(alen > 0);
358 memcpy(addr, naddr, alen);
359 }
360
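/*
 * connkey_set_id: rewrite the 16-bit source (di == NPF_SRC) or
 * destination (di == NPF_DST) identifier of the key, preserving the
 * other one (the src and dst IDs occupy the upper and lower halves
 * of ck_key[1], respectively).
 */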
361 static __inline void
362 connkey_set_id(npf_connkey_t *key, const uint16_t id, const int di)
363 {
364 const uint32_t oid = key->ck_key[1];
365 const u_int shift = 16 * !di;
366 const uint32_t mask = 0xffff0000 >> shift;
367
368 key->ck_key[1] = ((uint32_t)id << shift) | (oid & mask);
369 }
370
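/*
 * conn_update_atime: update the connection's last activity time with
 * the current uptime (seconds granularity suffices for the G/C).
 */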
371 static inline void
372 conn_update_atime(npf_conn_t *con)
373 {
374 struct timespec tsnow;
375
376 getnanouptime(&tsnow);
377 con->c_atime = tsnow.tv_sec;
378 }
379
380 /*
381  * npf_conn_ok: check if the connection is active and has the right direction.
382 */
383 static bool
384 npf_conn_ok(const npf_conn_t *con, const int di, bool forw)
385 {
386 const uint32_t flags = con->c_flags;
387
388 /* Check if connection is active and not expired. */
389 bool ok = (flags & (CONN_ACTIVE | CONN_EXPIRE)) == CONN_ACTIVE;
390 if (__predict_false(!ok)) {
391 return false;
392 }
393
394 /* Check if the direction is consistent */
395 bool pforw = (flags & PFIL_ALL) == (unsigned)di;
396 if (__predict_false(forw != pforw)) {
397 return false;
398 }
399 return true;
400 }
401
402 /*
403  * npf_conn_lookup: look up an established connection, if any.
404 *
405 * => If found, we will hold a reference for the caller.
406 */
407 npf_conn_t *
408 npf_conn_lookup(const npf_cache_t *npc, const int di, bool *forw)
409 {
410 npf_t *npf = npc->npc_ctx;
411 const nbuf_t *nbuf = npc->npc_nbuf;
412 npf_conn_t *con;
413 npf_connkey_t key;
414 u_int cifid;
415
416 	/* Construct a key and look up the connection in the store. */
417 if (!npf_conn_conkey(npc, &key, true)) {
418 return NULL;
419 }
420 con = npf_conndb_lookup(npf->conn_db, &key, forw);
421 if (con == NULL) {
422 return NULL;
423 }
424 KASSERT(npc->npc_proto == con->c_proto);
425
426 /* Check if connection is active and not expired. */
427 if (!npf_conn_ok(con, di, *forw)) {
428 atomic_dec_uint(&con->c_refcnt);
429 return NULL;
430 }
431
432 /*
433 * Match the interface and the direction of the connection entry
434 * and the packet.
435 */
436 cifid = con->c_ifid;
437 if (__predict_false(cifid && cifid != nbuf->nb_ifid)) {
438 atomic_dec_uint(&con->c_refcnt);
439 return NULL;
440 }
441
442 /* Update the last activity time. */
443 conn_update_atime(con);
444 return con;
445 }
446
447 /*
448  * npf_conn_inspect: look up a connection and inspect the protocol data.
449 *
450 * => If found, we will hold a reference for the caller.
451 */
452 npf_conn_t *
453 npf_conn_inspect(npf_cache_t *npc, const int di, int *error)
454 {
455 nbuf_t *nbuf = npc->npc_nbuf;
456 npf_conn_t *con;
457 bool forw, ok;
458
459 KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
460 if (!npf_conn_trackable_p(npc)) {
461 return NULL;
462 }
463
464 	/* Query the ALG, which may look up the connection for us. */
465 if ((con = npf_alg_conn(npc, di)) != NULL) {
466 /* Note: reference is held. */
467 return con;
468 }
469 if (nbuf_head_mbuf(nbuf) == NULL) {
470 *error = ENOMEM;
471 return NULL;
472 }
473 KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
474
475 /* Main lookup of the connection. */
476 if ((con = npf_conn_lookup(npc, di, &forw)) == NULL) {
477 return NULL;
478 }
479
480 /* Inspect the protocol data and handle state changes. */
481 mutex_enter(&con->c_lock);
482 ok = npf_state_inspect(npc, &con->c_state, forw);
483 mutex_exit(&con->c_lock);
484
485 /* If invalid state: let the rules deal with it. */
486 if (__predict_false(!ok)) {
487 npf_conn_release(con);
488 npf_stats_inc(npc->npc_ctx, NPF_STAT_INVALID_STATE);
489 return NULL;
490 }
491
492 /*
493 	 * If this is a multi-end state, then tag the packet specially
494 	 * so that it will simply be passed through on other interfaces.
495 */
496 if (con->c_ifid == 0 && nbuf_add_tag(nbuf, NPF_NTAG_PASS) != 0) {
497 npf_conn_release(con);
498 *error = ENOMEM;
499 return NULL;
500 }
501 return con;
502 }
503
504 /*
505  * npf_conn_establish: create a new connection and insert it into the global list.
506 *
507 * => Connection is created with the reference held for the caller.
508 * => Connection will be activated on the first reference release.
509 */
510 npf_conn_t *
511 npf_conn_establish(npf_cache_t *npc, int di, bool per_if)
512 {
513 npf_t *npf = npc->npc_ctx;
514 const nbuf_t *nbuf = npc->npc_nbuf;
515 npf_conn_t *con;
516 int error = 0;
517
518 KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
519
520 if (!npf_conn_trackable_p(npc)) {
521 return NULL;
522 }
523
524 /* Allocate and initialise the new connection. */
525 con = pool_cache_get(npf->conn_cache, PR_NOWAIT);
526 if (__predict_false(!con)) {
527 npf_worker_signal(npf);
528 return NULL;
529 }
530 NPF_PRINTF(("NPF: create conn %p\n", con));
531 npf_stats_inc(npf, NPF_STAT_CONN_CREATE);
532
533 mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
534 con->c_flags = (di & PFIL_ALL);
535 con->c_refcnt = 0;
536 con->c_rproc = NULL;
537 con->c_nat = NULL;
538
539 /* Initialize the protocol state. */
540 if (!npf_state_init(npc, &con->c_state)) {
541 npf_conn_destroy(npf, con);
542 return NULL;
543 }
544
545 KASSERT(npf_iscached(npc, NPC_IP46));
546 npf_connkey_t *fw = &con->c_forw_entry;
547 npf_connkey_t *bk = &con->c_back_entry;
548
549 /*
550 * Construct "forwards" and "backwards" keys. Also, set the
551 * interface ID for this connection (unless it is global).
552 */
553 if (!npf_conn_conkey(npc, fw, true) ||
554 !npf_conn_conkey(npc, bk, false)) {
555 npf_conn_destroy(npf, con);
556 return NULL;
557 }
558 fw->ck_backptr = bk->ck_backptr = con;
559 con->c_ifid = per_if ? nbuf->nb_ifid : 0;
560 con->c_proto = npc->npc_proto;
561
562 /*
563 * Set last activity time for a new connection and acquire
564 * a reference for the caller before we make it visible.
565 */
566 conn_update_atime(con);
567 con->c_refcnt = 1;
568
569 /*
570 * Insert both keys (entries representing directions) of the
571 * connection. At this point it becomes visible, but we activate
572 * the connection later.
573 */
574 mutex_enter(&con->c_lock);
575 if (!npf_conndb_insert(npf->conn_db, fw, con)) {
576 error = EISCONN;
577 goto err;
578 }
579 if (!npf_conndb_insert(npf->conn_db, bk, con)) {
580 npf_conn_t *ret __diagused;
581 ret = npf_conndb_remove(npf->conn_db, fw);
582 KASSERT(ret == con);
583 error = EISCONN;
584 goto err;
585 }
586 err:
587 /*
588 	 * If we have hit a duplicate: mark the connection as expired
589 	 * and let the G/C thread take care of it.  We cannot do it
590 	 * here since references may have been acquired already.
591 */
592 if (error) {
593 atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
594 atomic_dec_uint(&con->c_refcnt);
595 npf_stats_inc(npf, NPF_STAT_RACE_CONN);
596 } else {
597 NPF_PRINTF(("NPF: establish conn %p\n", con));
598 }
599
600 /* Finally, insert into the connection list. */
601 npf_conndb_enqueue(npf->conn_db, con);
602 mutex_exit(&con->c_lock);
603
604 return error ? NULL : con;
605 }
606
607 static void
608 npf_conn_destroy(npf_t *npf, npf_conn_t *con)
609 {
610 KASSERT(con->c_refcnt == 0);
611
612 if (con->c_nat) {
613 /* Release any NAT structures. */
614 npf_nat_destroy(con->c_nat);
615 }
616 if (con->c_rproc) {
617 /* Release the rule procedure. */
618 npf_rproc_release(con->c_rproc);
619 }
620
621 /* Destroy the state. */
622 npf_state_destroy(&con->c_state);
623 mutex_destroy(&con->c_lock);
624
625 /* Free the structure, increase the counter. */
626 pool_cache_put(npf->conn_cache, con);
627 npf_stats_inc(npf, NPF_STAT_CONN_DESTROY);
628 NPF_PRINTF(("NPF: conn %p destroyed\n", con));
629 }
630
631 /*
632  * npf_conn_setnat: associate a NAT entry with the connection, then update
633  * and re-insert the "backwards" connection entry using the translation values.
634 *
635 * => The caller must be holding a reference.
636 */
637 int
638 npf_conn_setnat(const npf_cache_t *npc, npf_conn_t *con,
639 npf_nat_t *nt, u_int ntype)
640 {
641 static const u_int nat_type_dimap[] = {
642 [NPF_NATOUT] = NPF_DST,
643 [NPF_NATIN] = NPF_SRC,
644 };
645 npf_t *npf = npc->npc_ctx;
646 npf_connkey_t key, *bk;
647 npf_conn_t *ret __diagused;
648 npf_addr_t *taddr;
649 in_port_t tport;
650 u_int tidx;
651
652 KASSERT(con->c_refcnt > 0);
653
654 npf_nat_gettrans(nt, &taddr, &tport);
655 KASSERT(ntype == NPF_NATOUT || ntype == NPF_NATIN);
656 tidx = nat_type_dimap[ntype];
657
658 /* Construct a "backwards" key. */
659 if (!npf_conn_conkey(npc, &key, false)) {
660 return EINVAL;
661 }
662
663 /* Acquire the lock and check for the races. */
664 mutex_enter(&con->c_lock);
665 if (__predict_false(con->c_flags & CONN_EXPIRE)) {
666 /* The connection got expired. */
667 mutex_exit(&con->c_lock);
668 return EINVAL;
669 }
670 KASSERT((con->c_flags & CONN_REMOVED) == 0);
671
672 if (__predict_false(con->c_nat != NULL)) {
673 /* Race with a duplicate packet. */
674 mutex_exit(&con->c_lock);
675 npf_stats_inc(npc->npc_ctx, NPF_STAT_RACE_NAT);
676 return EISCONN;
677 }
678
679 /* Remove the "backwards" entry. */
680 ret = npf_conndb_remove(npf->conn_db, &con->c_back_entry);
681 KASSERT(ret == con);
682
683 /* Set the source/destination IDs to the translation values. */
684 bk = &con->c_back_entry;
685 connkey_set_addr(bk, taddr, tidx);
686 if (tport) {
687 connkey_set_id(bk, tport, tidx);
688 }
689
690 /* Finally, re-insert the "backwards" entry. */
691 if (!npf_conndb_insert(npf->conn_db, bk, con)) {
692 /*
693 * Race: we have hit the duplicate, remove the "forwards"
694 * entry and expire our connection; it is no longer valid.
695 */
696 ret = npf_conndb_remove(npf->conn_db, &con->c_forw_entry);
697 KASSERT(ret == con);
698
699 atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
700 mutex_exit(&con->c_lock);
701
702 npf_stats_inc(npc->npc_ctx, NPF_STAT_RACE_NAT);
703 return EISCONN;
704 }
705
706 /* Associate the NAT entry and release the lock. */
707 con->c_nat = nt;
708 mutex_exit(&con->c_lock);
709 return 0;
710 }
711
712 /*
713 * npf_conn_expire: explicitly mark connection as expired.
714 */
715 void
716 npf_conn_expire(npf_conn_t *con)
717 {
718 /* KASSERT(con->c_refcnt > 0); XXX: npf_nat_freepolicy() */
719 atomic_or_uint(&con->c_flags, CONN_EXPIRE);
720 }
721
722 /*
723  * npf_conn_pass: return true if the connection is a "pass" one, otherwise false.
724 */
725 bool
726 npf_conn_pass(const npf_conn_t *con, npf_rproc_t **rp)
727 {
728 KASSERT(con->c_refcnt > 0);
729 if (__predict_true(con->c_flags & CONN_PASS)) {
730 *rp = con->c_rproc;
731 return true;
732 }
733 return false;
734 }
735
736 /*
737 * npf_conn_setpass: mark connection as a "pass" one and associate the
738 * rule procedure with it.
739 */
740 void
741 npf_conn_setpass(npf_conn_t *con, npf_rproc_t *rp)
742 {
743 KASSERT((con->c_flags & CONN_ACTIVE) == 0);
744 KASSERT(con->c_refcnt > 0);
745 KASSERT(con->c_rproc == NULL);
746
747 /*
748 * No need for atomic since the connection is not yet active.
749 * If rproc is set, the caller transfers its reference to us,
750 * which will be released on npf_conn_destroy().
751 */
752 atomic_or_uint(&con->c_flags, CONN_PASS);
753 con->c_rproc = rp;
754 }
755
756 /*
757 * npf_conn_release: release a reference, which might allow G/C thread
758 * to destroy this connection.
759 */
760 void
761 npf_conn_release(npf_conn_t *con)
762 {
763 if ((con->c_flags & (CONN_ACTIVE | CONN_EXPIRE)) == 0) {
764 /* Activate: after this, connection is globally visible. */
765 atomic_or_uint(&con->c_flags, CONN_ACTIVE);
766 }
767 KASSERT(con->c_refcnt > 0);
768 atomic_dec_uint(&con->c_refcnt);
769 }
770
771 /*
772 * npf_conn_getnat: return associated NAT data entry and indicate
773  * npf_conn_getnat: return the associated NAT data entry and indicate
774  * whether the packet belongs to the "forwards" or "backwards" stream.
775 npf_nat_t *
776 npf_conn_getnat(npf_conn_t *con, const int di, bool *forw)
777 {
778 KASSERT(con->c_refcnt > 0);
779 *forw = (con->c_flags & PFIL_ALL) == (u_int)di;
780 return con->c_nat;
781 }
782
783 /*
784 * npf_conn_expired: criterion to check if connection is expired.
785 */
786 static inline bool
787 npf_conn_expired(const npf_conn_t *con, uint64_t tsnow)
788 {
789 const int etime = npf_state_etime(&con->c_state, con->c_proto);
790 int elapsed;
791
792 if (__predict_false(con->c_flags & CONN_EXPIRE)) {
793 /* Explicitly marked to be expired. */
794 return true;
795 }
796
797 /*
798 * Note: another thread may update 'atime' and it might
799 * become greater than 'now'.
800 */
801 elapsed = (int64_t)tsnow - con->c_atime;
802 return elapsed > etime;
803 }
804
805 /*
806 * npf_conn_gc: garbage collect the expired connections.
807 *
808 * => Must run in a single-threaded manner.
809 * => If it is a flush request, then destroy all connections.
810 * => If 'sync' is true, then perform passive serialisation.
811 */
812 void
813 npf_conn_gc(npf_t *npf, npf_conndb_t *cd, bool flush, bool sync)
814 {
815 npf_conn_t *con, *prev, *gclist = NULL;
816 struct timespec tsnow;
817
818 getnanouptime(&tsnow);
819
820 /*
821 * Scan all connections and check them for expiration.
822 */
823 prev = NULL;
824 con = npf_conndb_getlist(cd);
825 while (con) {
826 npf_conn_t *next = con->c_next;
827
828 /* Expired? Flushing all? */
829 if (!npf_conn_expired(con, tsnow.tv_sec) && !flush) {
830 prev = con;
831 con = next;
832 continue;
833 }
834
835 /* Remove both entries of the connection. */
836 mutex_enter(&con->c_lock);
837 if ((con->c_flags & CONN_REMOVED) == 0) {
838 npf_conn_t *ret __diagused;
839
840 ret = npf_conndb_remove(cd, &con->c_forw_entry);
841 KASSERT(ret == con);
842 ret = npf_conndb_remove(cd, &con->c_back_entry);
843 KASSERT(ret == con);
844 }
845
846 /* Flag the removal and expiration. */
847 atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
848 mutex_exit(&con->c_lock);
849
850 /* Move to the G/C list. */
851 npf_conndb_dequeue(cd, con, prev);
852 con->c_next = gclist;
853 gclist = con;
854
855 /* Next.. */
856 con = next;
857 }
858 npf_conndb_settail(cd, prev);
859
860 /*
861 * Ensure it is safe to destroy the connections.
862 * Note: drop the conn_lock (see the lock order).
863 */
864 if (sync) {
865 mutex_exit(&npf->conn_lock);
866 if (gclist) {
867 npf_config_enter(npf);
868 npf_config_sync(npf);
869 npf_config_exit(npf);
870 }
871 }
872
873 /*
874 * Garbage collect all expired connections.
875 * May need to wait for the references to drain.
876 */
877 con = gclist;
878 while (con) {
879 npf_conn_t *next = con->c_next;
880
881 /*
882 * Destroy only if removed and no references.
883 * Otherwise, wait for a tiny moment.
884 */
885 if (__predict_false(con->c_refcnt)) {
886 kpause("npfcongc", false, 1, NULL);
887 continue;
888 }
889 npf_conn_destroy(npf, con);
890 con = next;
891 }
892 }
893
894 /*
895 * npf_conn_worker: G/C to run from a worker thread.
896 */
897 void
898 npf_conn_worker(npf_t *npf)
899 {
900 mutex_enter(&npf->conn_lock);
901 /* Note: the conn_lock will be released (sync == true). */
902 npf_conn_gc(npf, npf->conn_db, false, true);
903 }
904
905 /*
906 * npf_conndb_export: construct a list of connections prepared for saving.
907 * Note: this is expected to be an expensive operation.
908 */
909 int
910 npf_conndb_export(npf_t *npf, prop_array_t conlist)
911 {
912 npf_conn_t *con, *prev;
913
914 /*
915 	 * Note: acquire conn_lock to prevent the database from being
916 	 * destroyed and to keep the G/C thread from running.
917 */
918 mutex_enter(&npf->conn_lock);
919 if (npf->conn_tracking != CONN_TRACKING_ON) {
920 mutex_exit(&npf->conn_lock);
921 return 0;
922 }
923 prev = NULL;
924 con = npf_conndb_getlist(npf->conn_db);
925 while (con) {
926 npf_conn_t *next = con->c_next;
927 prop_dictionary_t cdict;
928
929 if ((cdict = npf_conn_export(npf, con)) != NULL) {
930 prop_array_add(conlist, cdict);
931 prop_object_release(cdict);
932 }
933 prev = con;
934 con = next;
935 }
936 npf_conndb_settail(npf->conn_db, prev);
937 mutex_exit(&npf->conn_lock);
938 return 0;
939 }
940
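/*
 * npf_connkey_export: dump a connection key into a proplib dictionary
 * with the "proto", "sport"/"dport" and "saddr"/"daddr" entries; the
 * counterpart of npf_connkey_import() below.
 */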
941 static prop_dictionary_t
942 npf_connkey_export(const npf_connkey_t *key)
943 {
944 uint16_t id[2], alen, proto;
945 prop_dictionary_t kdict;
946 npf_addr_t ips[2];
947 prop_data_t d;
948
949 kdict = prop_dictionary_create();
950 connkey_getkey(key, &proto, ips, id, &alen);
951
952 prop_dictionary_set_uint16(kdict, "proto", proto);
953
954 prop_dictionary_set_uint16(kdict, "sport", id[NPF_SRC]);
955 prop_dictionary_set_uint16(kdict, "dport", id[NPF_DST]);
956
957 d = prop_data_create_data(&ips[NPF_SRC], alen);
958 prop_dictionary_set_and_rel(kdict, "saddr", d);
959
960 d = prop_data_create_data(&ips[NPF_DST], alen);
961 prop_dictionary_set_and_rel(kdict, "daddr", d);
962
963 return kdict;
964 }
965
966 /*
967 * npf_conn_export: serialise a single connection.
968 */
969 prop_dictionary_t
970 npf_conn_export(npf_t *npf, const npf_conn_t *con)
971 {
972 prop_dictionary_t cdict, kdict;
973 prop_data_t d;
974
975 if ((con->c_flags & (CONN_ACTIVE|CONN_EXPIRE)) != CONN_ACTIVE) {
976 return NULL;
977 }
978 cdict = prop_dictionary_create();
979 prop_dictionary_set_uint32(cdict, "flags", con->c_flags);
980 prop_dictionary_set_uint32(cdict, "proto", con->c_proto);
981 if (con->c_ifid) {
982 const char *ifname = npf_ifmap_getname(npf, con->c_ifid);
983 prop_dictionary_set_cstring(cdict, "ifname", ifname);
984 }
985
986 d = prop_data_create_data(&con->c_state, sizeof(npf_state_t));
987 prop_dictionary_set_and_rel(cdict, "state", d);
988
989 kdict = npf_connkey_export(&con->c_forw_entry);
990 prop_dictionary_set_and_rel(cdict, "forw-key", kdict);
991
992 kdict = npf_connkey_export(&con->c_back_entry);
993 prop_dictionary_set_and_rel(cdict, "back-key", kdict);
994
995 if (con->c_nat) {
996 npf_nat_export(cdict, con->c_nat);
997 }
998 return cdict;
999 }
1000
1001 static uint32_t
1002 npf_connkey_import(prop_dictionary_t kdict, npf_connkey_t *key)
1003 {
1004 prop_object_t sobj, dobj;
1005 npf_addr_t const * ips[2];
1006 uint16_t alen, proto, id[2];
1007
1008 if (!prop_dictionary_get_uint16(kdict, "proto", &proto))
1009 return 0;
1010
1011 if (!prop_dictionary_get_uint16(kdict, "sport", &id[NPF_SRC]))
1012 return 0;
1013
1014 if (!prop_dictionary_get_uint16(kdict, "dport", &id[NPF_DST]))
1015 return 0;
1016
1017 sobj = prop_dictionary_get(kdict, "saddr");
1018 if ((ips[NPF_SRC] = prop_data_data_nocopy(sobj)) == NULL)
1019 return 0;
1020
1021 dobj = prop_dictionary_get(kdict, "daddr");
1022 if ((ips[NPF_DST] = prop_data_data_nocopy(dobj)) == NULL)
1023 return 0;
1024
1025 alen = prop_data_size(sobj);
1026 if (alen != prop_data_size(dobj))
1027 return 0;
1028
1029 return connkey_setkey(key, proto, ips, id, alen, true);
1030 }
1031
1032 /*
1033 * npf_conn_import: fully reconstruct a single connection from a
1034  * dictionary and insert it into the given database.
1035 */
1036 int
1037 npf_conn_import(npf_t *npf, npf_conndb_t *cd, prop_dictionary_t cdict,
1038 npf_ruleset_t *natlist)
1039 {
1040 npf_conn_t *con;
1041 npf_connkey_t *fw, *bk;
1042 prop_object_t obj;
1043 const char *ifname;
1044 const void *d;
1045
1046 /* Allocate a connection and initialise it (clear first). */
1047 con = pool_cache_get(npf->conn_cache, PR_WAITOK);
1048 memset(con, 0, sizeof(npf_conn_t));
1049 mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1050 npf_stats_inc(npf, NPF_STAT_CONN_CREATE);
1051
1052 prop_dictionary_get_uint32(cdict, "proto", &con->c_proto);
1053 prop_dictionary_get_uint32(cdict, "flags", &con->c_flags);
1054 con->c_flags &= PFIL_ALL | CONN_ACTIVE | CONN_PASS;
1055 conn_update_atime(con);
1056
1057 if (prop_dictionary_get_cstring_nocopy(cdict, "ifname", &ifname) &&
1058 (con->c_ifid = npf_ifmap_register(npf, ifname)) == 0) {
1059 goto err;
1060 }
1061
1062 obj = prop_dictionary_get(cdict, "state");
1063 if ((d = prop_data_data_nocopy(obj)) == NULL ||
1064 prop_data_size(obj) != sizeof(npf_state_t)) {
1065 goto err;
1066 }
1067 memcpy(&con->c_state, d, sizeof(npf_state_t));
1068
1069 /* Reconstruct NAT association, if any. */
1070 if ((obj = prop_dictionary_get(cdict, "nat")) != NULL &&
1071 (con->c_nat = npf_nat_import(npf, obj, natlist, con)) == NULL) {
1072 goto err;
1073 }
1074
1075 /*
1076 * Fetch and copy the keys for each direction.
1077 */
1078 obj = prop_dictionary_get(cdict, "forw-key");
1079 fw = &con->c_forw_entry;
1080 if (obj == NULL || !npf_connkey_import(obj, fw)) {
1081 goto err;
1082 }
1083
1084 obj = prop_dictionary_get(cdict, "back-key");
1085 bk = &con->c_back_entry;
1086 if (obj == NULL || !npf_connkey_import(obj, bk)) {
1087 goto err;
1088 }
1089
1090 fw->ck_backptr = bk->ck_backptr = con;
1091
1092 /* Insert the entries and the connection itself. */
1093 if (!npf_conndb_insert(cd, fw, con)) {
1094 goto err;
1095 }
1096 if (!npf_conndb_insert(cd, bk, con)) {
1097 npf_conndb_remove(cd, fw);
1098 goto err;
1099 }
1100
1101 NPF_PRINTF(("NPF: imported conn %p\n", con));
1102 npf_conndb_enqueue(cd, con);
1103 return 0;
1104 err:
1105 npf_conn_destroy(npf, con);
1106 return EINVAL;
1107 }
1108
1109 int
1110 npf_conn_find(npf_t *npf, prop_dictionary_t idict, prop_dictionary_t *odict)
1111 {
1112 prop_dictionary_t kdict;
1113 npf_connkey_t key;
1114 npf_conn_t *con;
1115 uint16_t dir;
1116 bool forw;
1117
1118 if ((kdict = prop_dictionary_get(idict, "key")) == NULL)
1119 return EINVAL;
1120
1121 if (!npf_connkey_import(kdict, &key))
1122 return EINVAL;
1123
1124 if (!prop_dictionary_get_uint16(idict, "direction", &dir))
1125 return EINVAL;
1126
1127 con = npf_conndb_lookup(npf->conn_db, &key, &forw);
1128 if (con == NULL) {
1129 return ESRCH;
1130 }
1131
1132 if (!npf_conn_ok(con, dir, true)) {
1133 atomic_dec_uint(&con->c_refcnt);
1134 return ESRCH;
1135 }
1136
1137 *odict = npf_conn_export(npf, con);
1138 if (*odict == NULL) {
1139 atomic_dec_uint(&con->c_refcnt);
1140 return ENOSPC;
1141 }
1142 atomic_dec_uint(&con->c_refcnt);
1143
1144 return 0;
1145 }
1146
1147 #if defined(DDB) || defined(_NPF_TESTING)
1148
1149 void
1150 npf_conn_print(const npf_conn_t *con)
1151 {
1152 const u_int alen = NPF_CONN_GETALEN(&con->c_forw_entry);
1153 const uint32_t *fkey = con->c_forw_entry.ck_key;
1154 const uint32_t *bkey = con->c_back_entry.ck_key;
1155 const u_int proto = con->c_proto;
1156 struct timespec tspnow;
1157 const void *src, *dst;
1158 int etime;
1159
1160 getnanouptime(&tspnow);
1161 etime = npf_state_etime(&con->c_state, proto);
1162
1163 printf("%p:\n\tproto %d flags 0x%x tsdiff %ld etime %d\n", con,
1164 proto, con->c_flags, (long)(tspnow.tv_sec - con->c_atime), etime);
1165
1166 src = &fkey[2], dst = &fkey[2 + (alen >> 2)];
1167 printf("\tforw %s:%d", npf_addr_dump(src, alen), ntohs(fkey[1] >> 16));
1168 printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(fkey[1] & 0xffff));
1169
1170 src = &bkey[2], dst = &bkey[2 + (alen >> 2)];
1171 printf("\tback %s:%d", npf_addr_dump(src, alen), ntohs(bkey[1] >> 16));
1172 printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(bkey[1] & 0xffff));
1173
1174 npf_state_dump(&con->c_state);
1175 if (con->c_nat) {
1176 npf_nat_dump(con->c_nat);
1177 }
1178 }
1179
1180 #endif
1181