
Lines Matching defs:con

46 * - npf_conn_getforwkey(con)       -- for the forwards stream;
47 * - npf_conn_getbackkey(con, alen) -- for the backwards stream.
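
The two accessors above hand back the forward-stream and backward-stream keys of a connection. Below is a minimal user-space sketch of that dual-key idea; the demo_* names, the struct layout and the key width are assumptions for illustration only, not the real npf_conn_t / npf_connkey_t layout.

#include <stdint.h>

#define DEMO_CONNKEY_MAXWORDS	11	/* assumed: wide enough for an IPv6 key */

typedef struct {
	uint32_t	ck_key[DEMO_CONNKEY_MAXWORDS];
} demo_connkey_t;

typedef struct {
	unsigned	c_alen;		/* address length: 4 or 16 */
	demo_connkey_t	c_keys[2];	/* [0] forwards, [1] backwards */
} demo_conn_t;

/* Forward-stream key: first slot of the key area. */
static inline demo_connkey_t *
demo_conn_getforwkey(demo_conn_t *con)
{
	return &con->c_keys[0];
}

/* Backward-stream key: second slot; alen is unused in this flat model. */
static inline demo_connkey_t *
demo_conn_getbackkey(demo_conn_t *con, unsigned alen)
{
	(void)alen;
	return &con->c_keys[1];
}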
275 conn_update_atime(npf_conn_t *con)
280 atomic_store_relaxed(&con->c_atime, tsnow.tv_sec);
295 npf_conn_check(const npf_conn_t *con, const nbuf_t *nbuf,
298 const uint32_t flags = atomic_load_relaxed(&con->c_flags);
299 const unsigned ifid = atomic_load_relaxed(&con->c_ifid);
330 npf_conn_t *con;
337 con = npf_conndb_lookup(npf, &key, flow);
338 if (con == NULL) {
341 KASSERT(npc->npc_proto == atomic_load_relaxed(&con->c_proto));
344 if (!npf_conn_check(con, nbuf, di, *flow)) {
345 atomic_dec_uint(&con->c_refcnt);
350 conn_update_atime(con);
351 return con;
364 npf_conn_t *con;
373 if ((con = npf_alg_conn(npc, di)) != NULL) {
375 return con;
384 if ((con = npf_conn_lookup(npc, di, &flow)) == NULL) {
389 mutex_enter(&con->c_lock);
390 ok = npf_state_inspect(npc, &con->c_state, flow);
391 mutex_exit(&con->c_lock);
395 npf_conn_release(con);
406 if (atomic_load_relaxed(&con->c_flags) & CONN_GPASS) {
414 return con;
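
The lookup lines above follow one pattern: the database lookup returns the connection with a reference held, so every early-exit path (interface or direction mismatch, failed state inspection) must drop that reference before bailing out. Below is a self-contained sketch of that pattern, using placeholder demo_* types and a one-entry stand-in for the connection database; it is not the NPF API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	atomic_uint	c_refcnt;
	unsigned	c_ifid;		/* 0 means "any interface" */
	bool		c_expired;
} demo_conn_t;

/* One static entry stands in for the connection database. */
static demo_conn_t demo_db_entry = { .c_ifid = 1 };

static demo_conn_t *
demo_db_lookup(void)
{
	/* A real lookup hashes the key; here we only take a reference. */
	atomic_fetch_add(&demo_db_entry.c_refcnt, 1);
	return &demo_db_entry;
}

static bool
demo_conn_check(const demo_conn_t *con, unsigned ifid)
{
	/* Reject expired entries and interface mismatches. */
	return !con->c_expired && (con->c_ifid == 0 || con->c_ifid == ifid);
}

static demo_conn_t *
demo_conn_lookup(unsigned ifid)
{
	demo_conn_t *con = demo_db_lookup();

	if (!demo_conn_check(con, ifid)) {
		/* Every early exit drops the reference the lookup took. */
		atomic_fetch_sub(&con->c_refcnt, 1);
		return NULL;
	}
	return con;	/* caller keeps the reference until it releases */
}

int
main(void)
{
	demo_conn_t *con = demo_conn_lookup(1);
	printf("lookup on ifid 1: %s\n", con ? "hit" : "miss");
	if (con) {
		atomic_fetch_sub(&con->c_refcnt, 1);
	}
	con = demo_conn_lookup(2);
	printf("lookup on ifid 2: %s\n", con ? "hit" : "miss");
	return 0;
}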
432 npf_conn_t *con;
442 con = pool_cache_get(npf->conn_cache[idx], PR_NOWAIT);
443 if (__predict_false(!con)) {
447 NPF_PRINTF(("NPF: create conn %p\n", con));
450 mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
451 atomic_store_relaxed(&con->c_flags, di & PFIL_ALL);
452 atomic_store_relaxed(&con->c_refcnt, 0);
453 con->c_rproc = NULL;
454 con->c_nat = NULL;
456 con->c_proto = npc->npc_proto;
457 CTASSERT(sizeof(con->c_proto) >= sizeof(npc->npc_proto));
458 con->c_alen = alen;
461 if (!npf_state_init(npc, &con->c_state)) {
462 npf_conn_destroy(npf, con);
467 fw = npf_conn_getforwkey(con);
468 bk = npf_conn_getbackkey(con, alen);
476 npf_conn_destroy(npf, con);
479 con->c_ifid = global ? nbuf->nb_ifid : 0;
485 conn_update_atime(con);
486 atomic_store_relaxed(&con->c_refcnt, 1);
493 mutex_enter(&con->c_lock);
495 if (!npf_conndb_insert(conn_db, fw, con, NPF_FLOW_FORW)) {
499 if (!npf_conndb_insert(conn_db, bk, con, NPF_FLOW_BACK)) {
502 KASSERT(ret == con);
513 atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
514 atomic_dec_uint(&con->c_refcnt);
517 NPF_PRINTF(("NPF: establish conn %p\n", con));
521 npf_conndb_enqueue(conn_db, con);
522 mutex_exit(&con->c_lock);
524 return error ? NULL : con;
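
The establish path above inserts the forward key first and, if the backward key cannot be inserted, removes the forward key again and marks the connection expired, so the database never holds a half-registered connection. A compilable sketch of that insert-or-roll-back pattern follows, with a toy flat array standing in for npf_conndb_insert()/npf_conndb_remove(); the demo_* names and integer keys are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_DB_SLOTS	16

typedef struct { int dummy; } demo_conn_t;

static demo_conn_t *demo_db[DEMO_DB_SLOTS];

static bool
demo_db_insert(unsigned key, demo_conn_t *con)
{
	unsigned slot = key % DEMO_DB_SLOTS;

	if (demo_db[slot] != NULL) {
		return false;	/* key collision */
	}
	demo_db[slot] = con;
	return true;
}

static demo_conn_t *
demo_db_remove(unsigned key)
{
	unsigned slot = key % DEMO_DB_SLOTS;
	demo_conn_t *con = demo_db[slot];

	demo_db[slot] = NULL;
	return con;
}

/* Insert both keys or neither: roll back the forward key on failure. */
static bool
demo_conn_establish(demo_conn_t *con, unsigned fwkey, unsigned bkkey)
{
	if (!demo_db_insert(fwkey, con)) {
		return false;
	}
	if (!demo_db_insert(bkkey, con)) {
		demo_conn_t *ret = demo_db_remove(fwkey);
		(void)ret;	/* the removed entry must be this connection */
		return false;
	}
	return true;
}

int
main(void)
{
	demo_conn_t a, b;

	printf("a: %d\n", demo_conn_establish(&a, 1, 2));	/* 1: both keys inserted */
	printf("b: %d\n", demo_conn_establish(&b, 3, 2));	/* 0: back key collides, rolled back */
	return 0;
}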
528 npf_conn_destroy(npf_t *npf, npf_conn_t *con)
530 const unsigned idx __unused = NPF_CONNCACHE(con->c_alen);
532 KASSERT(atomic_load_relaxed(&con->c_refcnt) == 0);
534 if (con->c_nat) {
536 npf_nat_destroy(con, con->c_nat);
538 if (con->c_rproc) {
540 npf_rproc_release(con->c_rproc);
544 npf_state_destroy(&con->c_state);
545 mutex_destroy(&con->c_lock);
548 pool_cache_put(npf->conn_cache[idx], con);
550 NPF_PRINTF(("NPF: conn %p destroyed\n", con));
560 npf_conn_setnat(const npf_cache_t *npc, npf_conn_t *con,
576 KASSERT(atomic_load_relaxed(&con->c_refcnt) > 0);
582 mutex_enter(&con->c_lock);
583 flags = atomic_load_relaxed(&con->c_flags);
586 mutex_exit(&con->c_lock);
591 if (__predict_false(con->c_nat != NULL)) {
593 mutex_exit(&con->c_lock);
600 bk = npf_conn_getbackkey(con, con->c_alen);
602 KASSERT(ret == con);
608 if (!npf_conndb_insert(conn_db, bk, con, NPF_FLOW_BACK)) {
613 npf_connkey_t *fw = npf_conn_getforwkey(con);
615 KASSERT(ret == con);
617 atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
618 mutex_exit(&con->c_lock);
625 con->c_nat = nt;
626 mutex_exit(&con->c_lock);
638 npf_conn_expire(npf_conn_t *con)
640 atomic_or_uint(&con->c_flags, CONN_EXPIRE);
647 npf_conn_pass(const npf_conn_t *con, npf_match_info_t *mi, npf_rproc_t **rp)
649 KASSERT(atomic_load_relaxed(&con->c_refcnt) > 0);
650 if (__predict_true(atomic_load_relaxed(&con->c_flags) & CONN_PASS)) {
651 mi->mi_retfl = atomic_load_relaxed(&con->c_retfl);
652 mi->mi_rid = con->c_rid;
653 *rp = con->c_rproc;
664 npf_conn_setpass(npf_conn_t *con, const npf_match_info_t *mi, npf_rproc_t *rp)
666 KASSERT((atomic_load_relaxed(&con->c_flags) & CONN_ACTIVE) == 0);
667 KASSERT(atomic_load_relaxed(&con->c_refcnt) > 0);
668 KASSERT(con->c_rproc == NULL);
675 atomic_or_uint(&con->c_flags, CONN_PASS);
676 con->c_rproc = rp;
678 con->c_rid = mi->mi_rid;
679 con->c_retfl = mi->mi_retfl;
688 npf_conn_release(npf_conn_t *con)
690 const unsigned flags = atomic_load_relaxed(&con->c_flags);
694 atomic_or_uint(&con->c_flags, CONN_ACTIVE);
696 KASSERT(atomic_load_relaxed(&con->c_refcnt) > 0);
697 atomic_dec_uint(&con->c_refcnt);
704 npf_conn_getnat(const npf_conn_t *con)
706 return con->c_nat;
713 npf_conn_expired(npf_t *npf, const npf_conn_t *con, uint64_t tsnow)
715 const unsigned flags = atomic_load_relaxed(&con->c_flags);
716 const int etime = npf_state_etime(npf, &con->c_state, con->c_proto);
728 elapsed = (int64_t)tsnow - atomic_load_relaxed(&con->c_atime);
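
The expiry test above compares the seconds elapsed since the last access (c_atime) against the state- and protocol-dependent timeout obtained from npf_state_etime(). A minimal sketch of that comparison, with an assumed fixed timeout value in place of NPF's configurable state timeouts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A connection is stale once its idle time exceeds the timeout. */
static bool
demo_conn_expired(uint64_t tsnow, uint64_t atime, int etime)
{
	const int64_t elapsed = (int64_t)tsnow - (int64_t)atime;

	return elapsed > etime;
}

int
main(void)
{
	const int tcp_established_timeout = 86400;	/* assumed: 24 hours */

	printf("%d\n", demo_conn_expired(1000 + 90000, 1000,
	    tcp_established_timeout));	/* 1: expired */
	printf("%d\n", demo_conn_expired(1000 + 600, 1000,
	    tcp_established_timeout));	/* 0: still fresh */
	return 0;
}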
736 npf_conn_remove(npf_conndb_t *cd, npf_conn_t *con)
739 mutex_enter(&con->c_lock);
740 if ((atomic_load_relaxed(&con->c_flags) & CONN_REMOVED) == 0) {
744 fw = npf_conn_getforwkey(con);
746 KASSERT(ret == con);
748 bk = npf_conn_getbackkey(con, NPF_CONNKEY_ALEN(fw));
750 KASSERT(ret == con);
754 atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
755 mutex_exit(&con->c_lock);
775 npf_conn_t *head, *con;
789 con = head;
790 while (con) {
794 if (npf_conn_export(npf, con, con_nvl) == 0) {
799 if ((con = npf_conndb_getnext(conn_db, con)) == head) {
811 npf_conn_export(npf_t *npf, npf_conn_t *con, nvlist_t *nvl)
817 flags = atomic_load_relaxed(&con->c_flags);
822 nvlist_add_number(nvl, "proto", con->c_proto);
823 if (con->c_ifid) {
825 npf_ifmap_copyname(npf, con->c_ifid, ifname, sizeof(ifname));
828 nvlist_add_binary(nvl, "state", &con->c_state, sizeof(npf_state_t));
830 fw = npf_conn_getforwkey(con);
832 KASSERT(alen == con->c_alen);
833 bk = npf_conn_getbackkey(con, alen);
844 if (con->c_nat) {
845 npf_nat_export(npf, con->c_nat, nvl);
858 npf_conn_t *con;
874 con = pool_cache_get(npf->conn_cache[idx], PR_WAITOK);
875 memset(con, 0, sizeof(npf_conn_t));
876 mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
879 con->c_proto = dnvlist_get_number(cdict, "proto", 0);
882 atomic_store_relaxed(&con->c_flags, flags);
883 conn_update_atime(con);
886 if (ifname && (con->c_ifid = npf_ifmap_register(npf, ifname)) == 0) {
894 memcpy(&con->c_state, state, sizeof(npf_state_t));
898 (con->c_nat = npf_nat_import(npf, nat, natlist, con)) == NULL) {
905 fw = npf_conn_getforwkey(con);
910 bk = npf_conn_getbackkey(con, NPF_CONNKEY_ALEN(fw));
922 if (!npf_conndb_insert(cd, fw, con, NPF_FLOW_FORW)) {
925 if (!npf_conndb_insert(cd, bk, con, NPF_FLOW_BACK)) {
930 NPF_PRINTF(("NPF: imported conn %p\n", con));
931 npf_conndb_enqueue(cd, con);
934 npf_conn_destroy(npf, con);
945 npf_conn_t *con;
954 con = npf_conndb_lookup(npf, &key, &flow);
955 if (con == NULL) {
958 if (!npf_conn_check(con, NULL, 0, NPF_FLOW_FORW)) {
959 atomic_dec_uint(&con->c_refcnt);
962 error = npf_conn_export(npf, con, resp);
964 atomic_dec_uint(&con->c_refcnt);
971 npf_conn_print(npf_conn_t *con)
973 const npf_connkey_t *fw = npf_conn_getforwkey(con);
974 const npf_connkey_t *bk = npf_conn_getbackkey(con, NPF_CONNKEY_ALEN(fw));
975 const unsigned flags = atomic_load_relaxed(&con->c_flags);
976 const unsigned proto = con->c_proto;
980 printf("%p:\n\tproto %d flags 0x%x tsdiff %ld etime %d\n", con,
981 proto, flags, (long)(tspnow.tv_sec - con->c_atime),
982 npf_state_etime(npf_getkernctx(), &con->c_state, proto));
985 npf_state_dump(&con->c_state);
986 if (con->c_nat) {
987 npf_nat_dump(con->c_nat);