Lines matching defs:ipf (each line is prefixed with its line number in the source file; gaps in the numbering are lines that never mention the token ipf). The file appears to be NetBSD's sys/netinet/ip_flow.c, the IPv4 fast-forwarding flow cache.
90 #define IPFLOW_INSERT(hashidx, ipf) \
92 (ipf)->ipf_hashidx = (hashidx); \
93 TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
94 TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
97 #define IPFLOW_REMOVE(hashidx, ipf) \
99 TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
100 TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
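Taken together, these two macros keep every flow entry linked on two queues at once: a per-bucket hash chain (ipf_hash) for lookups and one global list (ipf_list) that the rest of the file maintains in LRU order. The lines missing from the macro bodies are presumably just the do { ... } while (0) wrappers, which never name ipf. Below is a minimal userland sketch of the same double-linking using the BSD <sys/queue.h> macros; the struct layout, the flow_* names, and FLOWHASH_SIZE are assumptions for illustration, not the kernel's definitions.

#include <sys/queue.h>
#include <netinet/in.h>
#include <stddef.h>
#include <stdint.h>

#define FLOWHASH_SIZE 64		/* assumed; the kernel sizes its table differently */

struct ipflow {
	TAILQ_ENTRY(ipflow) ipf_hash;	/* chain within one hash bucket */
	TAILQ_ENTRY(ipflow) ipf_list;	/* global list, kept in LRU order */
	size_t ipf_hashidx;		/* bucket index, so removal never rehashes */
	struct in_addr ipf_dst;		/* flow key: destination, ... */
	struct in_addr ipf_src;		/* ... source, ... */
	uint8_t ipf_tos;		/* ... and type of service */
	unsigned ipf_uses;		/* hits since the last slow-timer tick */
	unsigned ipf_last_uses;		/* hits during the previous interval */
	unsigned ipf_dropped;		/* output drops charged to this flow */
	unsigned ipf_errors;		/* output errors charged to this flow */
	int ipf_timer;			/* slow-timer ticks until expiry */
};

TAILQ_HEAD(ipflowhead, ipflow);
static struct ipflowhead ipflowtable[FLOWHASH_SIZE];
static struct ipflowhead ipflowlist;

static void
flow_init(void)
{
	for (size_t i = 0; i < FLOWHASH_SIZE; i++)
		TAILQ_INIT(&ipflowtable[i]);
	TAILQ_INIT(&ipflowlist);
}

/* Same shape as IPFLOW_INSERT/IPFLOW_REMOVE: both queues are always
 * updated together, so an entry is either on both or on neither. */
static void
flow_insert(size_t hashidx, struct ipflow *ipf)
{
	ipf->ipf_hashidx = hashidx;
	TAILQ_INSERT_HEAD(&ipflowtable[hashidx], ipf, ipf_hash);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
}

static void
flow_remove(size_t hashidx, struct ipflow *ipf)
{
	TAILQ_REMOVE(&ipflowtable[hashidx], ipf, ipf_hash);
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
}

Stashing the bucket index in ipf_hashidx is what lets IPFLOW_REMOVE unlink an entry without recomputing the hash; the two-queue layout gives O(1) LRU maintenance alongside hashed lookup.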
134 struct ipflow *ipf;
140 TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
141 if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
142 && ip->ip_src.s_addr == ipf->ipf_src.s_addr
143 && ip->ip_tos == ipf->ipf_tos)
146 return ipf;
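ipflow_lookup() walks a single bucket and matches the full (dst, src, tos) key. Continuing the sketch above; the hash mix here is an arbitrary assumption, not the kernel's actual hash function:

#include <arpa/inet.h>		/* ntohl */

/* Assumed hash: any mix of the three key fields will do for a sketch. */
static size_t
flow_hash(struct in_addr dst, struct in_addr src, uint8_t tos)
{
	uint32_t h = ntohl(dst.s_addr) ^ ntohl(src.s_addr) ^ tos;
	return h % FLOWHASH_SIZE;
}

/* Mirrors ipflow_lookup(): hash to one bucket, then compare the whole key. */
static struct ipflow *
flow_lookup(struct in_addr dst, struct in_addr src, uint8_t tos)
{
	size_t hash = flow_hash(dst, src, tos);
	struct ipflow *ipf;

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (dst.s_addr == ipf->ipf_dst.s_addr
		    && src.s_addr == ipf->ipf_src.s_addr
		    && tos == ipf->ipf_tos)
			return ipf;
	}
	return NULL;
}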
207 struct ipflow *ipf;
245 if ((ipf = ipflow_lookup(ip)) == NULL)
278 rt = rtcache_validate(&ipf->ipf_ro);
333 ipf->ipf_uses++;
342 TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
343 TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
346 PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);
351 dst = rtcache_getdst(&ipf->ipf_ro);
355 ipf->ipf_dropped++;
357 ipf->ipf_errors++;
361 rtcache_unref(rt, &ipf->ipf_ro);
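These are the bookkeeping lines of the ipflow_fastforward() hit path: once the lookup succeeds and the cached route validates, the flow counts the use, moves to the front of the LRU list, and has its expiry timer re-armed; ipf_dropped and ipf_errors are bumped only when the actual transmit fails. A sketch of just that bookkeeping, continuing the model above (the IPFLOW_TIMER value is assumed, and the route-cache and output steps are not modeled):

#define IPFLOW_TIMER 14		/* assumed lifetime in slow-timer ticks */

/* Mirrors the hit path: count the use, keep the LRU list ordered, and
 * push the expiry out again (this stands in for PRT_SLOW_ARM()). */
static void
flow_hit(struct ipflow *ipf)
{
	ipf->ipf_uses++;
	if (TAILQ_FIRST(&ipflowlist) != ipf) {
		TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
		TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
	}
	ipf->ipf_timer = IPFLOW_TIMER;
}

Because every hit re-arms the timer, only genuinely idle flows expire; the LRU move keeps the tail of the global list pointing at the best eviction candidate.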
368 ipflow_addstats(struct ipflow *ipf)
373 rt = rtcache_validate(&ipf->ipf_ro);
375 rt->rt_use += ipf->ipf_uses;
376 rtcache_unref(rt, &ipf->ipf_ro);
381 ipf->ipf_errors + ipf->ipf_dropped);
382 _NET_STATADD_REF(ips, IP_STAT_TOTAL, ipf->ipf_uses);
383 _NET_STATADD_REF(ips, IP_STAT_FORWARD, ipf->ipf_uses);
384 _NET_STATADD_REF(ips, IP_STAT_FASTFORWARD, ipf->ipf_uses);
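ipflow_addstats() is called whenever an entry is about to be recycled or destroyed: its per-flow counters are folded back into the global IP statistics (and, in the kernel, ipf_uses is also credited to the cached route's rt_use, which this sketch leaves out). A sketch with a hypothetical flat counter array standing in for the IP_STAT_* machinery:

/* Hypothetical stand-ins for the kernel's IP_STAT_* counters. */
enum { STAT_CANTFORWARD, STAT_TOTAL, STAT_FORWARD, STAT_FASTFORWARD, STAT_MAX };
static uint64_t ipstats[STAT_MAX];

/* Mirrors ipflow_addstats(): nothing the flow counted is lost when it dies. */
static void
flow_addstats(const struct ipflow *ipf)
{
	ipstats[STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ipstats[STAT_TOTAL]       += ipf->ipf_uses;
	ipstats[STAT_FORWARD]     += ipf->ipf_uses;
	ipstats[STAT_FASTFORWARD] += ipf->ipf_uses;
}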
389 ipflow_free(struct ipflow *ipf)
399 IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);
401 ipflow_addstats(ipf);
402 rtcache_free(&ipf->ipf_ro);
404 pool_put(&ipflow_pool, ipf);
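ipflow_free() shows the teardown order: unlink from both queues first, then flush the counters, release the cached route, and return the entry to its pool. In the sketch, plain free() stands in for pool_put() and the route cache is again not modeled:

#include <stdlib.h>

static unsigned ipflow_inuse;	/* count of live entries, as in the kernel */

/* Mirrors ipflow_free(): unlink, flush stats, then release the memory. */
static void
flow_free(struct ipflow *ipf)
{
	flow_remove(ipf->ipf_hashidx, ipf);
	flow_addstats(ipf);
	free(ipf);		/* pool_put(&ipflow_pool, ipf) in the kernel */
	ipflow_inuse--;
}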
410 struct ipflow *ipf;
419 ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
420 KASSERT(ipf != NULL);
422 IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);
424 ipflow_addstats(ipf);
425 rtcache_free(&ipf->ipf_ro);
426 return ipf;
437 TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
443 rt = rtcache_validate(&ipf->ipf_ro);
446 rtcache_unref(rt, &ipf->ipf_ro);
452 if (ipf->ipf_timer < maybe_ipf->ipf_timer
453 || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
454 && (ipf->ipf_last_uses + ipf->ipf_uses
456 maybe_ipf = ipf;
458 ipf = maybe_ipf;
463 IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);
465 ipflow_addstats(ipf);
466 rtcache_free(&ipf->ipf_ro);
467 pool_put(&ipflow_pool, ipf);
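ipflow_reap() has two modes visible here. In the first, it simply takes TAILQ_LAST(&ipflowlist, ipflowhead), the LRU tail, unlinks it, flushes its stats, and hands the entry back for reuse. Otherwise it scans the whole list for the best victim: the entry closest to expiry, with ties broken by the least traffic over the current and previous intervals (source line 455 completes that comparison but only names maybe_ipf, which is why the condition looks truncated in this listing). It also reclaims on the spot any entry whose cached route no longer validates, as the rtcache_validate() lines above show. A sketch of the selection logic, with the route-cache check omitted:

/* Mirrors the victim scan in ipflow_reap(): soonest to expire wins;
 * on equal timers, prefer the flow that has carried the least traffic. */
static struct ipflow *
flow_reap_victim(void)
{
	struct ipflow *ipf, *maybe_ipf = NULL;

	TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
		if (maybe_ipf == NULL
		    || ipf->ipf_timer < maybe_ipf->ipf_timer
		    || (ipf->ipf_timer == maybe_ipf->ipf_timer
			&& ipf->ipf_last_uses + ipf->ipf_uses
			< maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses))
			maybe_ipf = ipf;
	}
	return maybe_ipf;
}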
479 struct ipflow *ipf, *next_ipf;
487 for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
488 next_ipf = TAILQ_NEXT(ipf, ipf_list);
489 if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
490 (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
491 ipflow_free(ipf);
493 ipf->ipf_last_uses = ipf->ipf_uses;
494 rt->rt_use += ipf->ipf_uses;
495 rtcache_unref(rt, &ipf->ipf_ro);
497 _NET_STATADD_REF(ips, IP_STAT_TOTAL, ipf->ipf_uses);
498 _NET_STATADD_REF(ips, IP_STAT_FORWARD, ipf->ipf_uses);
500 ipf->ipf_uses);
502 ipf->ipf_uses = 0;
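ipflow_slowtimo() runs off the slow timer: expired flows, and flows whose cached route has gone stale, are freed; live flows have their interval usage rolled into the global statistics (and, in the kernel, into rt_use), then reset. Continuing the sketch, with a plain countdown replacing PRT_SLOW_ISEXPIRED() and the route cache left out:

/* Mirrors ipflow_slowtimo(): TAILQ_NEXT is saved up front because
 * flow_free() unlinks the entry the loop is standing on. */
static void
flow_slowtimo(void)
{
	struct ipflow *ipf, *next_ipf;

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (--ipf->ipf_timer <= 0) {
			flow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			ipstats[STAT_TOTAL]       += ipf->ipf_uses;
			ipstats[STAT_FORWARD]     += ipf->ipf_uses;
			ipstats[STAT_FASTFORWARD] += ipf->ipf_uses;
			ipf->ipf_uses = 0;
		}
	}
}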
524 struct ipflow *ipf;
541 ipf = ipflow_lookup(ip);
542 if (ipf == NULL) {
544 ipf = ipflow_reap(true);
546 ipf = pool_get(&ipflow_pool, PR_NOWAIT);
547 if (ipf == NULL)
551 memset(ipf, 0, sizeof(*ipf));
553 IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);
555 ipflow_addstats(ipf);
556 rtcache_free(&ipf->ipf_ro);
557 ipf->ipf_uses = ipf->ipf_last_uses = 0;
558 ipf->ipf_errors = ipf->ipf_dropped = 0;
564 rtcache_copy(&ipf->ipf_ro, ro);
565 ipf->ipf_dst = ip->ip_dst;
566 ipf->ipf_src = ip->ip_src;
567 ipf->ipf_tos = ip->ip_tos;
568 PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);
574 IPFLOW_INSERT(hash, ipf);
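ipflow_create() ties the pieces together. If the same key is already cached, the old entry is unlinked and its stats flushed so it can be re-keyed cleanly; if the table is full, the reap path donates a recycled entry; otherwise a fresh one comes from the pool and is zeroed. Either way the key and timer are set (the kernel also copies the caller's route in with rtcache_copy() at this point) before the entry is inserted at its bucket. A sketch under the same assumptions as above; IPFLOW_MAX is an assumed cap on live entries:

#define IPFLOW_MAX 256		/* assumed cap on live flow entries */

/* Mirrors ipflow_create(): reuse, reap, or allocate, then (re)key. */
static struct ipflow *
flow_create(struct in_addr dst, struct in_addr src, uint8_t tos)
{
	struct ipflow *ipf = flow_lookup(dst, src, tos);

	if (ipf == NULL) {
		if (ipflow_inuse >= IPFLOW_MAX) {
			/* Table full: recycle the best victim for reuse. */
			ipf = flow_reap_victim();
			if (ipf == NULL)
				return NULL;
			flow_remove(ipf->ipf_hashidx, ipf);
			flow_addstats(ipf);
		} else {
			ipf = calloc(1, sizeof(*ipf));	/* pool_get() + memset() */
			if (ipf == NULL)
				return NULL;
			ipflow_inuse++;
		}
	} else {
		/* Key already cached: unlink and flush before re-keying. */
		flow_remove(ipf->ipf_hashidx, ipf);
		flow_addstats(ipf);
	}
	ipf->ipf_uses = ipf->ipf_last_uses = 0;
	ipf->ipf_errors = ipf->ipf_dropped = 0;
	ipf->ipf_dst = dst;
	ipf->ipf_src = src;
	ipf->ipf_tos = tos;
	ipf->ipf_timer = IPFLOW_TIMER;
	flow_insert(flow_hash(dst, src, tos), ipf);
	return ipf;
}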
584 struct ipflow *ipf, *next_ipf;
591 for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
592 next_ipf = TAILQ_NEXT(ipf, ipf_list);
593 ipflow_free(ipf);
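The final loop is a full purge (the shape suggests an invalidate-all or reinit path): every flow on the global list is freed, with the next pointer saved before each unlink, exactly as in the slow-timer loop. The sketch equivalent, plus a short usage run that exercises the whole model:

/* Free every flow; afterwards both the buckets and the LRU list are empty. */
static void
flow_purge(void)
{
	struct ipflow *ipf, *next_ipf;

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		flow_free(ipf);
	}
}

int
main(void)
{
	flow_init();
	struct in_addr d = { .s_addr = htonl(0x0a000001) };	/* 10.0.0.1 */
	struct in_addr s = { .s_addr = htonl(0x0a000002) };	/* 10.0.0.2 */
	struct ipflow *ipf = flow_create(d, s, 0);
	if (ipf != NULL)
		flow_hit(ipf);		/* fast-forward hit bookkeeping */
	flow_slowtimo();		/* one slow-timer tick */
	flow_purge();			/* tear everything down */
	return 0;
}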