
Lines Matching defs:frag

215 	struct pf_fragment	*frag;
219 while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
220 KASSERT(BUFFER_FRAGMENTS(frag));
221 if (frag->fr_timeout > expire)
224 DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
225 pf_free_fragment(frag);
228 while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
229 KASSERT(!BUFFER_FRAGMENTS(frag));
230 if (frag->fr_timeout > expire)
233 DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
234 pf_free_fragment(frag);
236 TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
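The two loops above (source lines 219-236) are pf's fragment-expiration walk: one pass over the buffering queue (pf_fragqueue) and one over the non-buffering cache queue (pf_cachequeue). Because lookups and inserts place descriptors at the head of the queue (see lines 329-333, 396 and 605 below), the tail always holds the least recently used entry, so each walk starts at TAILQ_LAST() and stops at the first entry whose fr_timeout is newer than the expiry threshold. A minimal stand-alone sketch of the same pattern, using the BSD <sys/queue.h> macros and a hypothetical entry/expire_old pair rather than the real pf structures:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Simplified stand-in for struct pf_fragment: a timestamp and a list hook. */
struct entry {
	time_t			timeout;	/* last time the entry was touched */
	TAILQ_ENTRY(entry)	next;
};

TAILQ_HEAD(entry_queue, entry);

/*
 * Expire entries older than 'ttl' seconds.  New or touched entries are
 * inserted at the head, so the tail always holds the oldest entry and the
 * walk can stop at the first entry that has not yet expired.
 */
static void
expire_old(struct entry_queue *q, time_t now, time_t ttl)
{
	struct entry *e;
	time_t expire = now - ttl;

	while ((e = TAILQ_LAST(q, entry_queue)) != NULL) {
		if (e->timeout > expire)
			break;			/* everything further in is newer */
		printf("expiring %p\n", (void *)e);
		TAILQ_REMOVE(q, e, next);
		free(e);
	}
}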
247 struct pf_fragment *frag;
254 frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
255 if (frag == NULL)
257 pf_free_fragment(frag);
265 frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
266 if (frag == NULL)
268 pf_free_fragment(frag);
275 pf_free_fragment(struct pf_fragment *frag)
281 if (BUFFER_FRAGMENTS(frag)) {
282 for (frent = LIST_FIRST(&frag->fr_queue); frent;
283 frent = LIST_FIRST(&frag->fr_queue)) {
291 for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
292 frcache = LIST_FIRST(&frag->fr_cache)) {
295 KASSERT(LIST_EMPTY(&frag->fr_cache) ||
296 LIST_FIRST(&frag->fr_cache)->fr_off >
304 pf_remove_fragment(frag);
320 struct pf_fragment *frag;
324 frag = RB_FIND(pf_frag_tree, tree, &key);
325 if (frag != NULL) {
327 frag->fr_timeout = time_second;
328 if (BUFFER_FRAGMENTS(frag)) {
329 TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
330 TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
332 TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
333 TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
337 return (frag);
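pf_find_fragment() (source lines 320-337) looks the arriving header up in a red-black tree and, on a hit, refreshes fr_timeout and moves the descriptor to the head of its queue so the expiration walk above keeps finding the oldest entry at the tail. The sketch below covers only the tree-lookup half, using the BSD <sys/tree.h> macros; the key fields mirror what the matched lines copy out of the IP header (src, dst, protocol, id), but the comparison order is an assumption, since pf_frag_compare() itself is not among the matched lines.

#include <sys/types.h>
#include <sys/tree.h>
#include <netinet/in.h>
#include <netinet/ip.h>

/* Hypothetical per-chain key: the fields the matched lines take from the header. */
struct frag_node {
	struct in_addr		src;
	struct in_addr		dst;
	u_int16_t		id;
	u_int8_t		proto;
	RB_ENTRY(frag_node)	entry;
};

/* Ordering is an assumption; any total order over the four fields works. */
static int
frag_cmp(struct frag_node *a, struct frag_node *b)
{
	int diff;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->id - b->id) != 0)
		return (diff);
	if (a->src.s_addr != b->src.s_addr)
		return (a->src.s_addr > b->src.s_addr ? 1 : -1);
	if (a->dst.s_addr != b->dst.s_addr)
		return (a->dst.s_addr > b->dst.s_addr ? 1 : -1);
	return (0);
}

RB_HEAD(frag_tree, frag_node);
RB_PROTOTYPE(frag_tree, frag_node, entry, frag_cmp);
RB_GENERATE(frag_tree, frag_node, entry, frag_cmp);

/* Build a key from the arriving header and look up its reassembly chain. */
static struct frag_node *
frag_find(struct frag_tree *tree, const struct ip *h)
{
	struct frag_node key;

	key.src = h->ip_src;
	key.dst = h->ip_dst;
	key.id = h->ip_id;
	key.proto = h->ip_p;
	return (RB_FIND(frag_tree, tree, &key));
}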
343 pf_remove_fragment(struct pf_fragment *frag)
345 if (BUFFER_FRAGMENTS(frag)) {
346 RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
347 TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
348 pool_put(&pf_frag_pl, frag);
350 RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
351 TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
352 pool_put(&pf_cache_pl, frag);
358 pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
370 KASSERT(*frag == NULL || BUFFER_FRAGMENTS(*frag));
377 if (*frag == NULL) {
378 *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
379 if (*frag == NULL) {
381 *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
382 if (*frag == NULL)
386 (*frag)->fr_flags = 0;
387 (*frag)->fr_max = 0;
388 (*frag)->fr_src = frent->fr_ip->ip_src;
389 (*frag)->fr_dst = frent->fr_ip->ip_dst;
390 (*frag)->fr_p = frent->fr_ip->ip_p;
391 (*frag)->fr_id = frent->fr_ip->ip_id;
392 (*frag)->fr_timeout = time_second;
393 LIST_INIT(&(*frag)->fr_queue);
395 RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
396 TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);
407 LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
462 if ((*frag)->fr_max < frmax)
463 (*frag)->fr_max = frmax;
466 (*frag)->fr_flags |= PFFRAG_SEENLAST;
469 LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
474 if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
479 for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
483 if (off < (*frag)->fr_max &&
488 (*frag)->fr_max));
492 DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
493 if (off < (*frag)->fr_max)
497 frent = LIST_FIRST(&(*frag)->fr_queue);
501 pf_free_fragment(*frag);
502 *frag = NULL;
524 ip->ip_src = (*frag)->fr_src;
525 ip->ip_dst = (*frag)->fr_dst;
528 pf_remove_fragment(*frag);
529 *frag = NULL;
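Source lines 377-382 (and again 573-578 below) show how a new descriptor is obtained: a non-blocking pool_get(), and if the pool is exhausted, one call to pf_flush_fragments() (lines 247-268, which frees the oldest entry on each queue) followed by a single retry before the packet is dropped. A userland sketch of that allocate/evict/retry shape, with malloc() and a hypothetical evict_oldest() standing in for the pool and flush routines:

#include <stdlib.h>

/*
 * Stand-in for pf_flush_fragments(): in pf this frees the oldest descriptor
 * on each fragment queue to relieve memory pressure.
 */
static void
evict_oldest(void)
{
	/* drop the oldest cached fragment state here */
}

/*
 * Non-blocking allocation with one eviction-and-retry, mirroring the
 * pool_get(PR_NOWAIT) / pf_flush_fragments() / pool_get() sequence.
 */
static void *
alloc_with_eviction(size_t size)
{
	void *p;

	if ((p = malloc(size)) == NULL) {
		evict_oldest();
		if ((p = malloc(size)) == NULL)
			return (NULL);	/* still nothing: caller drops the packet */
	}
	return (p);
}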
560 pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
570 KASSERT(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
573 if (*frag == NULL) {
574 *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
575 if (*frag == NULL) {
577 *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
578 if (*frag == NULL)
585 pool_put(&pf_cache_pl, *frag);
586 *frag = NULL;
591 (*frag)->fr_flags = PFFRAG_NOBUFFER;
592 (*frag)->fr_max = 0;
593 (*frag)->fr_src = h->ip_src;
594 (*frag)->fr_dst = h->ip_dst;
595 (*frag)->fr_p = h->ip_p;
596 (*frag)->fr_id = h->ip_id;
597 (*frag)->fr_timeout = time_second;
601 LIST_INIT(&(*frag)->fr_cache);
602 LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
604 RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
605 TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
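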
617 LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
654 /* Update the previous frag to encompass this one */
799 * free the overall descriptor. Thus we drop the frag late.
807 if ((*frag)->fr_max < frmax)
808 (*frag)->fr_max = frmax;
812 (*frag)->fr_flags |= PFFRAG_SEENLAST;
815 if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
816 LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
817 LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
820 (*frag)->fr_max));
821 pf_free_fragment(*frag);
822 *frag = NULL;
831 if (!mff && *frag != NULL)
832 (*frag)->fr_flags |= PFFRAG_SEENLAST;
840 if (!mff && *frag != NULL)
841 (*frag)->fr_flags |= PFFRAG_SEENLAST;
845 if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
848 (*frag)->fr_flags |= PFFRAG_DROP;
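In the non-buffering path, pf_fragcache() only records which ranges of the packet have been seen (fr_cache), coalescing overlaps as it goes; source lines 815-822 declare the packet complete, and free the descriptor, once the last fragment has been seen and the list has collapsed to a single range from 0 to fr_max. A sketch of that completeness test over a hypothetical, already-coalesced range list:

#include <sys/types.h>
#include <sys/queue.h>
#include <stdbool.h>

/* Hypothetical cached range; pf keeps these sorted and coalesced. */
struct frcache {
	u_int16_t		fr_off;		/* first byte covered */
	u_int16_t		fr_end;		/* first byte past the range */
	LIST_ENTRY(frcache)	fr_next;
};

LIST_HEAD(frcache_list, frcache);

/*
 * Complete when the last fragment has been seen and the coalesced list has
 * collapsed to one leading range covering [0, frmax) -- the shape of the
 * test at source lines 815-817.
 */
static bool
cache_complete(struct frcache_list *l, bool seen_last, u_int16_t frmax)
{
	struct frcache *first = LIST_FIRST(l);

	return (seen_last && first != NULL &&
	    first->fr_off == 0 && first->fr_end == frmax);
}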
862 struct pf_fragment *frag = NULL;
946 frag = pf_find_fragment(h, &pf_frag_tree);
949 if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
950 frmax > frag->fr_max)
964 DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, frmax));
965 *m0 = m = pf_reassemble(m0, &frag, frent, mff);
970 if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
994 frag = pf_find_fragment(h, &pf_cache_tree);
997 if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
998 frmax > frag->fr_max) {
1000 frag->fr_flags |= PFFRAG_DROP;
1004 *m0 = m = pf_fragcache(m0, h, &frag, mff,
1024 if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
1085 if (frag != NULL)
1086 pf_free_fragment(frag);
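pf_normalize_ip() (source lines 862-1086) derives three values from the IPv4 header before choosing between full reassembly (pf_reassemble, line 965) and the crop/drop cache (pf_fragcache, line 1004): the fragment's byte offset, the more-fragments flag, and frmax, the first byte past the fragment, which is checked against fr_max and PFFRAG_SEENLAST at lines 949-950 and 997-1000. A hypothetical helper showing the standard IPv4 header arithmetic behind those values:

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

/*
 * Hypothetical helper: the IPv4 header arithmetic behind fragoff, mff and
 * frmax as used in pf_normalize_ip().
 */
static void
frag_extent(const struct ip *h, u_int16_t *fragoff, int *mff, u_int16_t *frmax)
{
	u_int16_t off = ntohs(h->ip_off);
	u_int16_t payload = ntohs(h->ip_len) - (h->ip_hl << 2);

	*fragoff = (off & IP_OFFMASK) << 3;	/* offset field is in 8-byte units */
	*mff = (off & IP_MF) != 0;		/* more fragments follow */
	*frmax = *fragoff + payload;		/* first byte past this fragment */
}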
1107 struct ip6_frag frag;
1244 if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1246 fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
1247 if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
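The IPv6 path (source lines 1107-1247) pulls the ip6_frag extension header and takes the offset from ip6f_offlg. Because IP6F_OFF_MASK is defined in network byte order, the mask is applied before ntohs(); and since the 13-bit offset field sits above the three flag bits, the masked, swapped value is already the offset in bytes (the 8-octet unit count shifted left by three). Line 1247 then rejects fragments whose reassembled size would exceed IPV6_MAXPACKET. A hypothetical decode helper for the fields used there:

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip6.h>

/*
 * Hypothetical decode of an IPv6 fragment header, as pulled at source
 * line 1244.  IP6F_OFF_MASK and IP6F_MORE_FRAG are kept in network byte
 * order, so they are applied before the byte swap; the result is the
 * fragment offset in bytes.
 */
static void
frag6_decode(const struct ip6_frag *fh, u_int16_t *fragoff, int *mff)
{
	*fragoff = ntohs(fh->ip6f_offlg & IP6F_OFF_MASK);
	*mff = (fh->ip6f_offlg & IP6F_MORE_FRAG) != 0;
}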