ip_reass.c revision 1.14
      1  1.14   maxv /*	$NetBSD: ip_reass.c,v 1.14 2018/03/09 11:57:38 maxv Exp $	*/
      2   1.1  rmind 
      3   1.1  rmind /*
      4   1.1  rmind  * Copyright (c) 1982, 1986, 1988, 1993
      5   1.1  rmind  *	The Regents of the University of California.  All rights reserved.
      6   1.1  rmind  *
      7   1.1  rmind  * Redistribution and use in source and binary forms, with or without
      8   1.1  rmind  * modification, are permitted provided that the following conditions
      9   1.1  rmind  * are met:
     10   1.1  rmind  * 1. Redistributions of source code must retain the above copyright
     11   1.1  rmind  *    notice, this list of conditions and the following disclaimer.
     12   1.1  rmind  * 2. Redistributions in binary form must reproduce the above copyright
     13   1.1  rmind  *    notice, this list of conditions and the following disclaimer in the
     14   1.1  rmind  *    documentation and/or other materials provided with the distribution.
     15   1.1  rmind  * 3. Neither the name of the University nor the names of its contributors
     16   1.1  rmind  *    may be used to endorse or promote products derived from this software
     17   1.1  rmind  *    without specific prior written permission.
     18   1.1  rmind  *
     19   1.1  rmind  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20   1.1  rmind  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21   1.1  rmind  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22   1.1  rmind  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23   1.1  rmind  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24   1.1  rmind  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25   1.1  rmind  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26   1.1  rmind  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27   1.1  rmind  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28   1.1  rmind  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29   1.1  rmind  * SUCH DAMAGE.
     30   1.1  rmind  *
     31   1.1  rmind  *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
     32   1.1  rmind  */
     33   1.1  rmind 
     34   1.1  rmind /*
     35   1.1  rmind  * IP reassembly.
     36   1.1  rmind  *
     37   1.1  rmind  * Additive-Increase/Multiplicative-Decrease (AIMD) strategy for IP
      38   1.1  rmind  * reassembly queue buffer management.
     39   1.1  rmind  *
      40   1.1  rmind  * We keep a count of total IP fragments (NB: not fragmented packets)
      41   1.1  rmind  * awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments.
      42   1.1  rmind  * If ip_nfrags exceeds the ip_maxfrags limit, we drop half the total
     43   1.1  rmind  * fragments in reassembly queues.  This AIMD policy avoids repeatedly
     44   1.1  rmind  * deleting single packets under heavy fragmentation load (e.g., from lossy
     45   1.1  rmind  * NFS peers).
     46   1.1  rmind  */
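
/*
 * A minimal sketch of the AIMD idea, assuming the counters and helpers
 * declared below (illustrative only, not the exact code path):
 *
 *	ip_nfrags++;				additive increase per fragment
 *	if (ip_nfrags >= ip_maxfrags)
 *		ip_reass_drophalf();		multiplicative decrease
 */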
     47   1.1  rmind 
     48   1.1  rmind #include <sys/cdefs.h>
     49  1.14   maxv __KERNEL_RCSID(0, "$NetBSD: ip_reass.c,v 1.14 2018/03/09 11:57:38 maxv Exp $");
     50   1.1  rmind 
     51   1.1  rmind #include <sys/param.h>
     52   1.2  rmind #include <sys/types.h>
     53   1.1  rmind 
     54   1.1  rmind #include <sys/malloc.h>
     55   1.1  rmind #include <sys/mbuf.h>
     56   1.4  rmind #include <sys/mutex.h>
     57   1.1  rmind #include <sys/pool.h>
     58   1.2  rmind #include <sys/queue.h>
     59   1.1  rmind #include <sys/sysctl.h>
     60   1.2  rmind #include <sys/systm.h>
     61   1.1  rmind 
     62   1.1  rmind #include <net/if.h>
     63   1.1  rmind 
     64   1.1  rmind #include <netinet/in.h>
     65   1.1  rmind #include <netinet/in_systm.h>
     66   1.1  rmind #include <netinet/ip.h>
     67   1.1  rmind #include <netinet/in_pcb.h>
     68   1.2  rmind #include <netinet/ip_var.h>
     69   1.1  rmind #include <netinet/ip_private.h>
     70   1.1  rmind #include <netinet/in_var.h>
     71   1.1  rmind 
     72   1.1  rmind /*
     73   1.3  rmind  * IP reassembly queue structures.  Each fragment being reassembled is
     74   1.3  rmind  * attached to one of these structures.  They are timed out after TTL
     75   1.3  rmind  * drops to 0, and may also be reclaimed if memory becomes tight.
     76   1.3  rmind  */
     77   1.3  rmind 
     78   1.3  rmind typedef struct ipfr_qent {
     79   1.3  rmind 	TAILQ_ENTRY(ipfr_qent)	ipqe_q;
     80   1.3  rmind 	struct ip *		ipqe_ip;
     81   1.3  rmind 	struct mbuf *		ipqe_m;
     82   1.3  rmind 	bool			ipqe_mff;
     83   1.3  rmind } ipfr_qent_t;
     84   1.3  rmind 
     85   1.7  rmind TAILQ_HEAD(ipfr_qent_head, ipfr_qent);
     86   1.7  rmind 
     87   1.3  rmind typedef struct ipfr_queue {
     88   1.3  rmind 	LIST_ENTRY(ipfr_queue)	ipq_q;		/* to other reass headers */
     89   1.7  rmind 	struct ipfr_qent_head	ipq_fragq;	/* queue of fragment entries */
     90   1.3  rmind 	uint8_t			ipq_ttl;	/* time for reass q to live */
     91   1.3  rmind 	uint8_t			ipq_p;		/* protocol of this fragment */
     92   1.3  rmind 	uint16_t		ipq_id;		/* sequence id for reassembly */
     93   1.3  rmind 	struct in_addr		ipq_src;
     94   1.3  rmind 	struct in_addr		ipq_dst;
     95   1.3  rmind 	uint16_t		ipq_nfrags;	/* frags in this queue entry */
     96   1.3  rmind 	uint8_t 		ipq_tos;	/* TOS of this fragment */
     97   1.3  rmind } ipfr_queue_t;
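
/*
 * Illustrative sketch of how the two structures relate: each datagram being
 * reassembled is one ipfr_queue_t hanging off a hash chain, and its fragments
 * are ipfr_qent_t entries kept in offset order on ipq_fragq, e.g.:
 *
 *	ipfr_qent_t *qe;
 *	TAILQ_FOREACH(qe, &fp->ipq_fragq, ipqe_q)
 *		printf("fragment at byte offset %u\n",
 *		    ntohs(qe->ipqe_ip->ip_off));
 *
 * (byte offsets as stored by ip_reass_packet() below).
 */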
     98   1.3  rmind 
     99   1.3  rmind /*
    100   1.3  rmind  * Hash table of IP reassembly queues.
    101   1.1  rmind  */
    102   1.1  rmind #define	IPREASS_HASH_SHIFT	6
    103   1.1  rmind #define	IPREASS_HASH_SIZE	(1 << IPREASS_HASH_SHIFT)
    104   1.1  rmind #define	IPREASS_HASH_MASK	(IPREASS_HASH_SIZE - 1)
    105   1.1  rmind #define	IPREASS_HASH(x, y) \
    106   1.1  rmind 	(((((x) & 0xf) | ((((x) >> 8) & 0xf) << 4)) ^ (y)) & IPREASS_HASH_MASK)
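
/*
 * Usage sketch: the code below picks a hash bucket from the low bits of the
 * source address combined with the IP id, e.g.
 *
 *	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
 *	LIST_FOREACH(fp, &ip_frags[hash], ipq_q)
 *		... match on id, src, dst and protocol ...
 */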
    107   1.1  rmind 
    108   1.3  rmind static LIST_HEAD(, ipfr_queue)	ip_frags[IPREASS_HASH_SIZE];
    109   1.4  rmind static pool_cache_t	ipfren_cache;
    110   1.4  rmind static kmutex_t		ipfr_lock;
    111   1.1  rmind 
    112   1.3  rmind /* Number of packets in reassembly queue and total number of fragments. */
    113   1.3  rmind static int		ip_nfragpackets;
    114   1.3  rmind static int		ip_nfrags;
    115   1.1  rmind 
    116   1.3  rmind /* Limits on packet and fragments. */
    117   1.3  rmind static int		ip_maxfragpackets;
    118   1.3  rmind static int		ip_maxfrags;
    119   1.1  rmind 
    120   1.1  rmind /*
     121   1.3  rmind  * Cached copy of nmbclusters.  If nmbclusters has changed, recalculate
     122   1.3  rmind  * the IP parameters derived from nmbclusters.
    123   1.2  rmind  */
    124   1.3  rmind static int		ip_nmbclusters;
    125   1.1  rmind 
    126   1.1  rmind /*
    127   1.1  rmind  * IP reassembly TTL machinery for multiplicative drop.
    128   1.1  rmind  */
    129   1.3  rmind static u_int		fragttl_histo[IPFRAGTTL + 1];
    130   1.1  rmind 
    131   1.4  rmind static struct sysctllog *ip_reass_sysctllog;
    132   1.4  rmind 
    133   1.3  rmind void			sysctl_ip_reass_setup(void);
    134   1.3  rmind static void		ip_nmbclusters_changed(void);
    135   1.2  rmind 
    136   1.3  rmind static struct mbuf *	ip_reass(ipfr_qent_t *, ipfr_queue_t *, u_int);
    137   1.2  rmind static u_int		ip_reass_ttl_decr(u_int ticks);
    138   1.2  rmind static void		ip_reass_drophalf(void);
    139   1.3  rmind static void		ip_freef(ipfr_queue_t *);
    140   1.1  rmind 
    141   1.1  rmind /*
    142   1.1  rmind  * ip_reass_init:
    143   1.1  rmind  *
    144   1.1  rmind  *	Initialization of IP reassembly mechanism.
    145   1.1  rmind  */
    146   1.1  rmind void
    147   1.1  rmind ip_reass_init(void)
    148   1.1  rmind {
    149   1.1  rmind 	int i;
    150   1.1  rmind 
    151   1.4  rmind 	ipfren_cache = pool_cache_init(sizeof(ipfr_qent_t), coherency_unit,
    152   1.4  rmind 	    0, 0, "ipfrenpl", NULL, IPL_NET, NULL, NULL, NULL);
    153   1.6   yamt 	mutex_init(&ipfr_lock, MUTEX_DEFAULT, IPL_VM);
    154   1.1  rmind 
    155   1.1  rmind 	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
    156   1.3  rmind 		LIST_INIT(&ip_frags[i]);
    157   1.1  rmind 	}
    158   1.1  rmind 	ip_maxfragpackets = 200;
    159   1.1  rmind 	ip_maxfrags = 0;
    160   1.1  rmind 	ip_nmbclusters_changed();
    161   1.1  rmind 
    162   1.1  rmind 	sysctl_ip_reass_setup();
    163   1.1  rmind }
    164   1.1  rmind 
    165   1.1  rmind void
    166   1.1  rmind sysctl_ip_reass_setup(void)
    167   1.1  rmind {
    168   1.1  rmind 
    169   1.1  rmind 	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
    170   1.1  rmind 		CTLFLAG_PERMANENT,
    171   1.1  rmind 		CTLTYPE_NODE, "inet",
    172   1.1  rmind 		SYSCTL_DESCR("PF_INET related settings"),
    173   1.1  rmind 		NULL, 0, NULL, 0,
    174   1.1  rmind 		CTL_NET, PF_INET, CTL_EOL);
    175   1.1  rmind 	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
    176   1.1  rmind 		CTLFLAG_PERMANENT,
    177   1.1  rmind 		CTLTYPE_NODE, "ip",
    178   1.1  rmind 		SYSCTL_DESCR("IPv4 related settings"),
    179   1.1  rmind 		NULL, 0, NULL, 0,
    180   1.1  rmind 		CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);
    181   1.1  rmind 
    182   1.1  rmind 	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
    183   1.1  rmind 		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    184   1.1  rmind 		CTLTYPE_INT, "maxfragpackets",
    185   1.1  rmind 		SYSCTL_DESCR("Maximum number of fragments to retain for "
    186   1.1  rmind 			     "possible reassembly"),
    187   1.1  rmind 		NULL, 0, &ip_maxfragpackets, 0,
    188   1.1  rmind 		CTL_NET, PF_INET, IPPROTO_IP, IPCTL_MAXFRAGPACKETS, CTL_EOL);
    189   1.1  rmind }
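
/*
 * The knob created above appears in userland as net.inet.ip.maxfragpackets,
 * e.g. "sysctl -w net.inet.ip.maxfragpackets=400".  A value of 0 effectively
 * disables reassembly below, while a negative value removes the limit.
 */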
    190   1.1  rmind 
    191   1.1  rmind #define CHECK_NMBCLUSTER_PARAMS()				\
    192   1.1  rmind do {								\
    193   1.1  rmind 	if (__predict_false(ip_nmbclusters != nmbclusters))	\
    194   1.1  rmind 		ip_nmbclusters_changed();			\
    195   1.1  rmind } while (/*CONSTCOND*/0)
    196   1.1  rmind 
    197   1.1  rmind /*
    198   1.1  rmind  * Compute IP limits derived from the value of nmbclusters.
    199   1.1  rmind  */
    200   1.1  rmind static void
    201   1.1  rmind ip_nmbclusters_changed(void)
    202   1.1  rmind {
    203   1.1  rmind 	ip_maxfrags = nmbclusters / 4;
    204   1.1  rmind 	ip_nmbclusters = nmbclusters;
    205   1.1  rmind }
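
/*
 * For example, with nmbclusters = 4096 this yields ip_maxfrags = 1024, so
 * fragments awaiting reassembly are bounded by roughly a quarter of the
 * mbuf cluster count.
 */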
    206   1.1  rmind 
    207   1.1  rmind /*
    208   1.1  rmind  * ip_reass:
    209   1.1  rmind  *
     210   1.1  rmind  *	Take an incoming datagram fragment and try to reassemble it into a
     211   1.1  rmind  *	whole datagram.  If a chain for reassembly of this datagram already
     212   1.1  rmind  *	exists, it is given as 'fp'; otherwise we have to make a chain.
    213   1.1  rmind  */
    214   1.1  rmind struct mbuf *
    215   1.3  rmind ip_reass(ipfr_qent_t *ipqe, ipfr_queue_t *fp, const u_int hash)
    216   1.1  rmind {
    217   1.7  rmind 	struct ip *ip = ipqe->ipqe_ip, *qip;
    218   1.7  rmind 	const int hlen = ip->ip_hl << 2;
    219   1.1  rmind 	struct mbuf *m = ipqe->ipqe_m, *t;
    220   1.3  rmind 	ipfr_qent_t *nq, *p, *q;
    221   1.4  rmind 	int i, next;
    222   1.1  rmind 
    223   1.4  rmind 	KASSERT(mutex_owned(&ipfr_lock));
    224   1.1  rmind 
    225   1.1  rmind 	/*
    226   1.1  rmind 	 * Presence of header sizes in mbufs would confuse code below.
    227   1.1  rmind 	 */
    228   1.1  rmind 	m->m_data += hlen;
    229   1.1  rmind 	m->m_len -= hlen;
    230   1.1  rmind 
    231   1.1  rmind #ifdef	notyet
    232   1.1  rmind 	/* Make sure fragment limit is up-to-date. */
    233   1.1  rmind 	CHECK_NMBCLUSTER_PARAMS();
    234   1.1  rmind 
    235   1.1  rmind 	/* If we have too many fragments, drop the older half. */
    236   1.1  rmind 	if (ip_nfrags >= ip_maxfrags) {
    237   1.1  rmind 		ip_reass_drophalf(void);
    238   1.1  rmind 	}
    239   1.1  rmind #endif
    240   1.1  rmind 
    241   1.1  rmind 	/*
    242   1.1  rmind 	 * We are about to add a fragment; increment frag count.
    243   1.1  rmind 	 */
    244   1.1  rmind 	ip_nfrags++;
    245   1.1  rmind 
    246   1.1  rmind 	/*
    247   1.1  rmind 	 * If first fragment to arrive, create a reassembly queue.
    248   1.1  rmind 	 */
    249   1.1  rmind 	if (fp == NULL) {
    250   1.1  rmind 		/*
    251   1.1  rmind 		 * Enforce upper bound on number of fragmented packets
    252   1.1  rmind 		 * for which we attempt reassembly:  a) if maxfrag is 0,
    253   1.1  rmind 		 * never accept fragments  b) if maxfrag is -1, accept
    254   1.1  rmind 		 * all fragments without limitation.
    255   1.1  rmind 		 */
    256   1.1  rmind 		if (ip_maxfragpackets < 0)
    257   1.1  rmind 			;
    258   1.1  rmind 		else if (ip_nfragpackets >= ip_maxfragpackets) {
    259   1.1  rmind 			goto dropfrag;
    260   1.1  rmind 		}
    261   1.3  rmind 		fp = malloc(sizeof(ipfr_queue_t), M_FTABLE, M_NOWAIT);
    262   1.1  rmind 		if (fp == NULL) {
    263   1.1  rmind 			goto dropfrag;
    264   1.1  rmind 		}
    265   1.8  enami 		ip_nfragpackets++;
    266   1.7  rmind 		TAILQ_INIT(&fp->ipq_fragq);
    267   1.1  rmind 		fp->ipq_nfrags = 1;
    268   1.1  rmind 		fp->ipq_ttl = IPFRAGTTL;
    269   1.7  rmind 		fp->ipq_p = ip->ip_p;
    270   1.7  rmind 		fp->ipq_id = ip->ip_id;
    271   1.7  rmind 		fp->ipq_tos = ip->ip_tos;
    272   1.7  rmind 		fp->ipq_src = ip->ip_src;
    273   1.7  rmind 		fp->ipq_dst = ip->ip_dst;
    274   1.7  rmind 		LIST_INSERT_HEAD(&ip_frags[hash], fp, ipq_q);
    275   1.1  rmind 		p = NULL;
    276   1.1  rmind 		goto insert;
    277   1.1  rmind 	} else {
    278   1.1  rmind 		fp->ipq_nfrags++;
    279   1.1  rmind 	}
    280   1.1  rmind 
    281   1.1  rmind 	/*
    282   1.1  rmind 	 * Find a segment which begins after this one does.
    283   1.1  rmind 	 */
    284   1.7  rmind 	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
    285   1.7  rmind 		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ip->ip_off))
    286   1.1  rmind 			break;
    287   1.7  rmind 	}
    288   1.7  rmind 	if (q != NULL) {
    289   1.7  rmind 		p = TAILQ_PREV(q, ipfr_qent_head, ipqe_q);
    290   1.7  rmind 	} else {
    291   1.7  rmind 		p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
    292   1.7  rmind 	}
    293   1.1  rmind 
    294   1.1  rmind 	/*
    295   1.1  rmind 	 * If there is a preceding segment, it may provide some of our
    296   1.1  rmind 	 * data already.  If so, drop the data from the incoming segment.
    297   1.1  rmind 	 * If it provides all of our data, drop us.
    298   1.1  rmind 	 */
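	/*
	 * For example, if the preceding fragment covers bytes 0..1023 and the
	 * incoming one claims to start at byte 1000 with 512 bytes of data,
	 * then i = 24: the first 24 bytes are trimmed and the fragment is
	 * re-labelled as starting at byte 1024 with 488 bytes.
	 */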
    299   1.1  rmind 	if (p != NULL) {
    300   1.1  rmind 		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
    301   1.7  rmind 		    ntohs(ip->ip_off);
    302   1.1  rmind 		if (i > 0) {
    303   1.7  rmind 			if (i >= ntohs(ip->ip_len)) {
    304   1.1  rmind 				goto dropfrag;
    305   1.1  rmind 			}
    306   1.1  rmind 			m_adj(ipqe->ipqe_m, i);
    307   1.7  rmind 			ip->ip_off = htons(ntohs(ip->ip_off) + i);
    308   1.7  rmind 			ip->ip_len = htons(ntohs(ip->ip_len) - i);
    309   1.1  rmind 		}
    310   1.1  rmind 	}
    311   1.1  rmind 
    312   1.1  rmind 	/*
    313   1.1  rmind 	 * While we overlap succeeding segments trim them or, if they are
    314   1.1  rmind 	 * completely covered, dequeue them.
    315   1.1  rmind 	 */
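	/*
	 * Continuing the example above: the fragment now spans bytes
	 * 1024..1511, so a queued successor claiming 1200..1399 is entirely
	 * covered and gets dequeued, while one claiming 1400..1899 merely
	 * loses its first 112 bytes.
	 */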
    316   1.7  rmind 	while (q != NULL) {
    317   1.7  rmind 		size_t end;
    318   1.7  rmind 
    319   1.7  rmind 		qip = q->ipqe_ip;
    320   1.7  rmind 		end = ntohs(ip->ip_off) + ntohs(ip->ip_len);
    321   1.7  rmind 		if (end <= ntohs(qip->ip_off)) {
    322   1.7  rmind 			break;
    323   1.7  rmind 		}
    324   1.7  rmind 		i = end - ntohs(qip->ip_off);
    325   1.7  rmind 		if (i < ntohs(qip->ip_len)) {
    326   1.7  rmind 			qip->ip_len = htons(ntohs(qip->ip_len) - i);
    327   1.7  rmind 			qip->ip_off = htons(ntohs(qip->ip_off) + i);
    328   1.1  rmind 			m_adj(q->ipqe_m, i);
    329   1.1  rmind 			break;
    330   1.1  rmind 		}
    331   1.1  rmind 		nq = TAILQ_NEXT(q, ipqe_q);
    332   1.1  rmind 		m_freem(q->ipqe_m);
    333   1.1  rmind 		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
    334   1.4  rmind 		pool_cache_put(ipfren_cache, q);
    335   1.1  rmind 		fp->ipq_nfrags--;
    336   1.1  rmind 		ip_nfrags--;
    337   1.7  rmind 		q = nq;
    338   1.1  rmind 	}
    339   1.1  rmind 
    340   1.1  rmind insert:
    341   1.1  rmind 	/*
    342   1.1  rmind 	 * Stick new segment in its place; check for complete reassembly.
    343   1.1  rmind 	 */
    344   1.1  rmind 	if (p == NULL) {
    345   1.1  rmind 		TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
    346   1.1  rmind 	} else {
    347   1.1  rmind 		TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q);
    348   1.1  rmind 	}
    349   1.1  rmind 	next = 0;
    350   1.7  rmind 	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
    351   1.7  rmind 		qip = q->ipqe_ip;
    352   1.7  rmind 		if (ntohs(qip->ip_off) != next) {
    353   1.4  rmind 			mutex_exit(&ipfr_lock);
    354   1.1  rmind 			return NULL;
    355   1.1  rmind 		}
    356   1.7  rmind 		next += ntohs(qip->ip_len);
    357   1.1  rmind 	}
    358   1.7  rmind 	p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
    359   1.1  rmind 	if (p->ipqe_mff) {
    360   1.4  rmind 		mutex_exit(&ipfr_lock);
    361   1.1  rmind 		return NULL;
    362   1.1  rmind 	}
    363   1.7  rmind 
    364   1.1  rmind 	/*
    365   1.4  rmind 	 * Reassembly is complete.  Check for a bogus message size.
    366   1.1  rmind 	 */
    367   1.1  rmind 	q = TAILQ_FIRST(&fp->ipq_fragq);
    368   1.1  rmind 	ip = q->ipqe_ip;
    369   1.1  rmind 	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
    370   1.1  rmind 		IP_STATINC(IP_STAT_TOOLONG);
    371   1.1  rmind 		ip_freef(fp);
    372   1.4  rmind 		mutex_exit(&ipfr_lock);
    373   1.1  rmind 		return NULL;
    374   1.1  rmind 	}
    375   1.4  rmind 	LIST_REMOVE(fp, ipq_q);
    376   1.4  rmind 	ip_nfrags -= fp->ipq_nfrags;
    377   1.4  rmind 	ip_nfragpackets--;
    378   1.4  rmind 	mutex_exit(&ipfr_lock);
    379   1.4  rmind 
    380   1.4  rmind 	/* Concatenate all fragments. */
    381   1.1  rmind 	m = q->ipqe_m;
    382   1.1  rmind 	t = m->m_next;
    383   1.1  rmind 	m->m_next = NULL;
    384   1.1  rmind 	m_cat(m, t);
    385   1.1  rmind 	nq = TAILQ_NEXT(q, ipqe_q);
    386   1.4  rmind 	pool_cache_put(ipfren_cache, q);
    387   1.4  rmind 
    388   1.1  rmind 	for (q = nq; q != NULL; q = nq) {
    389   1.1  rmind 		t = q->ipqe_m;
    390   1.1  rmind 		nq = TAILQ_NEXT(q, ipqe_q);
    391   1.4  rmind 		pool_cache_put(ipfren_cache, q);
    392  1.14   maxv 		m_pkthdr_remove(t);
    393   1.1  rmind 		m_cat(m, t);
    394   1.1  rmind 	}
    395   1.1  rmind 
    396   1.1  rmind 	/*
    397   1.1  rmind 	 * Create header for new packet by modifying header of first
    398   1.1  rmind 	 * packet.  Dequeue and discard fragment reassembly header.  Make
    399   1.1  rmind 	 * header visible.
    400   1.1  rmind 	 */
    401   1.2  rmind 	ip->ip_len = htons((ip->ip_hl << 2) + next);
    402   1.1  rmind 	ip->ip_src = fp->ipq_src;
    403   1.1  rmind 	ip->ip_dst = fp->ipq_dst;
    404   1.5  enami 	free(fp, M_FTABLE);
    405   1.2  rmind 
    406   1.1  rmind 	m->m_len += (ip->ip_hl << 2);
    407   1.1  rmind 	m->m_data -= (ip->ip_hl << 2);
    408   1.4  rmind 
    409   1.4  rmind 	/* Fix up mbuf.  XXX This should be done elsewhere. */
    410  1.14   maxv 	{
    411  1.14   maxv 		KASSERT(m->m_flags & M_PKTHDR);
    412   1.1  rmind 		int plen = 0;
    413   1.1  rmind 		for (t = m; t; t = t->m_next) {
    414   1.1  rmind 			plen += t->m_len;
    415   1.1  rmind 		}
    416   1.1  rmind 		m->m_pkthdr.len = plen;
    417   1.1  rmind 		m->m_pkthdr.csum_flags = 0;
    418   1.1  rmind 	}
    419   1.1  rmind 	return m;
    420   1.1  rmind 
    421   1.1  rmind dropfrag:
    422   1.1  rmind 	if (fp != NULL) {
    423   1.1  rmind 		fp->ipq_nfrags--;
    424   1.1  rmind 	}
    425   1.1  rmind 	ip_nfrags--;
    426   1.1  rmind 	IP_STATINC(IP_STAT_FRAGDROPPED);
    427   1.4  rmind 	mutex_exit(&ipfr_lock);
    428   1.4  rmind 
    429   1.4  rmind 	pool_cache_put(ipfren_cache, ipqe);
    430   1.1  rmind 	m_freem(m);
    431   1.1  rmind 	return NULL;
    432   1.1  rmind }
    433   1.1  rmind 
    434   1.1  rmind /*
    435   1.1  rmind  * ip_freef:
    436   1.1  rmind  *
    437   1.1  rmind  *	Free a fragment reassembly header and all associated datagrams.
    438   1.1  rmind  */
    439   1.2  rmind static void
    440   1.3  rmind ip_freef(ipfr_queue_t *fp)
    441   1.1  rmind {
    442   1.4  rmind 	ipfr_qent_t *q;
    443   1.1  rmind 
    444   1.4  rmind 	KASSERT(mutex_owned(&ipfr_lock));
    445   1.1  rmind 
    446   1.4  rmind 	LIST_REMOVE(fp, ipq_q);
    447   1.4  rmind 	ip_nfrags -= fp->ipq_nfrags;
    448   1.4  rmind 	ip_nfragpackets--;
    449   1.4  rmind 
    450   1.4  rmind 	while ((q = TAILQ_FIRST(&fp->ipq_fragq)) != NULL) {
    451   1.4  rmind 		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
    452   1.1  rmind 		m_freem(q->ipqe_m);
    453   1.4  rmind 		pool_cache_put(ipfren_cache, q);
    454   1.1  rmind 	}
    455   1.1  rmind 	free(fp, M_FTABLE);
    456   1.1  rmind }
    457   1.1  rmind 
    458   1.1  rmind /*
    459   1.1  rmind  * ip_reass_ttl_decr:
    460   1.1  rmind  *
     461   1.1  rmind  *	Decrement TTL of all reassembly queue entries by `ticks'.  Count
     462   1.1  rmind  *	the number of distinct fragments (as opposed to partial, fragmented
     463   1.1  rmind  *	datagrams) in the reassembly queue.  While we traverse the entire
    464   1.1  rmind  *	reassembly queue, compute and return the median TTL over all
    465   1.1  rmind  *	fragments.
    466   1.1  rmind  */
    467   1.1  rmind static u_int
    468   1.1  rmind ip_reass_ttl_decr(u_int ticks)
    469   1.1  rmind {
    470   1.1  rmind 	u_int nfrags, median, dropfraction, keepfraction;
    471   1.3  rmind 	ipfr_queue_t *fp, *nfp;
    472   1.1  rmind 	int i;
    473   1.1  rmind 
    474   1.1  rmind 	nfrags = 0;
    475   1.1  rmind 	memset(fragttl_histo, 0, sizeof(fragttl_histo));
    476   1.1  rmind 
    477   1.1  rmind 	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
    478   1.3  rmind 		for (fp = LIST_FIRST(&ip_frags[i]); fp != NULL; fp = nfp) {
    479   1.1  rmind 			fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ?
    480   1.1  rmind 			    0 : fp->ipq_ttl - ticks);
    481   1.1  rmind 			nfp = LIST_NEXT(fp, ipq_q);
    482   1.1  rmind 			if (fp->ipq_ttl == 0) {
    483   1.1  rmind 				IP_STATINC(IP_STAT_FRAGTIMEOUT);
    484   1.1  rmind 				ip_freef(fp);
    485   1.1  rmind 			} else {
    486   1.1  rmind 				nfrags += fp->ipq_nfrags;
    487   1.1  rmind 				fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags;
    488   1.1  rmind 			}
    489   1.1  rmind 		}
    490   1.1  rmind 	}
    491   1.1  rmind 
    492   1.1  rmind 	KASSERT(ip_nfrags == nfrags);
    493   1.1  rmind 
    494   1.1  rmind 	/* Find median (or other drop fraction) in histogram. */
    495   1.1  rmind 	dropfraction = (ip_nfrags / 2);
    496   1.1  rmind 	keepfraction = ip_nfrags - dropfraction;
    497   1.1  rmind 	for (i = IPFRAGTTL, median = 0; i >= 0; i--) {
    498   1.1  rmind 		median += fragttl_histo[i];
    499   1.1  rmind 		if (median >= keepfraction)
    500   1.1  rmind 			break;
    501   1.1  rmind 	}
    502   1.1  rmind 
    503   1.1  rmind 	/* Return TTL of median (or other fraction). */
    504   1.1  rmind 	return (u_int)i;
    505   1.1  rmind }
    506   1.1  rmind 
    507   1.1  rmind static void
    508   1.1  rmind ip_reass_drophalf(void)
    509   1.1  rmind {
    510   1.1  rmind 	u_int median_ticks;
    511   1.1  rmind 
    512   1.4  rmind 	KASSERT(mutex_owned(&ipfr_lock));
    513   1.4  rmind 
    514   1.1  rmind 	/*
    515   1.1  rmind 	 * Compute median TTL of all fragments, and count frags
    516   1.1  rmind 	 * with that TTL or lower (roughly half of all fragments).
    517   1.1  rmind 	 */
    518   1.1  rmind 	median_ticks = ip_reass_ttl_decr(0);
    519   1.1  rmind 
    520   1.1  rmind 	/* Drop half. */
    521   1.1  rmind 	median_ticks = ip_reass_ttl_decr(median_ticks);
    522   1.1  rmind }
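
/*
 * Worked example: suppose 9 fragments remain, with TTLs of 50 (x2), 30 (x3)
 * and 12 (x4).  Then dropfraction = 4 and keepfraction = 5; scanning the
 * histogram downwards reaches a cumulative count of 5 at TTL 30, so
 * ip_reass_ttl_decr(0) returns 30, and the second call ages everything by
 * 30 ticks, reclaiming the fragments at TTL 30 and below -- roughly (here,
 * somewhat more than) half of the total.
 */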
    523   1.1  rmind 
    524   1.1  rmind /*
    525   1.1  rmind  * ip_reass_drain: drain off all datagram fragments.  Do not acquire
     526   1.1  rmind  * softnet_lock, as this can be called from hardware interrupt context.
    527   1.1  rmind  */
    528   1.1  rmind void
    529   1.1  rmind ip_reass_drain(void)
    530   1.1  rmind {
    531   1.1  rmind 
    532   1.1  rmind 	/*
    533   1.1  rmind 	 * We may be called from a device's interrupt context.  If
    534   1.1  rmind 	 * the ipq is already busy, just bail out now.
    535   1.1  rmind 	 */
    536   1.4  rmind 	if (mutex_tryenter(&ipfr_lock)) {
    537   1.1  rmind 		/*
    538   1.1  rmind 		 * Drop half the total fragments now. If more mbufs are
    539   1.1  rmind 		 * needed, we will be called again soon.
    540   1.1  rmind 		 */
    541   1.1  rmind 		ip_reass_drophalf();
    542   1.4  rmind 		mutex_exit(&ipfr_lock);
    543   1.1  rmind 	}
    544   1.1  rmind }
    545   1.1  rmind 
    546   1.1  rmind /*
    547   1.1  rmind  * ip_reass_slowtimo:
    548   1.1  rmind  *
    549   1.1  rmind  *	If a timer expires on a reassembly queue, discard it.
    550   1.1  rmind  */
    551   1.1  rmind void
    552   1.1  rmind ip_reass_slowtimo(void)
    553   1.1  rmind {
    554   1.1  rmind 	static u_int dropscanidx = 0;
    555   1.1  rmind 	u_int i, median_ttl;
    556   1.1  rmind 
    557   1.4  rmind 	mutex_enter(&ipfr_lock);
    558   1.1  rmind 
     559   1.1  rmind 	/* Age TTL of all fragments by 1 tick. */
    560   1.1  rmind 	median_ttl = ip_reass_ttl_decr(1);
    561   1.1  rmind 
    562   1.1  rmind 	/* Make sure fragment limit is up-to-date. */
    563   1.1  rmind 	CHECK_NMBCLUSTER_PARAMS();
    564   1.1  rmind 
    565   1.1  rmind 	/* If we have too many fragments, drop the older half. */
    566   1.1  rmind 	if (ip_nfrags > ip_maxfrags) {
    567   1.1  rmind 		ip_reass_ttl_decr(median_ttl);
    568   1.1  rmind 	}
    569   1.1  rmind 
    570   1.1  rmind 	/*
    571   1.1  rmind 	 * If we are over the maximum number of fragmented packets (due to
    572   1.1  rmind 	 * the limit being lowered), drain off enough to get down to the
    573   1.1  rmind 	 * new limit.  Start draining from the reassembly hashqueue most
    574   1.1  rmind 	 * recently drained.
    575   1.1  rmind 	 */
    576   1.1  rmind 	if (ip_maxfragpackets < 0)
    577   1.1  rmind 		;
    578   1.1  rmind 	else {
    579   1.1  rmind 		int wrapped = 0;
    580   1.1  rmind 
    581   1.1  rmind 		i = dropscanidx;
    582   1.1  rmind 		while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) {
    583   1.3  rmind 			while (LIST_FIRST(&ip_frags[i]) != NULL) {
    584   1.3  rmind 				ip_freef(LIST_FIRST(&ip_frags[i]));
    585   1.1  rmind 			}
    586   1.1  rmind 			if (++i >= IPREASS_HASH_SIZE) {
    587   1.1  rmind 				i = 0;
    588   1.1  rmind 			}
    589   1.1  rmind 			/*
    590   1.1  rmind 			 * Do not scan forever even if fragment counters are
    591   1.1  rmind 			 * wrong: stop after scanning entire reassembly queue.
    592   1.1  rmind 			 */
    593   1.1  rmind 			if (i == dropscanidx) {
    594   1.1  rmind 				wrapped = 1;
    595   1.1  rmind 			}
    596   1.1  rmind 		}
    597   1.1  rmind 		dropscanidx = i;
    598   1.1  rmind 	}
    599   1.4  rmind 	mutex_exit(&ipfr_lock);
    600   1.1  rmind }
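
/*
 * Note (assumption): ip_reass_slowtimo() is driven by the protocol slow
 * timeout, which traditionally fires about twice a second, so ipq_ttl and
 * IPFRAGTTL count those slow-timeout ticks rather than seconds.
 */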
    601   1.2  rmind 
    602   1.2  rmind /*
    603   1.2  rmind  * ip_reass_packet: generic routine to perform IP reassembly.
    604   1.2  rmind  *
    605   1.2  rmind  * => Passed fragment should have IP_MF flag and/or offset set.
    606   1.2  rmind  * => Fragment should not have other than IP_MF flags set.
    607   1.2  rmind  *
    608   1.7  rmind  * => Returns 0 on success or error otherwise.
    609   1.7  rmind  * => On complete, m0 represents a constructed final packet.
    610   1.2  rmind  */
    611   1.2  rmind int
    612   1.7  rmind ip_reass_packet(struct mbuf **m0, struct ip *ip)
    613   1.2  rmind {
    614   1.7  rmind 	const int hlen = ip->ip_hl << 2;
    615   1.7  rmind 	const int len = ntohs(ip->ip_len);
    616   1.7  rmind 	struct mbuf *m = *m0;
    617   1.3  rmind 	ipfr_queue_t *fp;
    618   1.3  rmind 	ipfr_qent_t *ipqe;
    619   1.7  rmind 	u_int hash, off, flen;
    620   1.7  rmind 	bool mff;
    621   1.7  rmind 
    622   1.7  rmind 	/*
    623   1.7  rmind 	 * Prevent TCP blind data attacks by not allowing non-initial
    624   1.7  rmind 	 * fragments to start at less than 68 bytes (minimal fragment
    625   1.7  rmind 	 * size) and making sure the first fragment is at least 68
    626   1.7  rmind 	 * bytes.
    627   1.7  rmind 	 */
    628   1.7  rmind 	off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
    629   1.7  rmind 	if ((off > 0 ? off + hlen : len) < IP_MINFRAGSIZE - 1) {
    630   1.7  rmind 		IP_STATINC(IP_STAT_BADFRAGS);
    631   1.7  rmind 		return EINVAL;
    632   1.7  rmind 	}
    633   1.7  rmind 
    634  1.12   maxv 	if (off + len > IP_MAXPACKET) {
    635  1.13   maxv 		IP_STATINC(IP_STAT_TOOLONG);
    636  1.12   maxv 		return EINVAL;
    637  1.12   maxv 	}
    638  1.12   maxv 
    639   1.7  rmind 	/*
    640   1.7  rmind 	 * Fragment length and MF flag.  Make sure that fragments have
    641   1.7  rmind 	 * a data length which is non-zero and multiple of 8 bytes.
    642   1.7  rmind 	 */
    643   1.7  rmind 	flen = ntohs(ip->ip_len) - hlen;
    644   1.7  rmind 	mff = (ip->ip_off & htons(IP_MF)) != 0;
    645   1.7  rmind 	if (mff && (flen == 0 || (flen & 0x7) != 0)) {
    646   1.7  rmind 		IP_STATINC(IP_STAT_BADFRAGS);
    647   1.7  rmind 		return EINVAL;
    648   1.7  rmind 	}
    649   1.7  rmind 
    650   1.7  rmind 	/*
     651   1.7  rmind 	 * Adjust the total IP length to exclude the header, and convert
     652   1.7  rmind 	 * the fragment offset to bytes.  XXX: clobbers struct ip.
    653   1.7  rmind 	 */
    654   1.7  rmind 	ip->ip_len = htons(flen);
    655   1.7  rmind 	ip->ip_off = htons(off);
    656   1.2  rmind 
    657   1.2  rmind 	/* Look for queue of fragments of this datagram. */
    658   1.4  rmind 	mutex_enter(&ipfr_lock);
    659   1.3  rmind 	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    660   1.3  rmind 	LIST_FOREACH(fp, &ip_frags[hash], ipq_q) {
    661   1.3  rmind 		if (ip->ip_id != fp->ipq_id)
    662   1.3  rmind 			continue;
    663   1.3  rmind 		if (!in_hosteq(ip->ip_src, fp->ipq_src))
    664   1.3  rmind 			continue;
    665   1.3  rmind 		if (!in_hosteq(ip->ip_dst, fp->ipq_dst))
    666   1.3  rmind 			continue;
    667   1.3  rmind 		if (ip->ip_p != fp->ipq_p)
    668   1.3  rmind 			continue;
    669   1.3  rmind 		break;
    670   1.3  rmind 	}
    671   1.2  rmind 
    672   1.2  rmind 	/* Make sure that TOS matches previous fragments. */
    673   1.2  rmind 	if (fp && fp->ipq_tos != ip->ip_tos) {
    674   1.2  rmind 		IP_STATINC(IP_STAT_BADFRAGS);
    675   1.4  rmind 		mutex_exit(&ipfr_lock);
    676   1.2  rmind 		return EINVAL;
    677   1.2  rmind 	}
    678   1.2  rmind 
    679   1.2  rmind 	/*
     680   1.2  rmind 	 * Create a new entry and attempt reassembly.
    681   1.2  rmind 	 */
    682   1.2  rmind 	IP_STATINC(IP_STAT_FRAGMENTS);
    683   1.4  rmind 	ipqe = pool_cache_get(ipfren_cache, PR_NOWAIT);
    684   1.2  rmind 	if (ipqe == NULL) {
    685   1.2  rmind 		IP_STATINC(IP_STAT_RCVMEMDROP);
    686   1.4  rmind 		mutex_exit(&ipfr_lock);
    687   1.2  rmind 		return ENOMEM;
    688   1.2  rmind 	}
    689   1.2  rmind 	ipqe->ipqe_mff = mff;
    690   1.2  rmind 	ipqe->ipqe_m = m;
    691   1.2  rmind 	ipqe->ipqe_ip = ip;
    692   1.2  rmind 
    693   1.7  rmind 	*m0 = ip_reass(ipqe, fp, hash);
    694   1.7  rmind 	if (*m0) {
     695   1.7  rmind 		/* Note that the datagram was finally reassembled. */
    696   1.2  rmind 		IP_STATINC(IP_STAT_REASSEMBLED);
    697   1.2  rmind 	}
    698   1.2  rmind 	return 0;
    699   1.2  rmind }
    700