1 1.3.2.6 skrll /* $NetBSD: pf_norm.c,v 1.3.2.6 2004/12/18 09:32:35 skrll Exp $ */
2 1.3.2.5 skrll /* $OpenBSD: pf_norm.c,v 1.96 2004/07/17 00:17:27 frantzen Exp $ */
3 1.3.2.2 skrll
4 1.3.2.2 skrll /*
5 1.3.2.2 skrll * Copyright 2001 Niels Provos <provos (at) citi.umich.edu>
6 1.3.2.2 skrll * All rights reserved.
7 1.3.2.2 skrll *
8 1.3.2.2 skrll * Redistribution and use in source and binary forms, with or without
9 1.3.2.2 skrll * modification, are permitted provided that the following conditions
10 1.3.2.2 skrll * are met:
11 1.3.2.2 skrll * 1. Redistributions of source code must retain the above copyright
12 1.3.2.2 skrll * notice, this list of conditions and the following disclaimer.
13 1.3.2.2 skrll * 2. Redistributions in binary form must reproduce the above copyright
14 1.3.2.2 skrll * notice, this list of conditions and the following disclaimer in the
15 1.3.2.2 skrll * documentation and/or other materials provided with the distribution.
16 1.3.2.2 skrll *
17 1.3.2.2 skrll * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 1.3.2.2 skrll * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 1.3.2.2 skrll * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 1.3.2.2 skrll * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 1.3.2.2 skrll * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 1.3.2.2 skrll * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 1.3.2.2 skrll * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 1.3.2.2 skrll * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 1.3.2.2 skrll * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 1.3.2.2 skrll * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 1.3.2.2 skrll */
28 1.3.2.2 skrll
29 1.3.2.2 skrll #ifdef _KERNEL_OPT
30 1.3.2.2 skrll #include "opt_inet.h"
31 1.3.2.2 skrll #endif
32 1.3.2.2 skrll
33 1.3.2.2 skrll #include "pflog.h"
34 1.3.2.2 skrll
35 1.3.2.2 skrll #include <sys/param.h>
36 1.3.2.2 skrll #include <sys/systm.h>
37 1.3.2.2 skrll #include <sys/mbuf.h>
38 1.3.2.2 skrll #include <sys/filio.h>
39 1.3.2.2 skrll #include <sys/fcntl.h>
40 1.3.2.2 skrll #include <sys/socket.h>
41 1.3.2.2 skrll #include <sys/kernel.h>
42 1.3.2.2 skrll #include <sys/time.h>
43 1.3.2.2 skrll #include <sys/pool.h>
44 1.3.2.2 skrll
45 1.3.2.2 skrll #ifdef __OpenBSD__
46 1.3.2.2 skrll #include <dev/rndvar.h>
47 1.3.2.2 skrll #else
48 1.3.2.2 skrll #include <sys/rnd.h>
49 1.3.2.2 skrll #endif
50 1.3.2.2 skrll #include <net/if.h>
51 1.3.2.2 skrll #include <net/if_types.h>
52 1.3.2.2 skrll #include <net/bpf.h>
53 1.3.2.2 skrll #include <net/route.h>
54 1.3.2.2 skrll #include <net/if_pflog.h>
55 1.3.2.2 skrll
56 1.3.2.2 skrll #include <netinet/in.h>
57 1.3.2.2 skrll #include <netinet/in_var.h>
58 1.3.2.2 skrll #include <netinet/in_systm.h>
59 1.3.2.2 skrll #include <netinet/ip.h>
60 1.3.2.2 skrll #include <netinet/ip_var.h>
61 1.3.2.2 skrll #include <netinet/tcp.h>
62 1.3.2.2 skrll #include <netinet/tcp_seq.h>
63 1.3.2.2 skrll #include <netinet/udp.h>
64 1.3.2.2 skrll #include <netinet/ip_icmp.h>
65 1.3.2.2 skrll
66 1.3.2.2 skrll #ifdef INET6
67 1.3.2.2 skrll #include <netinet/ip6.h>
68 1.3.2.2 skrll #endif /* INET6 */
69 1.3.2.2 skrll
70 1.3.2.2 skrll #include <net/pfvar.h>
71 1.3.2.2 skrll
72 1.3.2.2 skrll struct pf_frent {
73 1.3.2.2 skrll LIST_ENTRY(pf_frent) fr_next;
74 1.3.2.2 skrll struct ip *fr_ip;
75 1.3.2.2 skrll struct mbuf *fr_m;
76 1.3.2.2 skrll };
77 1.3.2.2 skrll
78 1.3.2.2 skrll struct pf_frcache {
79 1.3.2.2 skrll LIST_ENTRY(pf_frcache) fr_next;
80 1.3.2.2 skrll uint16_t fr_off;
81 1.3.2.2 skrll uint16_t fr_end;
82 1.3.2.2 skrll };
83 1.3.2.2 skrll
84 1.3.2.2 skrll #define PFFRAG_SEENLAST 0x0001 /* Seen the last fragment for this packet */
85 1.3.2.2 skrll #define PFFRAG_NOBUFFER 0x0002 /* Non-buffering fragment cache */
86 1.3.2.2 skrll #define PFFRAG_DROP 0x0004 /* Drop all fragments */
87 1.3.2.2 skrll #define BUFFER_FRAGMENTS(fr) (!((fr)->fr_flags & PFFRAG_NOBUFFER))
88 1.3.2.2 skrll
89 1.3.2.2 skrll struct pf_fragment {
90 1.3.2.2 skrll RB_ENTRY(pf_fragment) fr_entry;
91 1.3.2.2 skrll TAILQ_ENTRY(pf_fragment) frag_next;
92 1.3.2.2 skrll struct in_addr fr_src;
93 1.3.2.2 skrll struct in_addr fr_dst;
94 1.3.2.2 skrll u_int8_t fr_p; /* protocol of this fragment */
95 1.3.2.2 skrll u_int8_t fr_flags; /* status flags */
96 1.3.2.2 skrll u_int16_t fr_id; /* fragment id for reassembly */
97 1.3.2.2 skrll u_int16_t fr_max; /* fragment data max */
98 1.3.2.2 skrll u_int32_t fr_timeout;
99 1.3.2.2 skrll #define fr_queue fr_u.fru_queue
100 1.3.2.2 skrll #define fr_cache fr_u.fru_cache
101 1.3.2.2 skrll union {
102 1.3.2.2 skrll LIST_HEAD(pf_fragq, pf_frent) fru_queue; /* buffering */
103 1.3.2.2 skrll LIST_HEAD(pf_cacheq, pf_frcache) fru_cache; /* non-buf */
104 1.3.2.2 skrll } fr_u;
105 1.3.2.2 skrll };
106 1.3.2.2 skrll
107 1.3.2.2 skrll TAILQ_HEAD(pf_fragqueue, pf_fragment) pf_fragqueue;
108 1.3.2.2 skrll TAILQ_HEAD(pf_cachequeue, pf_fragment) pf_cachequeue;
109 1.3.2.2 skrll
110 1.3.2.2 skrll static __inline int pf_frag_compare(struct pf_fragment *,
111 1.3.2.2 skrll struct pf_fragment *);
112 1.3.2.2 skrll RB_HEAD(pf_frag_tree, pf_fragment) pf_frag_tree, pf_cache_tree;
113 1.3.2.2 skrll RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
114 1.3.2.2 skrll RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
115 1.3.2.2 skrll
116 1.3.2.2 skrll /* Private prototypes */
117 1.3.2.2 skrll void pf_ip2key(struct pf_fragment *, struct ip *);
118 1.3.2.2 skrll void pf_remove_fragment(struct pf_fragment *);
119 1.3.2.2 skrll void pf_flush_fragments(void);
120 1.3.2.2 skrll void pf_free_fragment(struct pf_fragment *);
121 1.3.2.2 skrll struct pf_fragment *pf_find_fragment(struct ip *, struct pf_frag_tree *);
122 1.3.2.2 skrll struct mbuf *pf_reassemble(struct mbuf **, struct pf_fragment **,
123 1.3.2.2 skrll struct pf_frent *, int);
124 1.3.2.2 skrll struct mbuf *pf_fragcache(struct mbuf **, struct ip*,
125 1.3.2.2 skrll struct pf_fragment **, int, int, int *);
126 1.3.2.2 skrll int pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
127 1.3.2.2 skrll struct tcphdr *, int);
128 1.3.2.2 skrll
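/*
 * Debug logging helper: prints only when pf's debug level is at least
 * PF_DEBUG_MISC, and prefixes every message with the calling function.
 */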
129 1.3.2.5 skrll #define DPFPRINTF(x) do { \
130 1.3.2.5 skrll if (pf_status.debug >= PF_DEBUG_MISC) { \
131 1.3.2.5 skrll printf("%s: ", __func__); \
132 1.3.2.5 skrll printf x ; \
133 1.3.2.5 skrll } \
134 1.3.2.5 skrll } while(0)
135 1.3.2.2 skrll
136 1.3.2.2 skrll /* Globals */
137 1.3.2.2 skrll struct pool pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
138 1.3.2.2 skrll struct pool pf_state_scrub_pl;
139 1.3.2.2 skrll int pf_nfrents, pf_ncache;
140 1.3.2.2 skrll
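/*
 * Initialize the pools backing fragment reassembly and the fragment
 * cache, apply their high-water/hard limits, and set up the LRU queues.
 */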
141 1.3.2.2 skrll void
142 1.3.2.2 skrll pf_normalize_init(void)
143 1.3.2.2 skrll {
144 1.3.2.2 skrll pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
145 1.3.2.2 skrll NULL);
146 1.3.2.2 skrll pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
147 1.3.2.2 skrll NULL);
148 1.3.2.2 skrll pool_init(&pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0,
149 1.3.2.2 skrll "pffrcache", NULL);
150 1.3.2.2 skrll pool_init(&pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent",
151 1.3.2.2 skrll NULL);
152 1.3.2.2 skrll pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0,
153 1.3.2.2 skrll "pfstscr", NULL);
154 1.3.2.2 skrll
155 1.3.2.2 skrll pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
156 1.3.2.2 skrll pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
157 1.3.2.2 skrll pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
158 1.3.2.2 skrll pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);
159 1.3.2.2 skrll
160 1.3.2.2 skrll TAILQ_INIT(&pf_fragqueue);
161 1.3.2.2 skrll TAILQ_INIT(&pf_cachequeue);
162 1.3.2.2 skrll }
163 1.3.2.2 skrll
164 1.3.2.2 skrll #ifdef _LKM
165 1.3.2.2 skrll void
166 1.3.2.2 skrll pf_normalize_destroy(void)
167 1.3.2.2 skrll {
168 1.3.2.2 skrll pool_destroy(&pf_state_scrub_pl);
169 1.3.2.2 skrll pool_destroy(&pf_cent_pl);
170 1.3.2.2 skrll pool_destroy(&pf_cache_pl);
171 1.3.2.2 skrll pool_destroy(&pf_frag_pl);
172 1.3.2.2 skrll pool_destroy(&pf_frent_pl);
173 1.3.2.2 skrll }
174 1.3.2.2 skrll #endif
175 1.3.2.2 skrll
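/*
 * RB-tree comparison function: fragments are keyed on IP id, protocol,
 * source address and destination address.
 */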
176 1.3.2.2 skrll static __inline int
177 1.3.2.2 skrll pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
178 1.3.2.2 skrll {
179 1.3.2.2 skrll int diff;
180 1.3.2.2 skrll
181 1.3.2.2 skrll if ((diff = a->fr_id - b->fr_id))
182 1.3.2.2 skrll return (diff);
183 1.3.2.2 skrll else if ((diff = a->fr_p - b->fr_p))
184 1.3.2.2 skrll return (diff);
185 1.3.2.2 skrll else if (a->fr_src.s_addr < b->fr_src.s_addr)
186 1.3.2.2 skrll return (-1);
187 1.3.2.2 skrll else if (a->fr_src.s_addr > b->fr_src.s_addr)
188 1.3.2.2 skrll return (1);
189 1.3.2.2 skrll else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
190 1.3.2.2 skrll return (-1);
191 1.3.2.2 skrll else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
192 1.3.2.2 skrll return (1);
193 1.3.2.2 skrll return (0);
194 1.3.2.2 skrll }
195 1.3.2.2 skrll
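/*
 * Expire reassembly and cache entries that are older than the
 * configured fragment timeout.  Both queues are kept in LRU order,
 * so only the tail of each queue needs to be examined.
 */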
196 1.3.2.2 skrll void
197 1.3.2.2 skrll pf_purge_expired_fragments(void)
198 1.3.2.2 skrll {
199 1.3.2.2 skrll struct pf_fragment *frag;
200 1.3.2.5 skrll u_int32_t expire = time_second -
201 1.3.2.2 skrll pf_default_rule.timeout[PFTM_FRAG];
202 1.3.2.2 skrll
203 1.3.2.2 skrll while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
204 1.3.2.2 skrll KASSERT(BUFFER_FRAGMENTS(frag));
205 1.3.2.2 skrll if (frag->fr_timeout > expire)
206 1.3.2.2 skrll break;
207 1.3.2.2 skrll
208 1.3.2.2 skrll DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
209 1.3.2.2 skrll pf_free_fragment(frag);
210 1.3.2.2 skrll }
211 1.3.2.2 skrll
212 1.3.2.2 skrll while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
213 1.3.2.2 skrll KASSERT(!BUFFER_FRAGMENTS(frag));
214 1.3.2.2 skrll if (frag->fr_timeout > expire)
215 1.3.2.2 skrll break;
216 1.3.2.2 skrll
217 1.3.2.2 skrll DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
218 1.3.2.2 skrll pf_free_fragment(frag);
219 1.3.2.2 skrll KASSERT(TAILQ_EMPTY(&pf_cachequeue) ||
220 1.3.2.2 skrll TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
221 1.3.2.2 skrll }
222 1.3.2.2 skrll }
223 1.3.2.2 skrll
224 1.3.2.2 skrll /*
225 1.3.2.2 skrll * Try to flush old fragments to make space for new ones
226 1.3.2.2 skrll */
227 1.3.2.2 skrll
228 1.3.2.2 skrll void
229 1.3.2.2 skrll pf_flush_fragments(void)
230 1.3.2.2 skrll {
231 1.3.2.2 skrll struct pf_fragment *frag;
232 1.3.2.2 skrll int goal;
233 1.3.2.2 skrll
234 1.3.2.2 skrll goal = pf_nfrents * 9 / 10;
235 1.3.2.2 skrll DPFPRINTF(("trying to free > %d frents\n",
236 1.3.2.2 skrll pf_nfrents - goal));
237 1.3.2.2 skrll while (goal < pf_nfrents) {
238 1.3.2.2 skrll frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
239 1.3.2.2 skrll if (frag == NULL)
240 1.3.2.2 skrll break;
241 1.3.2.2 skrll pf_free_fragment(frag);
242 1.3.2.2 skrll }
243 1.3.2.2 skrll
244 1.3.2.2 skrll
245 1.3.2.2 skrll goal = pf_ncache * 9 / 10;
246 1.3.2.2 skrll DPFPRINTF(("trying to free > %d cache entries\n",
247 1.3.2.2 skrll pf_ncache - goal));
248 1.3.2.2 skrll while (goal < pf_ncache) {
249 1.3.2.2 skrll frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
250 1.3.2.2 skrll if (frag == NULL)
251 1.3.2.2 skrll break;
252 1.3.2.2 skrll pf_free_fragment(frag);
253 1.3.2.2 skrll }
254 1.3.2.2 skrll }
255 1.3.2.2 skrll
256 1.3.2.2 skrll /* Frees a fragment and all of its associated entries */
257 1.3.2.2 skrll
258 1.3.2.2 skrll void
259 1.3.2.2 skrll pf_free_fragment(struct pf_fragment *frag)
260 1.3.2.2 skrll {
261 1.3.2.2 skrll struct pf_frent *frent;
262 1.3.2.2 skrll struct pf_frcache *frcache;
263 1.3.2.2 skrll
264 1.3.2.2 skrll /* Free all fragments */
265 1.3.2.2 skrll if (BUFFER_FRAGMENTS(frag)) {
266 1.3.2.2 skrll for (frent = LIST_FIRST(&frag->fr_queue); frent;
267 1.3.2.2 skrll frent = LIST_FIRST(&frag->fr_queue)) {
268 1.3.2.2 skrll LIST_REMOVE(frent, fr_next);
269 1.3.2.2 skrll
270 1.3.2.2 skrll m_freem(frent->fr_m);
271 1.3.2.2 skrll pool_put(&pf_frent_pl, frent);
272 1.3.2.2 skrll pf_nfrents--;
273 1.3.2.2 skrll }
274 1.3.2.2 skrll } else {
275 1.3.2.2 skrll for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
276 1.3.2.2 skrll frcache = LIST_FIRST(&frag->fr_cache)) {
277 1.3.2.2 skrll LIST_REMOVE(frcache, fr_next);
278 1.3.2.2 skrll
279 1.3.2.2 skrll KASSERT(LIST_EMPTY(&frag->fr_cache) ||
280 1.3.2.2 skrll LIST_FIRST(&frag->fr_cache)->fr_off >
281 1.3.2.2 skrll frcache->fr_end);
282 1.3.2.2 skrll
283 1.3.2.2 skrll pool_put(&pf_cent_pl, frcache);
284 1.3.2.2 skrll pf_ncache--;
285 1.3.2.2 skrll }
286 1.3.2.2 skrll }
287 1.3.2.2 skrll
288 1.3.2.2 skrll pf_remove_fragment(frag);
289 1.3.2.2 skrll }
290 1.3.2.2 skrll
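/* Fill in the lookup key (protocol, id, addresses) from an IP header. */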
291 1.3.2.2 skrll void
292 1.3.2.2 skrll pf_ip2key(struct pf_fragment *key, struct ip *ip)
293 1.3.2.2 skrll {
294 1.3.2.2 skrll key->fr_p = ip->ip_p;
295 1.3.2.2 skrll key->fr_id = ip->ip_id;
296 1.3.2.2 skrll key->fr_src.s_addr = ip->ip_src.s_addr;
297 1.3.2.2 skrll key->fr_dst.s_addr = ip->ip_dst.s_addr;
298 1.3.2.2 skrll }
299 1.3.2.2 skrll
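/*
 * Look up the fragment state for this packet in the given tree.  On a
 * hit, refresh the timeout and move the entry to the head of its queue.
 */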
300 1.3.2.2 skrll struct pf_fragment *
301 1.3.2.2 skrll pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
302 1.3.2.2 skrll {
303 1.3.2.2 skrll struct pf_fragment key;
304 1.3.2.2 skrll struct pf_fragment *frag;
305 1.3.2.2 skrll
306 1.3.2.2 skrll pf_ip2key(&key, ip);
307 1.3.2.2 skrll
308 1.3.2.2 skrll frag = RB_FIND(pf_frag_tree, tree, &key);
309 1.3.2.2 skrll if (frag != NULL) {
310 1.3.2.2 skrll /* XXX Are we sure we want to update the timeout? */
311 1.3.2.5 skrll frag->fr_timeout = time_second;
312 1.3.2.2 skrll if (BUFFER_FRAGMENTS(frag)) {
313 1.3.2.2 skrll TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
314 1.3.2.2 skrll TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
315 1.3.2.2 skrll } else {
316 1.3.2.2 skrll TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
317 1.3.2.2 skrll TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
318 1.3.2.2 skrll }
319 1.3.2.2 skrll }
320 1.3.2.2 skrll
321 1.3.2.2 skrll return (frag);
322 1.3.2.2 skrll }
323 1.3.2.2 skrll
324 1.3.2.2 skrll /* Removes a fragment from the fragment queue and frees the fragment */
325 1.3.2.2 skrll
326 1.3.2.2 skrll void
327 1.3.2.2 skrll pf_remove_fragment(struct pf_fragment *frag)
328 1.3.2.2 skrll {
329 1.3.2.2 skrll if (BUFFER_FRAGMENTS(frag)) {
330 1.3.2.2 skrll RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
331 1.3.2.2 skrll TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
332 1.3.2.2 skrll pool_put(&pf_frag_pl, frag);
333 1.3.2.2 skrll } else {
334 1.3.2.2 skrll RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
335 1.3.2.2 skrll TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
336 1.3.2.2 skrll pool_put(&pf_cache_pl, frag);
337 1.3.2.2 skrll }
338 1.3.2.2 skrll }
339 1.3.2.2 skrll
340 1.3.2.2 skrll #define FR_IP_OFF(fr) ((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
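/*
 * Buffering reassembly: link the new fragment into the per-packet list,
 * trimming any data that overlaps fragments already queued.  Returns
 * the reassembled packet once all pieces are present, otherwise NULL.
 */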
341 1.3.2.2 skrll struct mbuf *
342 1.3.2.2 skrll pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
343 1.3.2.2 skrll struct pf_frent *frent, int mff)
344 1.3.2.2 skrll {
345 1.3.2.2 skrll struct mbuf *m = *m0, *m2;
346 1.3.2.2 skrll struct pf_frent *frea, *next;
347 1.3.2.2 skrll struct pf_frent *frep = NULL;
348 1.3.2.2 skrll struct ip *ip = frent->fr_ip;
349 1.3.2.2 skrll int hlen = ip->ip_hl << 2;
350 1.3.2.2 skrll u_int16_t off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
351 1.3.2.2 skrll u_int16_t ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
352 1.3.2.2 skrll u_int16_t max = ip_len + off;
353 1.3.2.2 skrll
354 1.3.2.2 skrll KASSERT(*frag == NULL || BUFFER_FRAGMENTS(*frag));
355 1.3.2.2 skrll
356 1.3.2.2 skrll /* Strip off ip header */
357 1.3.2.2 skrll m->m_data += hlen;
358 1.3.2.2 skrll m->m_len -= hlen;
359 1.3.2.2 skrll
360 1.3.2.2 skrll /* Create a new reassembly queue for this packet */
361 1.3.2.2 skrll if (*frag == NULL) {
362 1.3.2.2 skrll *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
363 1.3.2.2 skrll if (*frag == NULL) {
364 1.3.2.2 skrll pf_flush_fragments();
365 1.3.2.2 skrll *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
366 1.3.2.2 skrll if (*frag == NULL)
367 1.3.2.2 skrll goto drop_fragment;
368 1.3.2.2 skrll }
369 1.3.2.2 skrll
370 1.3.2.2 skrll (*frag)->fr_flags = 0;
371 1.3.2.2 skrll (*frag)->fr_max = 0;
372 1.3.2.2 skrll (*frag)->fr_src = frent->fr_ip->ip_src;
373 1.3.2.2 skrll (*frag)->fr_dst = frent->fr_ip->ip_dst;
374 1.3.2.2 skrll (*frag)->fr_p = frent->fr_ip->ip_p;
375 1.3.2.2 skrll (*frag)->fr_id = frent->fr_ip->ip_id;
376 1.3.2.5 skrll (*frag)->fr_timeout = time_second;
377 1.3.2.2 skrll LIST_INIT(&(*frag)->fr_queue);
378 1.3.2.2 skrll
379 1.3.2.2 skrll RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
380 1.3.2.2 skrll TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);
381 1.3.2.2 skrll
382 1.3.2.2 skrll /* We do not have a previous fragment */
383 1.3.2.2 skrll frep = NULL;
384 1.3.2.2 skrll goto insert;
385 1.3.2.2 skrll }
386 1.3.2.2 skrll
387 1.3.2.2 skrll /*
388 1.3.2.2 skrll * Find a fragment after the current one:
389 1.3.2.2 skrll * - off contains the real shifted offset.
390 1.3.2.2 skrll */
391 1.3.2.2 skrll LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
392 1.3.2.2 skrll if (FR_IP_OFF(frea) > off)
393 1.3.2.2 skrll break;
394 1.3.2.2 skrll frep = frea;
395 1.3.2.2 skrll }
396 1.3.2.2 skrll
397 1.3.2.2 skrll KASSERT(frep != NULL || frea != NULL);
398 1.3.2.2 skrll
399 1.3.2.2 skrll if (frep != NULL &&
400 1.3.2.2 skrll FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
401 1.3.2.2 skrll 4 > off)
402 1.3.2.2 skrll {
403 1.3.2.2 skrll u_int16_t precut;
404 1.3.2.2 skrll
405 1.3.2.2 skrll precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
406 1.3.2.2 skrll frep->fr_ip->ip_hl * 4 - off;
407 1.3.2.2 skrll if (precut >= ip_len)
408 1.3.2.2 skrll goto drop_fragment;
409 1.3.2.2 skrll m_adj(frent->fr_m, precut);
410 1.3.2.2 skrll DPFPRINTF(("overlap -%d\n", precut));
411 1.3.2.2 skrll /* Enforce 8 byte boundaries */
412 1.3.2.2 skrll ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
413 1.3.2.2 skrll off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
414 1.3.2.2 skrll ip_len -= precut;
415 1.3.2.2 skrll ip->ip_len = htons(ip_len);
416 1.3.2.2 skrll }
417 1.3.2.2 skrll
418 1.3.2.2 skrll for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
419 1.3.2.2 skrll frea = next)
420 1.3.2.2 skrll {
421 1.3.2.2 skrll u_int16_t aftercut;
422 1.3.2.2 skrll
423 1.3.2.2 skrll aftercut = ip_len + off - FR_IP_OFF(frea);
424 1.3.2.2 skrll DPFPRINTF(("adjust overlap %d\n", aftercut));
425 1.3.2.2 skrll if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
426 1.3.2.2 skrll * 4)
427 1.3.2.2 skrll {
428 1.3.2.2 skrll frea->fr_ip->ip_len =
429 1.3.2.2 skrll htons(ntohs(frea->fr_ip->ip_len) - aftercut);
430 1.3.2.2 skrll frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
431 1.3.2.2 skrll (aftercut >> 3));
432 1.3.2.2 skrll m_adj(frea->fr_m, aftercut);
433 1.3.2.2 skrll break;
434 1.3.2.2 skrll }
435 1.3.2.2 skrll
436 1.3.2.2 skrll /* This fragment is completely overlapped; lose it */
437 1.3.2.2 skrll next = LIST_NEXT(frea, fr_next);
438 1.3.2.2 skrll m_freem(frea->fr_m);
439 1.3.2.2 skrll LIST_REMOVE(frea, fr_next);
440 1.3.2.2 skrll pool_put(&pf_frent_pl, frea);
441 1.3.2.2 skrll pf_nfrents--;
442 1.3.2.2 skrll }
443 1.3.2.2 skrll
444 1.3.2.2 skrll insert:
445 1.3.2.2 skrll /* Update maximum data size */
446 1.3.2.2 skrll if ((*frag)->fr_max < max)
447 1.3.2.2 skrll (*frag)->fr_max = max;
448 1.3.2.2 skrll /* This is the last segment */
449 1.3.2.2 skrll if (!mff)
450 1.3.2.2 skrll (*frag)->fr_flags |= PFFRAG_SEENLAST;
451 1.3.2.2 skrll
452 1.3.2.2 skrll if (frep == NULL)
453 1.3.2.2 skrll LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
454 1.3.2.2 skrll else
455 1.3.2.2 skrll LIST_INSERT_AFTER(frep, frent, fr_next);
456 1.3.2.2 skrll
457 1.3.2.2 skrll /* Check if we are completely reassembled */
458 1.3.2.2 skrll if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
459 1.3.2.2 skrll return (NULL);
460 1.3.2.2 skrll
461 1.3.2.2 skrll /* Check if we have all the data */
462 1.3.2.2 skrll off = 0;
463 1.3.2.2 skrll for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
464 1.3.2.2 skrll next = LIST_NEXT(frep, fr_next);
465 1.3.2.2 skrll
466 1.3.2.2 skrll off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
467 1.3.2.2 skrll if (off < (*frag)->fr_max &&
468 1.3.2.2 skrll (next == NULL || FR_IP_OFF(next) != off))
469 1.3.2.2 skrll {
470 1.3.2.2 skrll DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
471 1.3.2.2 skrll off, next == NULL ? -1 : FR_IP_OFF(next),
472 1.3.2.2 skrll (*frag)->fr_max));
473 1.3.2.2 skrll return (NULL);
474 1.3.2.2 skrll }
475 1.3.2.2 skrll }
476 1.3.2.2 skrll DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
477 1.3.2.2 skrll if (off < (*frag)->fr_max)
478 1.3.2.2 skrll return (NULL);
479 1.3.2.2 skrll
480 1.3.2.2 skrll /* We have all the data */
481 1.3.2.2 skrll frent = LIST_FIRST(&(*frag)->fr_queue);
482 1.3.2.2 skrll KASSERT(frent != NULL);
483 1.3.2.2 skrll if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
484 1.3.2.2 skrll DPFPRINTF(("drop: too big: %d\n", off));
485 1.3.2.2 skrll pf_free_fragment(*frag);
486 1.3.2.2 skrll *frag = NULL;
487 1.3.2.2 skrll return (NULL);
488 1.3.2.2 skrll }
489 1.3.2.2 skrll next = LIST_NEXT(frent, fr_next);
490 1.3.2.2 skrll
491 1.3.2.2 skrll /* Magic from ip_input */
492 1.3.2.2 skrll ip = frent->fr_ip;
493 1.3.2.2 skrll m = frent->fr_m;
494 1.3.2.2 skrll m2 = m->m_next;
495 1.3.2.2 skrll m->m_next = NULL;
496 1.3.2.2 skrll m_cat(m, m2);
497 1.3.2.2 skrll pool_put(&pf_frent_pl, frent);
498 1.3.2.2 skrll pf_nfrents--;
499 1.3.2.2 skrll for (frent = next; frent != NULL; frent = next) {
500 1.3.2.2 skrll next = LIST_NEXT(frent, fr_next);
501 1.3.2.2 skrll
502 1.3.2.2 skrll m2 = frent->fr_m;
503 1.3.2.2 skrll pool_put(&pf_frent_pl, frent);
504 1.3.2.2 skrll pf_nfrents--;
505 1.3.2.2 skrll m_cat(m, m2);
506 1.3.2.2 skrll }
507 1.3.2.2 skrll
508 1.3.2.2 skrll ip->ip_src = (*frag)->fr_src;
509 1.3.2.2 skrll ip->ip_dst = (*frag)->fr_dst;
510 1.3.2.2 skrll
511 1.3.2.2 skrll /* Remove from fragment queue */
512 1.3.2.2 skrll pf_remove_fragment(*frag);
513 1.3.2.2 skrll *frag = NULL;
514 1.3.2.2 skrll
515 1.3.2.2 skrll hlen = ip->ip_hl << 2;
516 1.3.2.2 skrll ip->ip_len = htons(off + hlen);
517 1.3.2.2 skrll m->m_len += hlen;
518 1.3.2.2 skrll m->m_data -= hlen;
519 1.3.2.2 skrll
520 1.3.2.2 skrll /* some debugging cruft by sklower, below, will go away soon */
521 1.3.2.2 skrll /* XXX this should be done elsewhere */
522 1.3.2.2 skrll if (m->m_flags & M_PKTHDR) {
523 1.3.2.2 skrll int plen = 0;
524 1.3.2.2 skrll for (m2 = m; m2; m2 = m2->m_next)
525 1.3.2.2 skrll plen += m2->m_len;
526 1.3.2.2 skrll m->m_pkthdr.len = plen;
527 1.3.2.2 skrll }
528 1.3.2.2 skrll
529 1.3.2.2 skrll DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
530 1.3.2.2 skrll return (m);
531 1.3.2.2 skrll
532 1.3.2.2 skrll drop_fragment:
533 1.3.2.2 skrll /* Oops - fail safe - drop packet */
534 1.3.2.2 skrll pool_put(&pf_frent_pl, frent);
535 1.3.2.2 skrll pf_nfrents--;
536 1.3.2.2 skrll m_freem(m);
537 1.3.2.2 skrll return (NULL);
538 1.3.2.2 skrll }
539 1.3.2.2 skrll
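/*
 * Non-buffering fragment cache: fragments are passed on immediately,
 * but the byte ranges already seen are recorded so that overlapping
 * data can either be trimmed or, when 'drop' is set, cause the whole
 * fragment chain to be marked PFFRAG_DROP.
 */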
540 1.3.2.2 skrll struct mbuf *
541 1.3.2.2 skrll pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
542 1.3.2.2 skrll int drop, int *nomem)
543 1.3.2.2 skrll {
544 1.3.2.2 skrll struct mbuf *m = *m0;
545 1.3.2.2 skrll struct pf_frcache *frp, *fra, *cur = NULL;
546 1.3.2.2 skrll int ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
547 1.3.2.2 skrll u_int16_t off = ntohs(h->ip_off) << 3;
548 1.3.2.2 skrll u_int16_t max = ip_len + off;
549 1.3.2.2 skrll int hosed = 0;
550 1.3.2.2 skrll
551 1.3.2.2 skrll KASSERT(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
552 1.3.2.2 skrll
553 1.3.2.2 skrll /* Create a new range queue for this packet */
554 1.3.2.2 skrll if (*frag == NULL) {
555 1.3.2.2 skrll *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
556 1.3.2.2 skrll if (*frag == NULL) {
557 1.3.2.2 skrll pf_flush_fragments();
558 1.3.2.2 skrll *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
559 1.3.2.2 skrll if (*frag == NULL)
560 1.3.2.2 skrll goto no_mem;
561 1.3.2.2 skrll }
562 1.3.2.2 skrll
563 1.3.2.2 skrll /* Get an entry for the queue */
564 1.3.2.2 skrll cur = pool_get(&pf_cent_pl, PR_NOWAIT);
565 1.3.2.2 skrll if (cur == NULL) {
566 1.3.2.2 skrll pool_put(&pf_cache_pl, *frag);
567 1.3.2.2 skrll *frag = NULL;
568 1.3.2.2 skrll goto no_mem;
569 1.3.2.2 skrll }
570 1.3.2.2 skrll pf_ncache++;
571 1.3.2.2 skrll
572 1.3.2.2 skrll (*frag)->fr_flags = PFFRAG_NOBUFFER;
573 1.3.2.2 skrll (*frag)->fr_max = 0;
574 1.3.2.2 skrll (*frag)->fr_src = h->ip_src;
575 1.3.2.2 skrll (*frag)->fr_dst = h->ip_dst;
576 1.3.2.2 skrll (*frag)->fr_p = h->ip_p;
577 1.3.2.2 skrll (*frag)->fr_id = h->ip_id;
578 1.3.2.5 skrll (*frag)->fr_timeout = time_second;
579 1.3.2.2 skrll
580 1.3.2.2 skrll cur->fr_off = off;
581 1.3.2.2 skrll cur->fr_end = max;
582 1.3.2.2 skrll LIST_INIT(&(*frag)->fr_cache);
583 1.3.2.2 skrll LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
584 1.3.2.2 skrll
585 1.3.2.2 skrll RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
586 1.3.2.2 skrll TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
587 1.3.2.2 skrll
588 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));
589 1.3.2.2 skrll
590 1.3.2.2 skrll goto pass;
591 1.3.2.2 skrll }
592 1.3.2.2 skrll
593 1.3.2.2 skrll /*
594 1.3.2.2 skrll * Find a fragment after the current one:
595 1.3.2.2 skrll * - off contains the real shifted offset.
596 1.3.2.2 skrll */
597 1.3.2.2 skrll frp = NULL;
598 1.3.2.2 skrll LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
599 1.3.2.2 skrll if (fra->fr_off > off)
600 1.3.2.2 skrll break;
601 1.3.2.2 skrll frp = fra;
602 1.3.2.2 skrll }
603 1.3.2.2 skrll
604 1.3.2.2 skrll KASSERT(frp != NULL || fra != NULL);
605 1.3.2.2 skrll
606 1.3.2.2 skrll if (frp != NULL) {
607 1.3.2.2 skrll int precut;
608 1.3.2.2 skrll
609 1.3.2.2 skrll precut = frp->fr_end - off;
610 1.3.2.2 skrll if (precut >= ip_len) {
611 1.3.2.2 skrll /* Fragment is entirely a duplicate */
612 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
613 1.3.2.2 skrll h->ip_id, frp->fr_off, frp->fr_end, off, max));
614 1.3.2.2 skrll goto drop_fragment;
615 1.3.2.2 skrll }
616 1.3.2.2 skrll if (precut == 0) {
617 1.3.2.2 skrll /* They are adjacent. Fixup cache entry */
618 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
619 1.3.2.2 skrll h->ip_id, frp->fr_off, frp->fr_end, off, max));
620 1.3.2.2 skrll frp->fr_end = max;
621 1.3.2.2 skrll } else if (precut > 0) {
622 1.3.2.2 skrll /* The first part of this payload overlaps with a
623 1.3.2.2 skrll * fragment that has already been passed.
624 1.3.2.2 skrll * Need to trim off the first part of the payload.
625 1.3.2.2 skrll * But to do so easily, we need to create another
626 1.3.2.2 skrll * mbuf to throw the original header into.
627 1.3.2.2 skrll */
628 1.3.2.2 skrll
629 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
630 1.3.2.2 skrll h->ip_id, precut, frp->fr_off, frp->fr_end, off,
631 1.3.2.2 skrll max));
632 1.3.2.2 skrll
633 1.3.2.2 skrll off += precut;
634 1.3.2.2 skrll max -= precut;
635 1.3.2.2 skrll /* Update the previous frag to encompass this one */
636 1.3.2.2 skrll frp->fr_end = max;
637 1.3.2.2 skrll
638 1.3.2.2 skrll if (!drop) {
639 1.3.2.2 skrll /* XXX Optimization opportunity
640 1.3.2.2 skrll * This is a very heavy way to trim the payload.
641 1.3.2.2 skrll * we could do it much faster by diddling mbuf
642 1.3.2.2 skrll * internals but that would be even less legible
643 1.3.2.2 skrll * than this mbuf magic. For my next trick,
644 1.3.2.2 skrll * I'll pull a rabbit out of my laptop.
645 1.3.2.2 skrll */
646 1.3.2.2 skrll #ifdef __OpenBSD__
647 1.3.2.2 skrll *m0 = m_copym2(m, 0, h->ip_hl << 2, M_NOWAIT);
648 1.3.2.2 skrll #else
649 1.3.2.2 skrll *m0 = m_dup(m, 0, h->ip_hl << 2, M_NOWAIT);
650 1.3.2.2 skrll #endif
651 1.3.2.2 skrll if (*m0 == NULL)
652 1.3.2.2 skrll goto no_mem;
653 1.3.2.2 skrll KASSERT((*m0)->m_next == NULL);
654 1.3.2.2 skrll m_adj(m, precut + (h->ip_hl << 2));
655 1.3.2.2 skrll m_cat(*m0, m);
656 1.3.2.2 skrll m = *m0;
657 1.3.2.2 skrll if (m->m_flags & M_PKTHDR) {
658 1.3.2.2 skrll int plen = 0;
659 1.3.2.2 skrll struct mbuf *t;
660 1.3.2.2 skrll for (t = m; t; t = t->m_next)
661 1.3.2.2 skrll plen += t->m_len;
662 1.3.2.2 skrll m->m_pkthdr.len = plen;
663 1.3.2.2 skrll }
664 1.3.2.2 skrll
665 1.3.2.2 skrll
666 1.3.2.2 skrll h = mtod(m, struct ip *);
667 1.3.2.2 skrll
668 1.3.2.2 skrll
669 1.3.2.2 skrll KASSERT((int)m->m_len ==
670 1.3.2.2 skrll ntohs(h->ip_len) - precut);
671 1.3.2.2 skrll h->ip_off = htons(ntohs(h->ip_off) +
672 1.3.2.2 skrll (precut >> 3));
673 1.3.2.2 skrll h->ip_len = htons(ntohs(h->ip_len) - precut);
674 1.3.2.2 skrll } else {
675 1.3.2.2 skrll hosed++;
676 1.3.2.2 skrll }
677 1.3.2.2 skrll } else {
678 1.3.2.2 skrll /* There is a gap between fragments */
679 1.3.2.2 skrll
680 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
681 1.3.2.2 skrll h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
682 1.3.2.2 skrll max));
683 1.3.2.2 skrll
684 1.3.2.2 skrll cur = pool_get(&pf_cent_pl, PR_NOWAIT);
685 1.3.2.2 skrll if (cur == NULL)
686 1.3.2.2 skrll goto no_mem;
687 1.3.2.2 skrll pf_ncache++;
688 1.3.2.2 skrll
689 1.3.2.2 skrll cur->fr_off = off;
690 1.3.2.2 skrll cur->fr_end = max;
691 1.3.2.2 skrll LIST_INSERT_AFTER(frp, cur, fr_next);
692 1.3.2.2 skrll }
693 1.3.2.2 skrll }
694 1.3.2.2 skrll
695 1.3.2.2 skrll if (fra != NULL) {
696 1.3.2.2 skrll int aftercut;
697 1.3.2.2 skrll int merge = 0;
698 1.3.2.2 skrll
699 1.3.2.2 skrll aftercut = max - fra->fr_off;
700 1.3.2.2 skrll if (aftercut == 0) {
701 1.3.2.2 skrll /* Adjacent fragments */
702 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
703 1.3.2.2 skrll h->ip_id, off, max, fra->fr_off, fra->fr_end));
704 1.3.2.2 skrll fra->fr_off = off;
705 1.3.2.2 skrll merge = 1;
706 1.3.2.2 skrll } else if (aftercut > 0) {
707 1.3.2.2 skrll /* Need to chop off the tail of this fragment */
708 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
709 1.3.2.2 skrll h->ip_id, aftercut, off, max, fra->fr_off,
710 1.3.2.2 skrll fra->fr_end));
711 1.3.2.2 skrll fra->fr_off = off;
712 1.3.2.2 skrll max -= aftercut;
713 1.3.2.2 skrll
714 1.3.2.2 skrll merge = 1;
715 1.3.2.2 skrll
716 1.3.2.2 skrll if (!drop) {
717 1.3.2.2 skrll m_adj(m, -aftercut);
718 1.3.2.2 skrll if (m->m_flags & M_PKTHDR) {
719 1.3.2.2 skrll int plen = 0;
720 1.3.2.2 skrll struct mbuf *t;
721 1.3.2.2 skrll for (t = m; t; t = t->m_next)
722 1.3.2.2 skrll plen += t->m_len;
723 1.3.2.2 skrll m->m_pkthdr.len = plen;
724 1.3.2.2 skrll }
725 1.3.2.2 skrll h = mtod(m, struct ip *);
726 1.3.2.2 skrll KASSERT((int)m->m_len ==
727 1.3.2.2 skrll ntohs(h->ip_len) - aftercut);
728 1.3.2.2 skrll h->ip_len = htons(ntohs(h->ip_len) - aftercut);
729 1.3.2.2 skrll } else {
730 1.3.2.2 skrll hosed++;
731 1.3.2.2 skrll }
732 1.3.2.2 skrll } else {
733 1.3.2.2 skrll /* There is a gap between fragments */
734 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
735 1.3.2.2 skrll h->ip_id, -aftercut, off, max, fra->fr_off,
736 1.3.2.2 skrll fra->fr_end));
737 1.3.2.2 skrll
738 1.3.2.2 skrll cur = pool_get(&pf_cent_pl, PR_NOWAIT);
739 1.3.2.2 skrll if (cur == NULL)
740 1.3.2.2 skrll goto no_mem;
741 1.3.2.2 skrll pf_ncache++;
742 1.3.2.2 skrll
743 1.3.2.2 skrll cur->fr_off = off;
744 1.3.2.2 skrll cur->fr_end = max;
745 1.3.2.2 skrll LIST_INSERT_BEFORE(fra, cur, fr_next);
746 1.3.2.2 skrll }
747 1.3.2.2 skrll
748 1.3.2.2 skrll
749 1.3.2.2 skrll /* Need to glue together two separate fragment descriptors */
750 1.3.2.2 skrll if (merge) {
751 1.3.2.2 skrll if (cur && fra->fr_off <= cur->fr_end) {
752 1.3.2.2 skrll /* Need to merge in a previous 'cur' */
753 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: adjacent(merge "
754 1.3.2.2 skrll "%d-%d) %d-%d (%d-%d)\n",
755 1.3.2.2 skrll h->ip_id, cur->fr_off, cur->fr_end, off,
756 1.3.2.2 skrll max, fra->fr_off, fra->fr_end));
757 1.3.2.2 skrll fra->fr_off = cur->fr_off;
758 1.3.2.2 skrll LIST_REMOVE(cur, fr_next);
759 1.3.2.2 skrll pool_put(&pf_cent_pl, cur);
760 1.3.2.2 skrll pf_ncache--;
761 1.3.2.2 skrll cur = NULL;
762 1.3.2.2 skrll
763 1.3.2.2 skrll } else if (frp && fra->fr_off <= frp->fr_end) {
764 1.3.2.2 skrll /* Need to merge in a modified 'frp' */
765 1.3.2.2 skrll KASSERT(cur == NULL);
766 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: adjacent(merge "
767 1.3.2.2 skrll "%d-%d) %d-%d (%d-%d)\n",
768 1.3.2.2 skrll h->ip_id, frp->fr_off, frp->fr_end, off,
769 1.3.2.2 skrll max, fra->fr_off, fra->fr_end));
770 1.3.2.2 skrll fra->fr_off = frp->fr_off;
771 1.3.2.2 skrll LIST_REMOVE(frp, fr_next);
772 1.3.2.2 skrll pool_put(&pf_cent_pl, frp);
773 1.3.2.2 skrll pf_ncache--;
774 1.3.2.2 skrll frp = NULL;
775 1.3.2.2 skrll
776 1.3.2.2 skrll }
777 1.3.2.2 skrll }
778 1.3.2.2 skrll }
779 1.3.2.2 skrll
780 1.3.2.2 skrll if (hosed) {
781 1.3.2.2 skrll /*
782 1.3.2.2 skrll * We must keep tracking the overall fragment even when
783 1.3.2.2 skrll * we're going to drop it anyway so that we know when to
784 1.3.2.2 skrll * free the overall descriptor. Thus we drop the frag late.
785 1.3.2.2 skrll */
786 1.3.2.2 skrll goto drop_fragment;
787 1.3.2.2 skrll }
788 1.3.2.2 skrll
789 1.3.2.2 skrll
790 1.3.2.2 skrll pass:
791 1.3.2.2 skrll /* Update maximum data size */
792 1.3.2.2 skrll if ((*frag)->fr_max < max)
793 1.3.2.2 skrll (*frag)->fr_max = max;
794 1.3.2.2 skrll
795 1.3.2.2 skrll /* This is the last segment */
796 1.3.2.2 skrll if (!mff)
797 1.3.2.2 skrll (*frag)->fr_flags |= PFFRAG_SEENLAST;
798 1.3.2.2 skrll
799 1.3.2.2 skrll /* Check if we are completely reassembled */
800 1.3.2.2 skrll if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
801 1.3.2.2 skrll LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
802 1.3.2.2 skrll LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
803 1.3.2.2 skrll /* Remove from fragment queue */
804 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
805 1.3.2.2 skrll (*frag)->fr_max));
806 1.3.2.2 skrll pf_free_fragment(*frag);
807 1.3.2.2 skrll *frag = NULL;
808 1.3.2.2 skrll }
809 1.3.2.2 skrll
810 1.3.2.2 skrll return (m);
811 1.3.2.2 skrll
812 1.3.2.2 skrll no_mem:
813 1.3.2.2 skrll *nomem = 1;
814 1.3.2.2 skrll
815 1.3.2.2 skrll /* Still need to pay attention to !IP_MF */
816 1.3.2.2 skrll if (!mff && *frag != NULL)
817 1.3.2.2 skrll (*frag)->fr_flags |= PFFRAG_SEENLAST;
818 1.3.2.2 skrll
819 1.3.2.2 skrll m_freem(m);
820 1.3.2.2 skrll return (NULL);
821 1.3.2.2 skrll
822 1.3.2.2 skrll drop_fragment:
823 1.3.2.2 skrll
824 1.3.2.2 skrll /* Still need to pay attention to !IP_MF */
825 1.3.2.2 skrll if (!mff && *frag != NULL)
826 1.3.2.2 skrll (*frag)->fr_flags |= PFFRAG_SEENLAST;
827 1.3.2.2 skrll
828 1.3.2.2 skrll if (drop) {
829 1.3.2.2 skrll /* This fragment has been deemed bad. Don't reassemble */
830 1.3.2.2 skrll if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
831 1.3.2.2 skrll DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
832 1.3.2.2 skrll h->ip_id));
833 1.3.2.2 skrll (*frag)->fr_flags |= PFFRAG_DROP;
834 1.3.2.2 skrll }
835 1.3.2.2 skrll
836 1.3.2.2 skrll m_freem(m);
837 1.3.2.2 skrll return (NULL);
838 1.3.2.2 skrll }
839 1.3.2.2 skrll
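/*
 * Normalize an IPv4 packet: match it against the scrub rules, sanity
 * check the header, and hand fragments either to full reassembly
 * (pf_reassemble) or to the non-buffering cache (pf_fragcache),
 * depending on the matching rule's fragment options.
 */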
840 1.3.2.2 skrll int
841 1.3.2.5 skrll pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
842 1.3.2.5 skrll struct pf_pdesc *pd)
843 1.3.2.2 skrll {
844 1.3.2.2 skrll struct mbuf *m = *m0;
845 1.3.2.2 skrll struct pf_rule *r;
846 1.3.2.2 skrll struct pf_frent *frent;
847 1.3.2.2 skrll struct pf_fragment *frag = NULL;
848 1.3.2.2 skrll struct ip *h = mtod(m, struct ip *);
849 1.3.2.2 skrll int mff = (ntohs(h->ip_off) & IP_MF);
850 1.3.2.2 skrll int hlen = h->ip_hl << 2;
851 1.3.2.2 skrll u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
852 1.3.2.2 skrll u_int16_t max;
853 1.3.2.2 skrll int ip_len;
854 1.3.2.2 skrll int ip_off;
855 1.3.2.2 skrll
856 1.3.2.2 skrll r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
857 1.3.2.2 skrll while (r != NULL) {
858 1.3.2.2 skrll r->evaluations++;
859 1.3.2.2 skrll if (r->kif != NULL &&
860 1.3.2.2 skrll (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
861 1.3.2.2 skrll r = r->skip[PF_SKIP_IFP].ptr;
862 1.3.2.2 skrll else if (r->direction && r->direction != dir)
863 1.3.2.2 skrll r = r->skip[PF_SKIP_DIR].ptr;
864 1.3.2.2 skrll else if (r->af && r->af != AF_INET)
865 1.3.2.2 skrll r = r->skip[PF_SKIP_AF].ptr;
866 1.3.2.2 skrll else if (r->proto && r->proto != h->ip_p)
867 1.3.2.2 skrll r = r->skip[PF_SKIP_PROTO].ptr;
868 1.3.2.2 skrll else if (PF_MISMATCHAW(&r->src.addr,
869 1.3.2.5 skrll (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.neg))
870 1.3.2.2 skrll r = r->skip[PF_SKIP_SRC_ADDR].ptr;
871 1.3.2.2 skrll else if (PF_MISMATCHAW(&r->dst.addr,
872 1.3.2.5 skrll (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.neg))
873 1.3.2.2 skrll r = r->skip[PF_SKIP_DST_ADDR].ptr;
874 1.3.2.2 skrll else
875 1.3.2.2 skrll break;
876 1.3.2.2 skrll }
877 1.3.2.2 skrll
878 1.3.2.2 skrll if (r == NULL)
879 1.3.2.2 skrll return (PF_PASS);
880 1.3.2.2 skrll else
881 1.3.2.2 skrll r->packets++;
882 1.3.2.2 skrll
883 1.3.2.2 skrll /* Check for illegal packets */
884 1.3.2.2 skrll if (hlen < (int)sizeof(struct ip))
885 1.3.2.2 skrll goto drop;
886 1.3.2.2 skrll
887 1.3.2.2 skrll if (hlen > ntohs(h->ip_len))
888 1.3.2.2 skrll goto drop;
889 1.3.2.2 skrll
890 1.3.2.2 skrll /* Clear IP_DF if the rule uses the no-df option */
891 1.3.2.2 skrll if (r->rule_flag & PFRULE_NODF)
892 1.3.2.2 skrll h->ip_off &= htons(~IP_DF);
893 1.3.2.2 skrll
894 1.3.2.2 skrll /* We will need other tests here */
895 1.3.2.2 skrll if (!fragoff && !mff)
896 1.3.2.2 skrll goto no_fragment;
897 1.3.2.2 skrll
898 1.3.2.2 skrll /* We're dealing with a fragment now. Don't allow fragments
899 1.3.2.2 skrll * with IP_DF to enter the cache. If the flag was cleared by
900 1.3.2.2 skrll * no-df above, fine. Otherwise drop it.
901 1.3.2.2 skrll */
902 1.3.2.2 skrll if (h->ip_off & htons(IP_DF)) {
903 1.3.2.2 skrll DPFPRINTF(("IP_DF\n"));
904 1.3.2.2 skrll goto bad;
905 1.3.2.2 skrll }
906 1.3.2.2 skrll
907 1.3.2.2 skrll ip_len = ntohs(h->ip_len) - hlen;
908 1.3.2.2 skrll ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
909 1.3.2.2 skrll
910 1.3.2.2 skrll /* All fragments are 8 byte aligned */
911 1.3.2.2 skrll if (mff && (ip_len & 0x7)) {
912 1.3.2.2 skrll DPFPRINTF(("mff and %d\n", ip_len));
913 1.3.2.2 skrll goto bad;
914 1.3.2.2 skrll }
915 1.3.2.2 skrll
916 1.3.2.2 skrll /* Respect maximum length */
917 1.3.2.2 skrll if (fragoff + ip_len > IP_MAXPACKET) {
918 1.3.2.2 skrll DPFPRINTF(("max packet %d\n", fragoff + ip_len));
919 1.3.2.2 skrll goto bad;
920 1.3.2.2 skrll }
921 1.3.2.2 skrll max = fragoff + ip_len;
922 1.3.2.2 skrll
923 1.3.2.2 skrll if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
924 1.3.2.2 skrll /* Fully buffer all of the fragments */
925 1.3.2.2 skrll
926 1.3.2.2 skrll frag = pf_find_fragment(h, &pf_frag_tree);
927 1.3.2.2 skrll
928 1.3.2.2 skrll /* Check if we saw the last fragment already */
929 1.3.2.2 skrll if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
930 1.3.2.2 skrll max > frag->fr_max)
931 1.3.2.2 skrll goto bad;
932 1.3.2.2 skrll
933 1.3.2.2 skrll /* Get an entry for the fragment queue */
934 1.3.2.2 skrll frent = pool_get(&pf_frent_pl, PR_NOWAIT);
935 1.3.2.2 skrll if (frent == NULL) {
936 1.3.2.2 skrll REASON_SET(reason, PFRES_MEMORY);
937 1.3.2.2 skrll return (PF_DROP);
938 1.3.2.2 skrll }
939 1.3.2.2 skrll pf_nfrents++;
940 1.3.2.2 skrll frent->fr_ip = h;
941 1.3.2.2 skrll frent->fr_m = m;
942 1.3.2.2 skrll
943 1.3.2.2 skrll /* Might return a completely reassembled mbuf, or NULL */
944 1.3.2.2 skrll DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
945 1.3.2.2 skrll *m0 = m = pf_reassemble(m0, &frag, frent, mff);
946 1.3.2.2 skrll
947 1.3.2.2 skrll if (m == NULL)
948 1.3.2.2 skrll return (PF_DROP);
949 1.3.2.2 skrll
950 1.3.2.2 skrll if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
951 1.3.2.2 skrll goto drop;
952 1.3.2.2 skrll
953 1.3.2.2 skrll h = mtod(m, struct ip *);
954 1.3.2.2 skrll } else {
955 1.3.2.2 skrll /* non-buffering fragment cache (drops or masks overlaps) */
956 1.3.2.2 skrll int nomem = 0;
957 1.3.2.2 skrll
958 1.3.2.2 skrll if (dir == PF_OUT) {
959 1.3.2.2 skrll if (m_tag_find(m, PACKET_TAG_PF_FRAGCACHE, NULL) !=
960 1.3.2.2 skrll NULL) {
961 1.3.2.2 skrll /* Already passed the fragment cache in the
962 1.3.2.2 skrll * input direction. If we continued, it would
963 1.3.2.2 skrll * appear to be a dup and would be dropped.
964 1.3.2.2 skrll */
965 1.3.2.2 skrll goto fragment_pass;
966 1.3.2.2 skrll }
967 1.3.2.2 skrll }
968 1.3.2.2 skrll
969 1.3.2.2 skrll frag = pf_find_fragment(h, &pf_cache_tree);
970 1.3.2.2 skrll
971 1.3.2.2 skrll /* Check if we saw the last fragment already */
972 1.3.2.2 skrll if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
973 1.3.2.2 skrll max > frag->fr_max) {
974 1.3.2.2 skrll if (r->rule_flag & PFRULE_FRAGDROP)
975 1.3.2.2 skrll frag->fr_flags |= PFFRAG_DROP;
976 1.3.2.2 skrll goto bad;
977 1.3.2.2 skrll }
978 1.3.2.2 skrll
979 1.3.2.2 skrll *m0 = m = pf_fragcache(m0, h, &frag, mff,
980 1.3.2.2 skrll (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
981 1.3.2.2 skrll if (m == NULL) {
982 1.3.2.2 skrll if (nomem)
983 1.3.2.2 skrll goto no_mem;
984 1.3.2.2 skrll goto drop;
985 1.3.2.2 skrll }
986 1.3.2.2 skrll
987 1.3.2.2 skrll if (dir == PF_IN) {
988 1.3.2.2 skrll struct m_tag *mtag;
989 1.3.2.2 skrll
990 1.3.2.2 skrll mtag = m_tag_get(PACKET_TAG_PF_FRAGCACHE, 0, M_NOWAIT);
991 1.3.2.2 skrll if (mtag == NULL)
992 1.3.2.2 skrll goto no_mem;
993 1.3.2.2 skrll m_tag_prepend(m, mtag);
994 1.3.2.2 skrll }
995 1.3.2.2 skrll if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
996 1.3.2.2 skrll goto drop;
997 1.3.2.2 skrll goto fragment_pass;
998 1.3.2.2 skrll }
999 1.3.2.2 skrll
1000 1.3.2.2 skrll no_fragment:
1001 1.3.2.2 skrll /* At this point, only IP_DF is allowed in ip_off */
1002 1.3.2.2 skrll h->ip_off &= htons(IP_DF);
1003 1.3.2.2 skrll
1004 1.3.2.2 skrll /* Enforce a minimum ttl; this may cause endless packet loops */
1005 1.3.2.2 skrll if (r->min_ttl && h->ip_ttl < r->min_ttl)
1006 1.3.2.2 skrll h->ip_ttl = r->min_ttl;
1007 1.3.2.2 skrll
1008 1.3.2.5 skrll if (r->rule_flag & PFRULE_RANDOMID) {
1009 1.3.2.5 skrll u_int16_t ip_id = h->ip_id;
1010 1.3.2.5 skrll
1011 1.3.2.2 skrll h->ip_id = ip_randomid();
1012 1.3.2.5 skrll h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
1013 1.3.2.5 skrll }
1014 1.3.2.5 skrll if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
1015 1.3.2.5 skrll pd->flags |= PFDESC_IP_REAS;
1016 1.3.2.2 skrll
1017 1.3.2.2 skrll return (PF_PASS);
1018 1.3.2.2 skrll
1019 1.3.2.2 skrll fragment_pass:
1020 1.3.2.2 skrll /* Enforce a minimum ttl; this may cause endless packet loops */
1021 1.3.2.2 skrll if (r->min_ttl && h->ip_ttl < r->min_ttl)
1022 1.3.2.2 skrll h->ip_ttl = r->min_ttl;
1023 1.3.2.5 skrll if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
1024 1.3.2.5 skrll pd->flags |= PFDESC_IP_REAS;
1025 1.3.2.2 skrll return (PF_PASS);
1026 1.3.2.2 skrll
1027 1.3.2.2 skrll no_mem:
1028 1.3.2.2 skrll REASON_SET(reason, PFRES_MEMORY);
1029 1.3.2.2 skrll if (r != NULL && r->log)
1030 1.3.2.2 skrll PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
1031 1.3.2.2 skrll return (PF_DROP);
1032 1.3.2.2 skrll
1033 1.3.2.2 skrll drop:
1034 1.3.2.2 skrll REASON_SET(reason, PFRES_NORM);
1035 1.3.2.2 skrll if (r != NULL && r->log)
1036 1.3.2.2 skrll PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
1037 1.3.2.2 skrll return (PF_DROP);
1038 1.3.2.2 skrll
1039 1.3.2.2 skrll bad:
1040 1.3.2.2 skrll DPFPRINTF(("dropping bad fragment\n"));
1041 1.3.2.2 skrll
1042 1.3.2.2 skrll /* Free associated fragments */
1043 1.3.2.2 skrll if (frag != NULL)
1044 1.3.2.2 skrll pf_free_fragment(frag);
1045 1.3.2.2 skrll
1046 1.3.2.2 skrll REASON_SET(reason, PFRES_FRAG);
1047 1.3.2.2 skrll if (r != NULL && r->log)
1048 1.3.2.2 skrll PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
1049 1.3.2.2 skrll
1050 1.3.2.2 skrll return (PF_DROP);
1051 1.3.2.2 skrll }
1052 1.3.2.2 skrll
1053 1.3.2.2 skrll #ifdef INET6
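/*
 * Normalize an IPv6 packet: walk the extension header chain, validate
 * option and jumbo payload lengths, and sanity check any fragment
 * header.  IPv6 fragments are not reassembled here yet.
 */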
1054 1.3.2.2 skrll int
1055 1.3.2.2 skrll pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
1056 1.3.2.5 skrll u_short *reason, struct pf_pdesc *pd)
1057 1.3.2.2 skrll {
1058 1.3.2.2 skrll struct mbuf *m = *m0;
1059 1.3.2.2 skrll struct pf_rule *r;
1060 1.3.2.2 skrll struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1061 1.3.2.2 skrll int off;
1062 1.3.2.2 skrll struct ip6_ext ext;
1063 1.3.2.2 skrll struct ip6_opt opt;
1064 1.3.2.2 skrll struct ip6_opt_jumbo jumbo;
1065 1.3.2.2 skrll struct ip6_frag frag;
1066 1.3.2.2 skrll u_int32_t jumbolen = 0, plen;
1067 1.3.2.2 skrll u_int16_t fragoff = 0;
1068 1.3.2.2 skrll int optend;
1069 1.3.2.2 skrll int ooff;
1070 1.3.2.2 skrll u_int8_t proto;
1071 1.3.2.2 skrll int terminal;
1072 1.3.2.2 skrll
1073 1.3.2.2 skrll r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1074 1.3.2.2 skrll while (r != NULL) {
1075 1.3.2.2 skrll r->evaluations++;
1076 1.3.2.2 skrll if (r->kif != NULL &&
1077 1.3.2.2 skrll (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
1078 1.3.2.2 skrll r = r->skip[PF_SKIP_IFP].ptr;
1079 1.3.2.2 skrll else if (r->direction && r->direction != dir)
1080 1.3.2.2 skrll r = r->skip[PF_SKIP_DIR].ptr;
1081 1.3.2.2 skrll else if (r->af && r->af != AF_INET6)
1082 1.3.2.2 skrll r = r->skip[PF_SKIP_AF].ptr;
1083 1.3.2.2 skrll #if 0 /* header chain! */
1084 1.3.2.2 skrll else if (r->proto && r->proto != h->ip6_nxt)
1085 1.3.2.2 skrll r = r->skip[PF_SKIP_PROTO].ptr;
1086 1.3.2.2 skrll #endif
1087 1.3.2.2 skrll else if (PF_MISMATCHAW(&r->src.addr,
1088 1.3.2.5 skrll (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.neg))
1089 1.3.2.2 skrll r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1090 1.3.2.2 skrll else if (PF_MISMATCHAW(&r->dst.addr,
1091 1.3.2.5 skrll (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.neg))
1092 1.3.2.2 skrll r = r->skip[PF_SKIP_DST_ADDR].ptr;
1093 1.3.2.2 skrll else
1094 1.3.2.2 skrll break;
1095 1.3.2.2 skrll }
1096 1.3.2.2 skrll
1097 1.3.2.2 skrll if (r == NULL)
1098 1.3.2.2 skrll return (PF_PASS);
1099 1.3.2.2 skrll else
1100 1.3.2.2 skrll r->packets++;
1101 1.3.2.2 skrll
1102 1.3.2.2 skrll /* Check for illegal packets */
1103 1.3.2.2 skrll if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
1104 1.3.2.2 skrll goto drop;
1105 1.3.2.2 skrll
1106 1.3.2.2 skrll off = sizeof(struct ip6_hdr);
1107 1.3.2.2 skrll proto = h->ip6_nxt;
1108 1.3.2.2 skrll terminal = 0;
1109 1.3.2.2 skrll do {
1110 1.3.2.2 skrll switch (proto) {
1111 1.3.2.2 skrll case IPPROTO_FRAGMENT:
1112 1.3.2.2 skrll goto fragment;
1113 1.3.2.2 skrll break;
1114 1.3.2.2 skrll case IPPROTO_AH:
1115 1.3.2.2 skrll case IPPROTO_ROUTING:
1116 1.3.2.2 skrll case IPPROTO_DSTOPTS:
1117 1.3.2.2 skrll if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1118 1.3.2.2 skrll NULL, AF_INET6))
1119 1.3.2.2 skrll goto shortpkt;
1120 1.3.2.2 skrll if (proto == IPPROTO_AH)
1121 1.3.2.2 skrll off += (ext.ip6e_len + 2) * 4;
1122 1.3.2.2 skrll else
1123 1.3.2.2 skrll off += (ext.ip6e_len + 1) * 8;
1124 1.3.2.2 skrll proto = ext.ip6e_nxt;
1125 1.3.2.2 skrll break;
1126 1.3.2.2 skrll case IPPROTO_HOPOPTS:
1127 1.3.2.2 skrll if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1128 1.3.2.2 skrll NULL, AF_INET6))
1129 1.3.2.2 skrll goto shortpkt;
1130 1.3.2.2 skrll optend = off + (ext.ip6e_len + 1) * 8;
1131 1.3.2.2 skrll ooff = off + sizeof(ext);
1132 1.3.2.2 skrll do {
1133 1.3.2.2 skrll if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
1134 1.3.2.2 skrll sizeof(opt.ip6o_type), NULL, NULL,
1135 1.3.2.2 skrll AF_INET6))
1136 1.3.2.2 skrll goto shortpkt;
1137 1.3.2.2 skrll if (opt.ip6o_type == IP6OPT_PAD1) {
1138 1.3.2.2 skrll ooff++;
1139 1.3.2.2 skrll continue;
1140 1.3.2.2 skrll }
1141 1.3.2.2 skrll if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
1142 1.3.2.2 skrll NULL, NULL, AF_INET6))
1143 1.3.2.2 skrll goto shortpkt;
1144 1.3.2.2 skrll if (ooff + sizeof(opt) + opt.ip6o_len > optend)
1145 1.3.2.2 skrll goto drop;
1146 1.3.2.2 skrll switch (opt.ip6o_type) {
1147 1.3.2.2 skrll case IP6OPT_JUMBO:
1148 1.3.2.2 skrll if (h->ip6_plen != 0)
1149 1.3.2.2 skrll goto drop;
1150 1.3.2.2 skrll if (!pf_pull_hdr(m, ooff, &jumbo,
1151 1.3.2.2 skrll sizeof(jumbo), NULL, NULL,
1152 1.3.2.2 skrll AF_INET6))
1153 1.3.2.2 skrll goto shortpkt;
1154 1.3.2.2 skrll memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
1155 1.3.2.2 skrll sizeof(jumbolen));
1156 1.3.2.2 skrll jumbolen = ntohl(jumbolen);
1157 1.3.2.2 skrll if (jumbolen <= IPV6_MAXPACKET)
1158 1.3.2.2 skrll goto drop;
1159 1.3.2.2 skrll if (sizeof(struct ip6_hdr) + jumbolen !=
1160 1.3.2.2 skrll m->m_pkthdr.len)
1161 1.3.2.2 skrll goto drop;
1162 1.3.2.2 skrll break;
1163 1.3.2.2 skrll default:
1164 1.3.2.2 skrll break;
1165 1.3.2.2 skrll }
1166 1.3.2.2 skrll ooff += sizeof(opt) + opt.ip6o_len;
1167 1.3.2.2 skrll } while (ooff < optend);
1168 1.3.2.2 skrll
1169 1.3.2.2 skrll off = optend;
1170 1.3.2.2 skrll proto = ext.ip6e_nxt;
1171 1.3.2.2 skrll break;
1172 1.3.2.2 skrll default:
1173 1.3.2.2 skrll terminal = 1;
1174 1.3.2.2 skrll break;
1175 1.3.2.2 skrll }
1176 1.3.2.2 skrll } while (!terminal);
1177 1.3.2.2 skrll
1178 1.3.2.2 skrll /* jumbo payload option must be present, or plen > 0 */
1179 1.3.2.2 skrll if (ntohs(h->ip6_plen) == 0)
1180 1.3.2.2 skrll plen = jumbolen;
1181 1.3.2.2 skrll else
1182 1.3.2.2 skrll plen = ntohs(h->ip6_plen);
1183 1.3.2.2 skrll if (plen == 0)
1184 1.3.2.2 skrll goto drop;
1185 1.3.2.2 skrll if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1186 1.3.2.2 skrll goto shortpkt;
1187 1.3.2.2 skrll
1188 1.3.2.2 skrll /* Enforce a minimum ttl; this may cause endless packet loops */
1189 1.3.2.2 skrll if (r->min_ttl && h->ip6_hlim < r->min_ttl)
1190 1.3.2.2 skrll h->ip6_hlim = r->min_ttl;
1191 1.3.2.2 skrll
1192 1.3.2.2 skrll return (PF_PASS);
1193 1.3.2.2 skrll
1194 1.3.2.2 skrll fragment:
1195 1.3.2.2 skrll if (ntohs(h->ip6_plen) == 0 || jumbolen)
1196 1.3.2.2 skrll goto drop;
1197 1.3.2.2 skrll plen = ntohs(h->ip6_plen);
1198 1.3.2.2 skrll
1199 1.3.2.2 skrll if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1200 1.3.2.2 skrll goto shortpkt;
1201 1.3.2.2 skrll fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
1202 1.3.2.2 skrll if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
1203 1.3.2.2 skrll goto badfrag;
1204 1.3.2.2 skrll
1205 1.3.2.2 skrll /* do something about it */
1206 1.3.2.5 skrll /* remember to set pd->flags |= PFDESC_IP_REAS */
1207 1.3.2.2 skrll return (PF_PASS);
1208 1.3.2.2 skrll
1209 1.3.2.2 skrll shortpkt:
1210 1.3.2.2 skrll REASON_SET(reason, PFRES_SHORT);
1211 1.3.2.2 skrll if (r != NULL && r->log)
1212 1.3.2.2 skrll PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1213 1.3.2.2 skrll return (PF_DROP);
1214 1.3.2.2 skrll
1215 1.3.2.2 skrll drop:
1216 1.3.2.2 skrll REASON_SET(reason, PFRES_NORM);
1217 1.3.2.2 skrll if (r != NULL && r->log)
1218 1.3.2.2 skrll PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1219 1.3.2.2 skrll return (PF_DROP);
1220 1.3.2.2 skrll
1221 1.3.2.2 skrll badfrag:
1222 1.3.2.2 skrll REASON_SET(reason, PFRES_FRAG);
1223 1.3.2.2 skrll if (r != NULL && r->log)
1224 1.3.2.2 skrll PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
1225 1.3.2.2 skrll return (PF_DROP);
1226 1.3.2.2 skrll }
1227 1.3.2.5 skrll #endif /* INET6 */
1228 1.3.2.2 skrll
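/*
 * Normalize a TCP segment: drop illegal flag combinations, clear the
 * reserved header bits and stray urgent pointers, and optionally clamp
 * the MSS option via pf_normalize_tcpopt().
 */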
1229 1.3.2.2 skrll int
1230 1.3.2.2 skrll pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
1231 1.3.2.2 skrll int off, void *h, struct pf_pdesc *pd)
1232 1.3.2.2 skrll {
1233 1.3.2.2 skrll struct pf_rule *r, *rm = NULL;
1234 1.3.2.2 skrll struct tcphdr *th = pd->hdr.tcp;
1235 1.3.2.2 skrll int rewrite = 0;
1236 1.3.2.2 skrll u_short reason;
1237 1.3.2.2 skrll u_int8_t flags;
1238 1.3.2.2 skrll sa_family_t af = pd->af;
1239 1.3.2.2 skrll
1240 1.3.2.2 skrll r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1241 1.3.2.2 skrll while (r != NULL) {
1242 1.3.2.2 skrll r->evaluations++;
1243 1.3.2.2 skrll if (r->kif != NULL &&
1244 1.3.2.2 skrll (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
1245 1.3.2.2 skrll r = r->skip[PF_SKIP_IFP].ptr;
1246 1.3.2.2 skrll else if (r->direction && r->direction != dir)
1247 1.3.2.2 skrll r = r->skip[PF_SKIP_DIR].ptr;
1248 1.3.2.2 skrll else if (r->af && r->af != af)
1249 1.3.2.2 skrll r = r->skip[PF_SKIP_AF].ptr;
1250 1.3.2.2 skrll else if (r->proto && r->proto != pd->proto)
1251 1.3.2.2 skrll r = r->skip[PF_SKIP_PROTO].ptr;
1252 1.3.2.5 skrll else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.neg))
1253 1.3.2.2 skrll r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1254 1.3.2.2 skrll else if (r->src.port_op && !pf_match_port(r->src.port_op,
1255 1.3.2.2 skrll r->src.port[0], r->src.port[1], th->th_sport))
1256 1.3.2.2 skrll r = r->skip[PF_SKIP_SRC_PORT].ptr;
1257 1.3.2.5 skrll else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.neg))
1258 1.3.2.2 skrll r = r->skip[PF_SKIP_DST_ADDR].ptr;
1259 1.3.2.2 skrll else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1260 1.3.2.2 skrll r->dst.port[0], r->dst.port[1], th->th_dport))
1261 1.3.2.2 skrll r = r->skip[PF_SKIP_DST_PORT].ptr;
1262 1.3.2.2 skrll else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1263 1.3.2.2 skrll pf_osfp_fingerprint(pd, m, off, th),
1264 1.3.2.2 skrll r->os_fingerprint))
1265 1.3.2.2 skrll r = TAILQ_NEXT(r, entries);
1266 1.3.2.2 skrll else {
1267 1.3.2.2 skrll rm = r;
1268 1.3.2.2 skrll break;
1269 1.3.2.2 skrll }
1270 1.3.2.2 skrll }
1271 1.3.2.2 skrll
1272 1.3.2.2 skrll if (rm == NULL)
1273 1.3.2.2 skrll return (PF_PASS);
1274 1.3.2.2 skrll else
1275 1.3.2.2 skrll r->packets++;
1276 1.3.2.2 skrll
1277 1.3.2.2 skrll if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1278 1.3.2.2 skrll pd->flags |= PFDESC_TCP_NORM;
1279 1.3.2.2 skrll
1280 1.3.2.2 skrll flags = th->th_flags;
1281 1.3.2.2 skrll if (flags & TH_SYN) {
1282 1.3.2.2 skrll /* Illegal packet */
1283 1.3.2.2 skrll if (flags & TH_RST)
1284 1.3.2.2 skrll goto tcp_drop;
1285 1.3.2.2 skrll
1286 1.3.2.2 skrll if (flags & TH_FIN)
1287 1.3.2.2 skrll flags &= ~TH_FIN;
1288 1.3.2.2 skrll } else {
1289 1.3.2.2 skrll /* Illegal packet */
1290 1.3.2.2 skrll if (!(flags & (TH_ACK|TH_RST)))
1291 1.3.2.2 skrll goto tcp_drop;
1292 1.3.2.2 skrll }
1293 1.3.2.2 skrll
1294 1.3.2.2 skrll if (!(flags & TH_ACK)) {
1295 1.3.2.2 skrll /* These flags are only valid if ACK is set */
1296 1.3.2.2 skrll if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1297 1.3.2.2 skrll goto tcp_drop;
1298 1.3.2.2 skrll }
1299 1.3.2.2 skrll
1300 1.3.2.2 skrll /* Check for illegal header length */
1301 1.3.2.2 skrll if (th->th_off < (sizeof(struct tcphdr) >> 2))
1302 1.3.2.2 skrll goto tcp_drop;
1303 1.3.2.2 skrll
1304 1.3.2.2 skrll /* If flags changed, or reserved data set, then adjust */
1305 1.3.2.2 skrll if (flags != th->th_flags || th->th_x2 != 0) {
1306 1.3.2.2 skrll u_int16_t ov, nv;
1307 1.3.2.2 skrll
1308 1.3.2.2 skrll ov = *(u_int16_t *)(&th->th_ack + 1);
1309 1.3.2.2 skrll th->th_flags = flags;
1310 1.3.2.2 skrll th->th_x2 = 0;
1311 1.3.2.2 skrll nv = *(u_int16_t *)(&th->th_ack + 1);
1312 1.3.2.2 skrll
1313 1.3.2.5 skrll th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
1314 1.3.2.2 skrll rewrite = 1;
1315 1.3.2.2 skrll }
1316 1.3.2.2 skrll
1317 1.3.2.2 skrll /* Remove urgent pointer, if TH_URG is not set */
1318 1.3.2.2 skrll if (!(flags & TH_URG) && th->th_urp) {
1319 1.3.2.5 skrll th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
1320 1.3.2.2 skrll th->th_urp = 0;
1321 1.3.2.2 skrll rewrite = 1;
1322 1.3.2.2 skrll }
1323 1.3.2.2 skrll
1324 1.3.2.2 skrll /* Process options */
1325 1.3.2.2 skrll if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
1326 1.3.2.2 skrll rewrite = 1;
1327 1.3.2.2 skrll
1328 1.3.2.2 skrll /* copy back packet headers if we sanitized */
1329 1.3.2.2 skrll if (rewrite)
1330 1.3.2.3 skrll m_copyback(m, off, sizeof(*th), th);
1331 1.3.2.2 skrll
1332 1.3.2.2 skrll return (PF_PASS);
1333 1.3.2.2 skrll
1334 1.3.2.2 skrll tcp_drop:
1335 1.3.2.2 skrll REASON_SET(&reason, PFRES_NORM);
1336 1.3.2.2 skrll if (rm != NULL && r->log)
1337 1.3.2.2 skrll PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL);
1338 1.3.2.2 skrll return (PF_DROP);
1339 1.3.2.2 skrll }
1340 1.3.2.2 skrll
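/*
 * Allocate the per-peer scrub state for a new connection: record the
 * initial TTL and, if the SYN carries a timestamp option, seed the
 * timestamp modulation value and the data later used for PAWS checks.
 */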
1341 1.3.2.2 skrll int
1342 1.3.2.2 skrll pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
1343 1.3.2.2 skrll struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
1344 1.3.2.2 skrll {
1345 1.3.2.5 skrll u_int32_t tsval, tsecr;
1346 1.3.2.2 skrll u_int8_t hdr[60];
1347 1.3.2.2 skrll u_int8_t *opt;
1348 1.3.2.2 skrll
1349 1.3.2.2 skrll KASSERT(src->scrub == NULL);
1350 1.3.2.2 skrll
1351 1.3.2.2 skrll src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
1352 1.3.2.2 skrll if (src->scrub == NULL)
1353 1.3.2.2 skrll return (1);
1354 1.3.2.2 skrll bzero(src->scrub, sizeof(*src->scrub));
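	/*
	 * The scrub entry holds this peer's normalization state: the TTL
	 * bookkeeping, the timestamp modulation offset and the PAWS values
	 * used later by pf_normalize_tcp_stateful().
	 */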
1355 1.3.2.2 skrll
1356 1.3.2.2 skrll switch (pd->af) {
1357 1.3.2.2 skrll #ifdef INET
1358 1.3.2.2 skrll case AF_INET: {
1359 1.3.2.2 skrll struct ip *h = mtod(m, struct ip *);
1360 1.3.2.2 skrll src->scrub->pfss_ttl = h->ip_ttl;
1361 1.3.2.2 skrll break;
1362 1.3.2.2 skrll }
1363 1.3.2.2 skrll #endif /* INET */
1364 1.3.2.2 skrll #ifdef INET6
1365 1.3.2.2 skrll case AF_INET6: {
1366 1.3.2.2 skrll struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1367 1.3.2.2 skrll src->scrub->pfss_ttl = h->ip6_hlim;
1368 1.3.2.2 skrll break;
1369 1.3.2.2 skrll }
1370 1.3.2.2 skrll #endif /* INET6 */
1371 1.3.2.2 skrll }
1372 1.3.2.2 skrll
1373 1.3.2.2 skrll
1374 1.3.2.2 skrll /*
1375 1.3.2.2 skrll * All normalizations below are only begun if we see the start of
1376 1.3.2.2 skrll	 * the connection.  They must all set an enabled bit in pfss_flags.
1377 1.3.2.2 skrll */
1378 1.3.2.2 skrll if ((th->th_flags & TH_SYN) == 0)
1379 1.3.2.2 skrll return (0);
1380 1.3.2.2 skrll
1381 1.3.2.2 skrll
1382 1.3.2.2 skrll if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
1383 1.3.2.2 skrll pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1384 1.3.2.2 skrll /* Diddle with TCP options */
1385 1.3.2.2 skrll int hlen;
1386 1.3.2.2 skrll opt = hdr + sizeof(struct tcphdr);
1387 1.3.2.2 skrll hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1388 1.3.2.2 skrll while (hlen >= TCPOLEN_TIMESTAMP) {
1389 1.3.2.2 skrll switch (*opt) {
1390 1.3.2.2 skrll case TCPOPT_EOL: /* FALLTHROUGH */
1391 1.3.2.2 skrll case TCPOPT_NOP:
1392 1.3.2.2 skrll opt++;
1393 1.3.2.2 skrll hlen--;
1394 1.3.2.2 skrll break;
1395 1.3.2.2 skrll case TCPOPT_TIMESTAMP:
1396 1.3.2.2 skrll if (opt[1] >= TCPOLEN_TIMESTAMP) {
1397 1.3.2.2 skrll src->scrub->pfss_flags |=
1398 1.3.2.2 skrll PFSS_TIMESTAMP;
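					/*
					 * Random per-connection offset later
					 * added to this host's outgoing
					 * timestamps (timestamp modulation).
					 */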
1399 1.3.2.5 skrll src->scrub->pfss_ts_mod =
1400 1.3.2.5 skrll htonl(arc4random());
1401 1.3.2.5 skrll
1402 1.3.2.5 skrll /* note PFSS_PAWS not set yet */
1403 1.3.2.5 skrll memcpy(&tsval, &opt[2],
1404 1.3.2.5 skrll sizeof(u_int32_t));
1405 1.3.2.5 skrll memcpy(&tsecr, &opt[6],
1406 1.3.2.5 skrll sizeof(u_int32_t));
1407 1.3.2.5 skrll src->scrub->pfss_tsval0 = ntohl(tsval);
1408 1.3.2.5 skrll src->scrub->pfss_tsval = ntohl(tsval);
1409 1.3.2.5 skrll src->scrub->pfss_tsecr = ntohl(tsecr);
1410 1.3.2.5 skrll getmicrouptime(&src->scrub->pfss_last);
1411 1.3.2.2 skrll }
1412 1.3.2.2 skrll /* FALLTHROUGH */
1413 1.3.2.2 skrll default:
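				/*
				 * Advance past the option; MAX() guards
				 * against a malformed option length of 0 or 1
				 * that would otherwise prevent forward
				 * progress.
				 */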
1414 1.3.2.5 skrll hlen -= MAX(opt[1], 2);
1415 1.3.2.5 skrll opt += MAX(opt[1], 2);
1416 1.3.2.2 skrll break;
1417 1.3.2.2 skrll }
1418 1.3.2.2 skrll }
1419 1.3.2.2 skrll }
1420 1.3.2.2 skrll
1421 1.3.2.2 skrll return (0);
1422 1.3.2.2 skrll }
1423 1.3.2.2 skrll
1424 1.3.2.2 skrll void
1425 1.3.2.2 skrll pf_normalize_tcp_cleanup(struct pf_state *state)
1426 1.3.2.2 skrll {
1427 1.3.2.2 skrll if (state->src.scrub)
1428 1.3.2.2 skrll pool_put(&pf_state_scrub_pl, state->src.scrub);
1429 1.3.2.2 skrll if (state->dst.scrub)
1430 1.3.2.2 skrll pool_put(&pf_state_scrub_pl, state->dst.scrub);
1431 1.3.2.2 skrll
1432 1.3.2.2 skrll /* Someday... flush the TCP segment reassembly descriptors. */
1433 1.3.2.2 skrll }
1434 1.3.2.2 skrll
1435 1.3.2.2 skrll int
1436 1.3.2.2 skrll pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
1437 1.3.2.5 skrll u_short *reason, struct tcphdr *th, struct pf_state *state,
1438 1.3.2.5 skrll struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1439 1.3.2.2 skrll {
1440 1.3.2.5 skrll struct timeval uptime;
1441 1.3.2.5 skrll u_int32_t tsval, tsecr;
1442 1.3.2.5 skrll u_int tsval_from_last;
1443 1.3.2.2 skrll u_int8_t hdr[60];
1444 1.3.2.2 skrll u_int8_t *opt;
1445 1.3.2.2 skrll int copyback = 0;
1446 1.3.2.5 skrll int got_ts = 0;
1447 1.3.2.2 skrll
1448 1.3.2.2 skrll KASSERT(src->scrub || dst->scrub);
1449 1.3.2.2 skrll
1450 1.3.2.2 skrll /*
1451 1.3.2.2 skrll * Enforce the minimum TTL seen for this connection. Negate a common
1452 1.3.2.2 skrll * technique to evade an intrusion detection system and confuse
1453 1.3.2.2 skrll * firewall state code.
1454 1.3.2.2 skrll */
1455 1.3.2.2 skrll switch (pd->af) {
1456 1.3.2.2 skrll #ifdef INET
1457 1.3.2.2 skrll case AF_INET: {
1458 1.3.2.2 skrll if (src->scrub) {
1459 1.3.2.2 skrll struct ip *h = mtod(m, struct ip *);
1460 1.3.2.2 skrll if (h->ip_ttl > src->scrub->pfss_ttl)
1461 1.3.2.2 skrll src->scrub->pfss_ttl = h->ip_ttl;
1462 1.3.2.2 skrll h->ip_ttl = src->scrub->pfss_ttl;
1463 1.3.2.2 skrll }
1464 1.3.2.2 skrll break;
1465 1.3.2.2 skrll }
1466 1.3.2.2 skrll #endif /* INET */
1467 1.3.2.2 skrll #ifdef INET6
1468 1.3.2.2 skrll case AF_INET6: {
1469 1.3.2.2 skrll if (src->scrub) {
1470 1.3.2.2 skrll struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1471 1.3.2.2 skrll if (h->ip6_hlim > src->scrub->pfss_ttl)
1472 1.3.2.2 skrll src->scrub->pfss_ttl = h->ip6_hlim;
1473 1.3.2.2 skrll h->ip6_hlim = src->scrub->pfss_ttl;
1474 1.3.2.2 skrll }
1475 1.3.2.2 skrll break;
1476 1.3.2.2 skrll }
1477 1.3.2.2 skrll #endif /* INET6 */
1478 1.3.2.2 skrll }
1479 1.3.2.2 skrll
1480 1.3.2.2 skrll if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
1481 1.3.2.2 skrll ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1482 1.3.2.2 skrll (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1483 1.3.2.2 skrll pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1484 1.3.2.2 skrll /* Diddle with TCP options */
1485 1.3.2.2 skrll int hlen;
1486 1.3.2.2 skrll opt = hdr + sizeof(struct tcphdr);
1487 1.3.2.2 skrll hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1488 1.3.2.2 skrll while (hlen >= TCPOLEN_TIMESTAMP) {
1489 1.3.2.2 skrll switch (*opt) {
1490 1.3.2.2 skrll case TCPOPT_EOL: /* FALLTHROUGH */
1491 1.3.2.2 skrll case TCPOPT_NOP:
1492 1.3.2.2 skrll opt++;
1493 1.3.2.2 skrll hlen--;
1494 1.3.2.2 skrll break;
1495 1.3.2.2 skrll case TCPOPT_TIMESTAMP:
1496 1.3.2.2 skrll				/* Modulate the timestamps.  Otherwise they
1497 1.3.2.2 skrll				 * can be used for NAT detection, OS uptime
1498 1.3.2.2 skrll				 * determination or reboot detection.
1499 1.3.2.2 skrll */
1500 1.3.2.5 skrll
1501 1.3.2.5 skrll if (got_ts) {
1502 1.3.2.5 skrll /* Huh? Multiple timestamps!? */
1503 1.3.2.5 skrll if (pf_status.debug >= PF_DEBUG_MISC) {
1504 1.3.2.5 skrll DPFPRINTF(("multiple TS??"));
1505 1.3.2.5 skrll pf_print_state(state);
1506 1.3.2.5 skrll printf("\n");
1507 1.3.2.5 skrll }
1508 1.3.2.5 skrll REASON_SET(reason, PFRES_TS);
1509 1.3.2.5 skrll return (PF_DROP);
1510 1.3.2.5 skrll }
1511 1.3.2.2 skrll if (opt[1] >= TCPOLEN_TIMESTAMP) {
1512 1.3.2.5 skrll memcpy(&tsval, &opt[2],
1513 1.3.2.5 skrll sizeof(u_int32_t));
1514 1.3.2.5 skrll if (tsval && src->scrub &&
1515 1.3.2.2 skrll (src->scrub->pfss_flags &
1516 1.3.2.2 skrll PFSS_TIMESTAMP)) {
1517 1.3.2.5 skrll tsval = ntohl(tsval);
1518 1.3.2.2 skrll pf_change_a(&opt[2],
1519 1.3.2.5 skrll &th->th_sum,
1520 1.3.2.5 skrll htonl(tsval +
1521 1.3.2.5 skrll src->scrub->pfss_ts_mod),
1522 1.3.2.5 skrll 0);
1523 1.3.2.2 skrll copyback = 1;
1524 1.3.2.2 skrll }
1525 1.3.2.2 skrll
1526 1.3.2.2 skrll /* Modulate TS reply iff valid (!0) */
1527 1.3.2.5 skrll memcpy(&tsecr, &opt[6],
1528 1.3.2.2 skrll sizeof(u_int32_t));
1529 1.3.2.5 skrll if (tsecr && dst->scrub &&
1530 1.3.2.2 skrll (dst->scrub->pfss_flags &
1531 1.3.2.2 skrll PFSS_TIMESTAMP)) {
1532 1.3.2.5 skrll tsecr = ntohl(tsecr)
1533 1.3.2.5 skrll - dst->scrub->pfss_ts_mod;
1534 1.3.2.2 skrll pf_change_a(&opt[6],
1535 1.3.2.5 skrll &th->th_sum, htonl(tsecr),
1536 1.3.2.5 skrll 0);
1537 1.3.2.2 skrll copyback = 1;
1538 1.3.2.2 skrll }
1539 1.3.2.5 skrll got_ts = 1;
1540 1.3.2.2 skrll }
1541 1.3.2.2 skrll /* FALLTHROUGH */
1542 1.3.2.2 skrll default:
1543 1.3.2.5 skrll hlen -= MAX(opt[1], 2);
1544 1.3.2.5 skrll opt += MAX(opt[1], 2);
1545 1.3.2.2 skrll break;
1546 1.3.2.2 skrll }
1547 1.3.2.2 skrll }
1548 1.3.2.2 skrll if (copyback) {
1549 1.3.2.2 skrll			/* Copy back the options; the caller copies back the header */
1550 1.3.2.2 skrll *writeback = 1;
1551 1.3.2.2 skrll m_copyback(m, off + sizeof(struct tcphdr),
1552 1.3.2.3 skrll (th->th_off << 2) - sizeof(struct tcphdr), hdr +
1553 1.3.2.3 skrll sizeof(struct tcphdr));
1554 1.3.2.2 skrll }
1555 1.3.2.2 skrll }
1556 1.3.2.2 skrll
1557 1.3.2.2 skrll
1558 1.3.2.5 skrll /*
1559 1.3.2.5 skrll * Must invalidate PAWS checks on connections idle for too long.
1560 1.3.2.5 skrll * The fastest allowed timestamp clock is 1ms. That turns out to
1561 1.3.2.5 skrll * be about 24 days before it wraps. XXX Right now our lowerbound
1562 1.3.2.5 skrll * TS echo check only works for the first 12 days of a connection
1563 1.3.2.5 skrll * when the TS has exhausted half its 32bit space
1564 1.3.2.5 skrll */
1565 1.3.2.5 skrll #define TS_MAX_IDLE (24*24*60*60)
1566 1.3.2.5 skrll #define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */
1567 1.3.2.5 skrll
1568 1.3.2.5 skrll getmicrouptime(&uptime);
1569 1.3.2.5 skrll if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1570 1.3.2.5 skrll (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1571 1.3.2.5 skrll time_second - state->creation > TS_MAX_CONN)) {
1572 1.3.2.5 skrll if (pf_status.debug >= PF_DEBUG_MISC) {
1573 1.3.2.5 skrll DPFPRINTF(("src idled out of PAWS\n"));
1574 1.3.2.5 skrll pf_print_state(state);
1575 1.3.2.5 skrll printf("\n");
1576 1.3.2.5 skrll }
1577 1.3.2.5 skrll src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1578 1.3.2.5 skrll | PFSS_PAWS_IDLED;
1579 1.3.2.5 skrll }
1580 1.3.2.5 skrll if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1581 1.3.2.5 skrll uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1582 1.3.2.5 skrll if (pf_status.debug >= PF_DEBUG_MISC) {
1583 1.3.2.5 skrll DPFPRINTF(("dst idled out of PAWS\n"));
1584 1.3.2.5 skrll pf_print_state(state);
1585 1.3.2.5 skrll printf("\n");
1586 1.3.2.5 skrll }
1587 1.3.2.5 skrll dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1588 1.3.2.5 skrll | PFSS_PAWS_IDLED;
1589 1.3.2.5 skrll }
1590 1.3.2.5 skrll
1591 1.3.2.5 skrll if (got_ts && src->scrub && dst->scrub &&
1592 1.3.2.5 skrll (src->scrub->pfss_flags & PFSS_PAWS) &&
1593 1.3.2.5 skrll (dst->scrub->pfss_flags & PFSS_PAWS)) {
1594 1.3.2.5 skrll /* Validate that the timestamps are "in-window".
1595 1.3.2.5 skrll * RFC1323 describes TCP Timestamp options that allow
1596 1.3.2.5 skrll * measurement of RTT (round trip time) and PAWS
1597 1.3.2.5 skrll * (protection against wrapped sequence numbers). PAWS
1598 1.3.2.5 skrll * gives us a set of rules for rejecting packets on
1599 1.3.2.5 skrll * long fat pipes (packets that were somehow delayed
1600 1.3.2.5 skrll * in transit longer than the time it took to send the
1601 1.3.2.5 skrll * full TCP sequence space of 4Gb). We can use these
1602 1.3.2.5 skrll * rules and infer a few others that will let us treat
1603 1.3.2.5 skrll * the 32bit timestamp and the 32bit echoed timestamp
1604 1.3.2.5 skrll * as sequence numbers to prevent a blind attacker from
1605 1.3.2.5 skrll * inserting packets into a connection.
1606 1.3.2.5 skrll *
1607 1.3.2.5 skrll * RFC1323 tells us:
1608 1.3.2.5 skrll * - The timestamp on this packet must be greater than
1609 1.3.2.5 skrll * or equal to the last value echoed by the other
1610 1.3.2.5 skrll * endpoint. The RFC says those will be discarded
1611 1.3.2.5 skrll * since it is a dup that has already been acked.
1612 1.3.2.5 skrll * This gives us a lowerbound on the timestamp.
1613 1.3.2.5 skrll * timestamp >= other last echoed timestamp
1614 1.3.2.5 skrll * - The timestamp will be less than or equal to
1615 1.3.2.5 skrll * the last timestamp plus the time between the
1616 1.3.2.5 skrll * last packet and now. The RFC defines the max
1617 1.3.2.5 skrll * clock rate as 1ms. We will allow clocks to be
1618 1.3.2.5 skrll * up to 10% fast and will allow a total difference
1619 1.3.2.5 skrll		 * of 30 seconds due to a route change.  And this
1620 1.3.2.5 skrll * gives us an upperbound on the timestamp.
1621 1.3.2.5 skrll * timestamp <= last timestamp + max ticks
1622 1.3.2.5 skrll * We have to be careful here. Windows will send an
1623 1.3.2.5 skrll * initial timestamp of zero and then initialize it
1624 1.3.2.5 skrll * to a random value after the 3whs; presumably to
1625 1.3.2.5 skrll * avoid a DoS by having to call an expensive RNG
1626 1.3.2.5 skrll * during a SYN flood. Proof MS has at least one
1627 1.3.2.5 skrll * good security geek.
1628 1.3.2.5 skrll *
1629 1.3.2.5 skrll * - The TCP timestamp option must also echo the other
1630 1.3.2.5 skrll		 * endpoint's timestamp.  The timestamp echoed is the
1631 1.3.2.5 skrll * one carried on the earliest unacknowledged segment
1632 1.3.2.5 skrll * on the left edge of the sequence window. The RFC
1633 1.3.2.5 skrll * states that the host will reject any echoed
1634 1.3.2.5 skrll * timestamps that were larger than any ever sent.
1635 1.3.2.5 skrll * This gives us an upperbound on the TS echo.
1636 1.3.2.5 skrll		 *  tsecr <= largest_tsval
1637 1.3.2.5 skrll * - The lowerbound on the TS echo is a little more
1638 1.3.2.5 skrll * tricky to determine. The other endpoint's echoed
1639 1.3.2.5 skrll * values will not decrease. But there may be
1640 1.3.2.5 skrll * network conditions that re-order packets and
1641 1.3.2.5 skrll * cause our view of them to decrease. For now the
1642 1.3.2.5 skrll * only lowerbound we can safely determine is that
1643 1.3.2.5 skrll		 * the TS echo will never be less than the original
1644 1.3.2.5 skrll * TS. XXX There is probably a better lowerbound.
1645 1.3.2.5 skrll * Remove TS_MAX_CONN with better lowerbound check.
1646 1.3.2.5 skrll		 *  tsecr >= other original TS
1647 1.3.2.5 skrll *
1648 1.3.2.5 skrll * It is also important to note that the fastest
1649 1.3.2.5 skrll * timestamp clock of 1ms will wrap its 32bit space in
1650 1.3.2.5 skrll * 24 days. So we just disable TS checking after 24
1651 1.3.2.5 skrll * days of idle time. We actually must use a 12d
1652 1.3.2.5 skrll * connection limit until we can come up with a better
1653 1.3.2.5 skrll * lowerbound to the TS echo check.
1654 1.3.2.5 skrll */
1655 1.3.2.5 skrll struct timeval delta_ts;
1656 1.3.2.5 skrll int ts_fudge;
1657 1.3.2.5 skrll
1658 1.3.2.5 skrll
1659 1.3.2.5 skrll /*
1660 1.3.2.5 skrll * PFTM_TS_DIFF is how many seconds of leeway to allow
1661 1.3.2.5 skrll * a host's timestamp. This can happen if the previous
1662 1.3.2.5 skrll * packet got delayed in transit for much longer than
1663 1.3.2.5 skrll * this packet.
1664 1.3.2.5 skrll */
1665 1.3.2.5 skrll if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
1666 1.3.2.5 skrll ts_fudge = pf_default_rule.timeout[PFTM_TS_DIFF];
1667 1.3.2.5 skrll
1668 1.3.2.5 skrll
1669 1.3.2.5 skrll /* Calculate max ticks since the last timestamp */
1670 1.3.2.5 skrll #define TS_MAXFREQ	1100		/* RFC max TS freq of 1 kHz + 10% skew */
1671 1.3.2.5 skrll #define TS_MICROSECS 1000000 /* microseconds per second */
1672 1.3.2.5 skrll timersub(&uptime, &src->scrub->pfss_last, &delta_ts);
1673 1.3.2.5 skrll tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
1674 1.3.2.5 skrll tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
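		/*
		 * tsval_from_last bounds how far the sender's TS clock could
		 * plausibly have advanced since we last saw a timestamp from
		 * it; it is the upper-bound slack in the check below.
		 */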
1675 1.3.2.5 skrll
1676 1.3.2.5 skrll
1677 1.3.2.5 skrll if ((src->state >= TCPS_ESTABLISHED &&
1678 1.3.2.5 skrll dst->state >= TCPS_ESTABLISHED) &&
1679 1.3.2.5 skrll (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
1680 1.3.2.5 skrll SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
1681 1.3.2.5 skrll (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
1682 1.3.2.5 skrll SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
1683 1.3.2.5 skrll /* Bad RFC1323 implementation or an insertion attack.
1684 1.3.2.5 skrll *
1685 1.3.2.5 skrll * - Solaris 2.6 and 2.7 are known to send another ACK
1686 1.3.2.5 skrll * after the FIN,FIN|ACK,ACK closing that carries
1687 1.3.2.5 skrll * an old timestamp.
1688 1.3.2.5 skrll */
1689 1.3.2.5 skrll
1690 1.3.2.5 skrll DPFPRINTF(("Timestamp failed %c%c%c%c\n",
1691 1.3.2.5 skrll SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
1692 1.3.2.5 skrll SEQ_GT(tsval, src->scrub->pfss_tsval +
1693 1.3.2.5 skrll tsval_from_last) ? '1' : ' ',
1694 1.3.2.5 skrll SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
1695 1.3.2.5 skrll SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
1696 1.3.2.5 skrll DPFPRINTF((" tsval: %" PRIu32 " tsecr: %" PRIu32
1697 1.3.2.5 skrll " +ticks: %" PRIu32 " idle: %lus %lums\n",
1698 1.3.2.5 skrll tsval, tsecr, tsval_from_last, delta_ts.tv_sec,
1699 1.3.2.5 skrll delta_ts.tv_usec / 1000));
1700 1.3.2.5 skrll DPFPRINTF((" src->tsval: %" PRIu32 " tsecr: %" PRIu32
1701 1.3.2.5 skrll "\n",
1702 1.3.2.5 skrll src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
1703 1.3.2.5 skrll DPFPRINTF((" dst->tsval: %" PRIu32 " tsecr: %" PRIu32
1704 1.3.2.5 skrll " tsval0: %" PRIu32 "\n",
1705 1.3.2.5 skrll dst->scrub->pfss_tsval,
1706 1.3.2.5 skrll dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
1707 1.3.2.5 skrll if (pf_status.debug >= PF_DEBUG_MISC) {
1708 1.3.2.5 skrll pf_print_state(state);
1709 1.3.2.5 skrll pf_print_flags(th->th_flags);
1710 1.3.2.5 skrll printf("\n");
1711 1.3.2.5 skrll }
1712 1.3.2.5 skrll REASON_SET(reason, PFRES_TS);
1713 1.3.2.5 skrll return (PF_DROP);
1714 1.3.2.5 skrll }
1715 1.3.2.5 skrll
1716 1.3.2.5 skrll /* XXX I'd really like to require tsecr but it's optional */
1717 1.3.2.5 skrll
1718 1.3.2.5 skrll } else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
1719 1.3.2.5 skrll ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
1720 1.3.2.5 skrll || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
1721 1.3.2.5 skrll src->scrub && dst->scrub &&
1722 1.3.2.5 skrll (src->scrub->pfss_flags & PFSS_PAWS) &&
1723 1.3.2.5 skrll (dst->scrub->pfss_flags & PFSS_PAWS)) {
1724 1.3.2.5 skrll /* Didn't send a timestamp. Timestamps aren't really useful
1725 1.3.2.5 skrll * when:
1726 1.3.2.5 skrll		 *   - connection opening or closing (often not even sent),
1727 1.3.2.5 skrll		 *     but we must not let an attacker put a FIN on a
1728 1.3.2.5 skrll * data packet to sneak it through our ESTABLISHED check.
1729 1.3.2.5 skrll * - on a TCP reset. RFC suggests not even looking at TS.
1730 1.3.2.5 skrll * - on an empty ACK. The TS will not be echoed so it will
1731 1.3.2.5 skrll * probably not help keep the RTT calculation in sync and
1732 1.3.2.5 skrll * there isn't as much danger when the sequence numbers
1733 1.3.2.5 skrll * got wrapped. So some stacks don't include TS on empty
1734 1.3.2.5 skrll * ACKs :-(
1735 1.3.2.5 skrll *
1736 1.3.2.5 skrll * To minimize the disruption to mostly RFC1323 conformant
1737 1.3.2.5 skrll * stacks, we will only require timestamps on data packets.
1738 1.3.2.5 skrll *
1739 1.3.2.5 skrll * And what do ya know, we cannot require timestamps on data
1740 1.3.2.5 skrll * packets. There appear to be devices that do legitimate
1741 1.3.2.5 skrll * TCP connection hijacking. There are HTTP devices that allow
1742 1.3.2.5 skrll * a 3whs (with timestamps) and then buffer the HTTP request.
1743 1.3.2.5 skrll		 * If the intermediate device has the HTTP response cached, it
1744 1.3.2.5 skrll * will spoof the response but not bother timestamping its
1745 1.3.2.5 skrll * packets. So we can look for the presence of a timestamp in
1746 1.3.2.5 skrll * the first data packet and if there, require it in all future
1747 1.3.2.5 skrll * packets.
1748 1.3.2.5 skrll */
1749 1.3.2.5 skrll
1750 1.3.2.5 skrll if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
1751 1.3.2.5 skrll /*
1752 1.3.2.5 skrll * Hey! Someone tried to sneak a packet in. Or the
1753 1.3.2.5 skrll * stack changed its RFC1323 behavior?!?!
1754 1.3.2.5 skrll */
1755 1.3.2.5 skrll if (pf_status.debug >= PF_DEBUG_MISC) {
1756 1.3.2.5 skrll DPFPRINTF(("Did not receive expected RFC1323 "
1757 1.3.2.5 skrll "timestamp\n"));
1758 1.3.2.5 skrll pf_print_state(state);
1759 1.3.2.5 skrll pf_print_flags(th->th_flags);
1760 1.3.2.5 skrll printf("\n");
1761 1.3.2.5 skrll }
1762 1.3.2.5 skrll REASON_SET(reason, PFRES_TS);
1763 1.3.2.5 skrll return (PF_DROP);
1764 1.3.2.5 skrll }
1765 1.3.2.5 skrll }
1766 1.3.2.5 skrll
1767 1.3.2.5 skrll
1768 1.3.2.5 skrll /*
1769 1.3.2.5 skrll	 * We will note whether a host sends its data packets with or without
1770 1.3.2.5 skrll	 * timestamps, and require all data packets to contain a timestamp if
1771 1.3.2.5 skrll	 * the first one does.  PAWS implicitly requires that all data packets
1772 1.3.2.5 skrll	 * be timestamped.  But I think there are middle-man devices that hijack
1773 1.3.2.5 skrll	 * TCP streams immediately after the 3whs and don't timestamp their
1774 1.3.2.5 skrll * packets (seen in a WWW accelerator or cache).
1775 1.3.2.5 skrll */
1776 1.3.2.5 skrll if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
1777 1.3.2.5 skrll (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
1778 1.3.2.5 skrll if (got_ts)
1779 1.3.2.5 skrll src->scrub->pfss_flags |= PFSS_DATA_TS;
1780 1.3.2.5 skrll else {
1781 1.3.2.5 skrll src->scrub->pfss_flags |= PFSS_DATA_NOTS;
1782 1.3.2.5 skrll if (pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
1783 1.3.2.5 skrll (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1784 1.3.2.5 skrll /* Don't warn if other host rejected RFC1323 */
1785 1.3.2.5 skrll DPFPRINTF(("Broken RFC1323 stack did not "
1786 1.3.2.5 skrll "timestamp data packet. Disabled PAWS "
1787 1.3.2.5 skrll "security.\n"));
1788 1.3.2.5 skrll pf_print_state(state);
1789 1.3.2.5 skrll pf_print_flags(th->th_flags);
1790 1.3.2.5 skrll printf("\n");
1791 1.3.2.5 skrll }
1792 1.3.2.5 skrll }
1793 1.3.2.5 skrll }
1794 1.3.2.5 skrll
1795 1.3.2.5 skrll
1796 1.3.2.5 skrll /*
1797 1.3.2.5 skrll * Update PAWS values
1798 1.3.2.5 skrll */
1799 1.3.2.5 skrll if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
1800 1.3.2.5 skrll (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
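		/*
		 * Remember when we last saw a timestamp from this peer;
		 * the TS_MAX_IDLE check above keys off this value.
		 */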
1801 1.3.2.5 skrll getmicrouptime(&src->scrub->pfss_last);
1802 1.3.2.5 skrll if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
1803 1.3.2.5 skrll (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1804 1.3.2.5 skrll src->scrub->pfss_tsval = tsval;
1805 1.3.2.5 skrll
1806 1.3.2.5 skrll if (tsecr) {
1807 1.3.2.5 skrll if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
1808 1.3.2.5 skrll (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1809 1.3.2.5 skrll src->scrub->pfss_tsecr = tsecr;
1810 1.3.2.5 skrll
1811 1.3.2.5 skrll if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
1812 1.3.2.5 skrll (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
1813 1.3.2.5 skrll src->scrub->pfss_tsval0 == 0)) {
1814 1.3.2.5 skrll /* tsval0 MUST be the lowest timestamp */
1815 1.3.2.5 skrll src->scrub->pfss_tsval0 = tsval;
1816 1.3.2.5 skrll }
1817 1.3.2.5 skrll
1818 1.3.2.5 skrll /* Only fully initialized after a TS gets echoed */
1819 1.3.2.5 skrll if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
1820 1.3.2.5 skrll src->scrub->pfss_flags |= PFSS_PAWS;
1821 1.3.2.5 skrll }
1822 1.3.2.5 skrll }
1823 1.3.2.5 skrll
1824 1.3.2.2 skrll /* I have a dream.... TCP segment reassembly.... */
1825 1.3.2.2 skrll return (0);
1826 1.3.2.2 skrll }
1827 1.3.2.5 skrll
1828 1.3.2.2 skrll int
1829 1.3.2.2 skrll pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
1830 1.3.2.2 skrll int off)
1831 1.3.2.2 skrll {
1832 1.3.2.2 skrll u_int16_t *mss;
1833 1.3.2.2 skrll int thoff;
1834 1.3.2.2 skrll int opt, cnt, optlen = 0;
1835 1.3.2.2 skrll int rewrite = 0;
1836 1.3.2.2 skrll u_char *optp;
1837 1.3.2.2 skrll
1838 1.3.2.2 skrll thoff = th->th_off << 2;
1839 1.3.2.2 skrll cnt = thoff - sizeof(struct tcphdr);
1840 1.3.2.2 skrll optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);
1841 1.3.2.2 skrll
1842 1.3.2.2 skrll for (; cnt > 0; cnt -= optlen, optp += optlen) {
1843 1.3.2.2 skrll opt = optp[0];
1844 1.3.2.2 skrll if (opt == TCPOPT_EOL)
1845 1.3.2.2 skrll break;
1846 1.3.2.2 skrll if (opt == TCPOPT_NOP)
1847 1.3.2.2 skrll optlen = 1;
1848 1.3.2.2 skrll else {
1849 1.3.2.2 skrll if (cnt < 2)
1850 1.3.2.2 skrll break;
1851 1.3.2.2 skrll optlen = optp[1];
1852 1.3.2.2 skrll if (optlen < 2 || optlen > cnt)
1853 1.3.2.2 skrll break;
1854 1.3.2.2 skrll }
1855 1.3.2.2 skrll switch (opt) {
1856 1.3.2.2 skrll case TCPOPT_MAXSEG:
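			/*
			 * Clamp an advertised MSS that exceeds the rule's
			 * max-mss and patch the checksum for the 16-bit
			 * change.
			 */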
1857 1.3.2.2 skrll mss = (u_int16_t *)(optp + 2);
1858 1.3.2.2 skrll if ((ntohs(*mss)) > r->max_mss) {
1859 1.3.2.2 skrll th->th_sum = pf_cksum_fixup(th->th_sum,
1860 1.3.2.5 skrll *mss, htons(r->max_mss), 0);
1861 1.3.2.2 skrll *mss = htons(r->max_mss);
1862 1.3.2.2 skrll rewrite = 1;
1863 1.3.2.2 skrll }
1864 1.3.2.2 skrll break;
1865 1.3.2.2 skrll default:
1866 1.3.2.2 skrll break;
1867 1.3.2.2 skrll }
1868 1.3.2.2 skrll }
1869 1.3.2.2 skrll
1870 1.3.2.2 skrll return (rewrite);
1871 1.3.2.2 skrll }
1872