/*	$NetBSD: npf_inet.c,v 1.13 2012/07/01 23:21:06 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Various protocol related helper routines.
 *
 * This layer manipulates the npf_cache_t structure, i.e. it caches the
 * requested headers and records which of them were cached in the npc_info
 * bit field.  It is also the responsibility of this layer to update or
 * invalidate the cache on rewrites (e.g. by the translation routines).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.13 2012/07/01 23:21:06 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/ethertypes.h>
#include <net/if_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#include "npf_impl.h"

/*
 * npf_fixup{16,32}_cksum: incrementally update the Internet checksum
 * (RFC 1624); used for the IPv4 header checksum as well as TCP/UDP.
 */

uint16_t
npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
{
	uint32_t sum;

	/*
	 * RFC 1624:
	 *	HC' = ~(~HC + ~m + m')
	 */
	sum = ~ntohs(cksum) & 0xffff;
	sum += (~ntohs(odatum) & 0xffff) + ntohs(ndatum);
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);

	return htons(~sum & 0xffff);
}

uint16_t
npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
{

	cksum = npf_fixup16_cksum(cksum, odatum & 0xffff, ndatum & 0xffff);
	cksum = npf_fixup16_cksum(cksum, odatum >> 16, ndatum >> 16);
	return cksum;
}
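
/*
 * A minimal usage sketch (not part of NPF): rewriting a 16-bit field,
 * e.g. a TCP source port, while keeping the checksum consistent via
 * npf_fixup16_cksum().  The helper below is hypothetical.
 */
#if 0
static void
example_fixup_port(struct tcphdr *th, in_port_t newport)
{
	/* Fold the old and new port values into the existing checksum. */
	th->th_sum = npf_fixup16_cksum(th->th_sum, th->th_sport, newport);
	th->th_sport = newport;
}
#endif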

/*
 * npf_addr_cksum: calculate checksum of the address, either IPv4 or IPv6.
 */
uint16_t
npf_addr_cksum(uint16_t cksum, int sz, npf_addr_t *oaddr, npf_addr_t *naddr)
{
	uint32_t *oip32 = (uint32_t *)oaddr, *nip32 = (uint32_t *)naddr;

	KASSERT(sz % sizeof(uint32_t) == 0);
	do {
		cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
		sz -= sizeof(uint32_t);
	} while (sz);

	return cksum;
}

/*
 * npf_addr_sum: sum both addresses into a 32-bit integer.
 * Note: used by the hash function.
 */
uint32_t
npf_addr_sum(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
{
	uint32_t mix = 0;
	int i;

	KASSERT(sz > 0 && a1 != NULL && a2 != NULL);

	for (i = 0; i < (sz >> 2); i++) {
		mix += a1->s6_addr32[i];
		mix += a2->s6_addr32[i];
	}
	return mix;
}

/*
 * npf_addr_mask: apply the mask to a given address and store the result.
 */
void
npf_addr_mask(const npf_addr_t *addr, const npf_netmask_t mask,
    const int alen, npf_addr_t *out)
{
	const int nwords = alen >> 2;
	uint_fast8_t length = mask;

	/* Note: maximum length is 32 for IPv4 and 128 for IPv6. */
	KASSERT(length <= NPF_MAX_NETMASK);

	for (int i = 0; i < nwords; i++) {
		uint32_t wordmask;

		if (length >= 32) {
			wordmask = htonl(0xffffffff);
			length -= 32;
		} else if (length) {
			wordmask = htonl(0xffffffff << (32 - length));
			length = 0;
		} else {
			wordmask = 0;
		}
		out->s6_addr32[i] = addr->s6_addr32[i] & wordmask;
	}
}
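
/*
 * A minimal sketch (not part of NPF), assuming npf_addr_t overlays
 * struct in6_addr as used above: masking 10.1.2.3 with a /24 prefix
 * (alen of 4) ANDs the first word with htonl(0xffffff00), giving
 * 10.1.2.0.  The helper below is hypothetical.
 */
#if 0
static void
example_mask_v4(void)
{
	npf_addr_t addr, net;

	memset(&addr, 0, sizeof(addr));
	addr.s6_addr32[0] = htonl(0x0a010203);	/* 10.1.2.3 */
	npf_addr_mask(&addr, 24, sizeof(struct in_addr), &net);
	KASSERT(net.s6_addr32[0] == htonl(0x0a010200));	/* 10.1.2.0 */
}
#endif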

/*
 * npf_addr_cmp: compare two addresses, either IPv4 or IPv6.
 *
 * => Return 0 if equal and negative/positive if less/greater accordingly.
 * => Ignore the mask, if NPF_NO_NETMASK is specified.
 */
int
npf_addr_cmp(const npf_addr_t *addr1, const npf_netmask_t mask1,
    const npf_addr_t *addr2, const npf_netmask_t mask2, const int alen)
{
	npf_addr_t realaddr1, realaddr2;

	if (mask1 != NPF_NO_NETMASK) {
		npf_addr_mask(addr1, mask1, alen, &realaddr1);
		addr1 = &realaddr1;
	}
	if (mask2 != NPF_NO_NETMASK) {
		npf_addr_mask(addr2, mask2, alen, &realaddr2);
		addr2 = &realaddr2;
	}
	return memcmp(addr1, addr2, alen);
}

/*
 * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
 *
 * => Returns all values in host byte-order.
 */
int
npf_tcpsaw(const npf_cache_t *npc, tcp_seq *seq, tcp_seq *ack, uint32_t *win)
{
	const struct tcphdr *th = &npc->npc_l4.tcp;
	u_int thlen;

	KASSERT(npf_iscached(npc, NPC_TCP));

	*seq = ntohl(th->th_seq);
	*ack = ntohl(th->th_ack);
	*win = (uint32_t)ntohs(th->th_win);
	thlen = th->th_off << 2;

	if (npf_iscached(npc, NPC_IP4)) {
		const struct ip *ip = &npc->npc_ip.v4;
		return ntohs(ip->ip_len) - npf_cache_hlen(npc) - thlen;
	} else if (npf_iscached(npc, NPC_IP6)) {
		const struct ip6_hdr *ip6 = &npc->npc_ip.v6;
		return ntohs(ip6->ip6_plen) - thlen;
	}
	return 0;
}
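
/*
 * A minimal sketch (not part of NPF): using npf_tcpsaw() to compute the
 * sequence number just past the data of a cached TCP segment, as a state
 * tracking caller might.  The helper below is hypothetical.
 */
#if 0
static uint32_t
example_tcp_data_end(const npf_cache_t *npc)
{
	tcp_seq seq, ack;
	uint32_t win;
	int tcpdlen;

	tcpdlen = npf_tcpsaw(npc, &seq, &ack, &win);
	return seq + tcpdlen;
}
#endif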

/*
 * npf_fetch_tcpopts: parse and return TCP options.
 */
bool
npf_fetch_tcpopts(const npf_cache_t *npc, nbuf_t *nbuf,
    uint16_t *mss, int *wscale)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	const struct tcphdr *th = &npc->npc_l4.tcp;
	int topts_len, step;
	uint16_t val16;
	uint8_t val;

	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(npf_iscached(npc, NPC_TCP));

	/* Determine if there are any TCP options, get their length. */
	topts_len = (th->th_off << 2) - sizeof(struct tcphdr);
	if (topts_len <= 0) {
		/* No options. */
		return false;
	}
	KASSERT(topts_len <= MAX_TCPOPTLEN);

	/* First step: IP and TCP header up to options. */
	step = npf_cache_hlen(npc) + sizeof(struct tcphdr);
next:
	if (nbuf_advfetch(&nbuf, &n_ptr, step, sizeof(val), &val)) {
		return false;
	}

	switch (val) {
	case TCPOPT_EOL:
		/* Done. */
		return true;
	case TCPOPT_NOP:
		topts_len--;
		step = 1;
		break;
	case TCPOPT_MAXSEG:
		/*
		 * XXX: clean this mess.
		 */
		if (mss && *mss) {
			val16 = *mss;
			if (nbuf_advstore(&nbuf, &n_ptr, 2,
			    sizeof(val16), &val16))
				return false;
		} else if (nbuf_advfetch(&nbuf, &n_ptr, 2,
		    sizeof(val16), &val16)) {
			return false;
		}
		if (mss) {
			*mss = val16;
		}
		topts_len -= TCPOLEN_MAXSEG;
		step = sizeof(val16);
		break;
	case TCPOPT_WINDOW:
		/* TCP Window Scaling (RFC 1323). */
		if (nbuf_advfetch(&nbuf, &n_ptr, 2, sizeof(val), &val)) {
			return false;
		}
		*wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
		topts_len -= TCPOLEN_WINDOW;
		step = sizeof(val);
		break;
	default:
		if (nbuf_advfetch(&nbuf, &n_ptr, 1, sizeof(val), &val)) {
			return false;
		}
		if (val < 2 || val >= topts_len) {
			return false;
		}
		topts_len -= val;
		step = val - 1;
	}

	/* Any options left? */
	if (__predict_true(topts_len > 0)) {
		goto next;
	}
	return true;
}
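
/*
 * A minimal sketch (not part of NPF): reading the MSS and window scale
 * options of a cached TCP header.  Passing *mss == 0 performs a fetch,
 * while a non-zero *mss rewrites the option in place (as the MSS clamping
 * in npf_normalize() does).  The helper below is hypothetical.
 */
#if 0
static bool
example_get_tcpopts(const npf_cache_t *npc, nbuf_t *nbuf,
    uint16_t *mss, int *wscale)
{
	*mss = 0;	/* fetch only; returned in network byte order */
	*wscale = 0;
	return npf_iscached(npc, NPC_TCP) &&
	    npf_fetch_tcpopts(npc, nbuf, mss, wscale);
}
#endif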

/*
 * npf_fetch_ip: fetch, check and cache IP header.
 */
bool
npf_fetch_ip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	uint8_t ver;

	if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(uint8_t), &ver)) {
		return false;
	}

	switch (ver >> 4) {
	case IPVERSION: {
		struct ip *ip = &npc->npc_ip.v4;

		/* Fetch IPv4 header. */
		if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(struct ip), ip)) {
			return false;
		}

		/* Check header length and fragment offset. */
		if ((u_int)(ip->ip_hl << 2) < sizeof(struct ip)) {
			return false;
		}
		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
			/* Note fragmentation. */
			npc->npc_info |= NPC_IPFRAG;
		}

		/* Cache: layer 3 - IPv4. */
		npc->npc_ipsz = sizeof(struct in_addr);
		npc->npc_srcip = (npf_addr_t *)&ip->ip_src;
		npc->npc_dstip = (npf_addr_t *)&ip->ip_dst;
		npc->npc_info |= NPC_IP4;
		npc->npc_hlen = ip->ip_hl << 2;
		npc->npc_next_proto = npc->npc_ip.v4.ip_p;
		break;
	}

	case (IPV6_VERSION >> 4): {
		struct ip6_hdr *ip6 = &npc->npc_ip.v6;
		size_t hlen = sizeof(struct ip6_hdr);
		struct ip6_ext ip6e;

		/* Fetch IPv6 header and set initial next-protocol value. */
		if (nbuf_fetch_datum(nbuf, n_ptr, hlen, ip6)) {
			return false;
		}
		npc->npc_next_proto = ip6->ip6_nxt;
		npc->npc_hlen = hlen;

		/*
		 * Advance by the length of the current header and
		 * prefetch the extension header.
		 */
		while (nbuf_advfetch(&nbuf, &n_ptr, hlen,
		    sizeof(struct ip6_ext), &ip6e) == 0) {
			/*
			 * Determine whether we are going to continue.
			 */
			switch (npc->npc_next_proto) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
				hlen = (ip6e.ip6e_len + 1) << 3;
				break;
			case IPPROTO_FRAGMENT:
				npc->npc_info |= NPC_IPFRAG;
				hlen = sizeof(struct ip6_frag);
				break;
			case IPPROTO_AH:
				hlen = (ip6e.ip6e_len + 2) << 2;
				break;
			default:
				hlen = 0;
				break;
			}

			if (!hlen) {
				break;
			}
			npc->npc_next_proto = ip6e.ip6e_nxt;
			npc->npc_hlen += hlen;
		}

		/* Cache: layer 3 - IPv6. */
		npc->npc_ipsz = sizeof(struct in6_addr);
		npc->npc_srcip = (npf_addr_t *)&ip6->ip6_src;
		npc->npc_dstip = (npf_addr_t *)&ip6->ip6_dst;
		npc->npc_info |= NPC_IP6;
		break;
	}
	default:
		return false;
	}

	return true;
}

/*
 * npf_fetch_tcp: fetch, check and cache TCP header.  If necessary,
 * fetch and cache layer 3 as well.
 */
bool
npf_fetch_tcp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	struct tcphdr *th;

	/* Must have IP header processed for its length and protocol. */
	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return false;
	}
	if (npf_cache_ipproto(npc) != IPPROTO_TCP) {
		return false;
	}
	th = &npc->npc_l4.tcp;

	/* Fetch TCP header. */
	if (nbuf_advfetch(&nbuf, &n_ptr, npf_cache_hlen(npc),
	    sizeof(struct tcphdr), th)) {
		return false;
	}

	/* Cache: layer 4 - TCP. */
	npc->npc_info |= (NPC_LAYER4 | NPC_TCP);
	return true;
}

/*
 * npf_fetch_udp: fetch, check and cache UDP header.  If necessary,
 * fetch and cache layer 3 as well.
 */
bool
npf_fetch_udp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	struct udphdr *uh;
	u_int hlen;

	/* Must have IP header processed for its length and protocol. */
	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return false;
	}
	/* Check the cached protocol, which is valid for both IPv4 and IPv6. */
	if (npf_cache_ipproto(npc) != IPPROTO_UDP) {
		return false;
	}
	uh = &npc->npc_l4.udp;
	hlen = npf_cache_hlen(npc);

	/* Fetch UDP header. */
	if (nbuf_advfetch(&nbuf, &n_ptr, hlen, sizeof(struct udphdr), uh)) {
		return false;
	}

	/* Cache: layer 4 - UDP. */
	npc->npc_info |= (NPC_LAYER4 | NPC_UDP);
	return true;
}

/*
 * npf_fetch_icmp: fetch ICMP code, type and possible query ID.
 */
bool
npf_fetch_icmp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	struct icmp *ic;
	u_int hlen, iclen;

	/* Must have IP header processed for its length and protocol. */
	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return false;
	}
	/* Check the cached protocol rather than the IPv4 union member. */
	if (npf_cache_ipproto(npc) != IPPROTO_ICMP) {
		return false;
	}
	ic = &npc->npc_l4.icmp;
	hlen = npf_cache_hlen(npc);

	/* Fetch basic ICMP header, up to the "data" point. */
	iclen = offsetof(struct icmp, icmp_data);
	if (nbuf_advfetch(&nbuf, &n_ptr, hlen, iclen, ic)) {
		return false;
	}

	/* Cache: layer 4 - ICMP. */
	npc->npc_info |= (NPC_LAYER4 | NPC_ICMP);
	return true;
}

/*
 * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
 * and TCP, UDP or ICMP headers.
 */
int
npf_cache_all(npf_cache_t *npc, nbuf_t *nbuf)
{
	void *n_ptr = nbuf_dataptr(nbuf);

	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return npc->npc_info;
	}
	if (npf_iscached(npc, NPC_IPFRAG)) {
		return npc->npc_info;
	}
	switch (npf_cache_ipproto(npc)) {
	case IPPROTO_TCP:
		(void)npf_fetch_tcp(npc, nbuf, n_ptr);
		break;
	case IPPROTO_UDP:
		(void)npf_fetch_udp(npc, nbuf, n_ptr);
		break;
	case IPPROTO_ICMP:
		(void)npf_fetch_icmp(npc, nbuf, n_ptr);
		break;
	}
	return npc->npc_info;
}
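
/*
 * A minimal sketch (not part of NPF): typical use of npf_cache_all(),
 * checking the returned npc_info bits before trusting the layer 4 cache.
 * The helper below is hypothetical.
 */
#if 0
static bool
example_cache_packet(npf_cache_t *npc, nbuf_t *nbuf)
{
	const int flags = npf_cache_all(npc, nbuf);

	if (flags & NPC_IPFRAG) {
		/* Fragment: the layer 4 header was not cached. */
		return false;
	}
	return (flags & NPC_LAYER4) != 0;
}
#endif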

/*
 * npf_rwrip: rewrite required IP address, update the cache.
 */
bool
npf_rwrip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    npf_addr_t *addr)
{
	npf_addr_t *oaddr;
	u_int offby;

	KASSERT(npf_iscached(npc, NPC_IP46));

	if (di == PFIL_OUT) {
		/* Rewrite source address, if outgoing. */
		offby = offsetof(struct ip, ip_src);
		oaddr = npc->npc_srcip;
	} else {
		/* Rewrite destination, if incoming. */
		offby = offsetof(struct ip, ip_dst);
		oaddr = npc->npc_dstip;
	}

	/* Advance to the address and rewrite it. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, npc->npc_ipsz, addr))
		return false;

	/* Cache: IP address. */
	memcpy(oaddr, addr, npc->npc_ipsz);
	return true;
}

/*
 * npf_rwrport: rewrite required TCP/UDP port, update the cache.
 */
bool
npf_rwrport(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    in_port_t port)
{
	const int proto = npf_cache_ipproto(npc);
	u_int offby = npf_cache_hlen(npc);
	in_port_t *oport;

	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
	KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);

	/* Offset to the port and pointer in the cache. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = &npc->npc_l4.tcp;
		if (di == PFIL_OUT) {
			CTASSERT(offsetof(struct tcphdr, th_sport) == 0);
			oport = &th->th_sport;
		} else {
			offby += offsetof(struct tcphdr, th_dport);
			oport = &th->th_dport;
		}
	} else {
		struct udphdr *uh = &npc->npc_l4.udp;
		if (di == PFIL_OUT) {
			CTASSERT(offsetof(struct udphdr, uh_sport) == 0);
			oport = &uh->uh_sport;
		} else {
			offby += offsetof(struct udphdr, uh_dport);
			oport = &uh->uh_dport;
		}
	}

	/* Advance and rewrite the port. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(in_port_t), &port))
		return false;

	/* Cache: TCP/UDP port. */
	*oport = port;
	return true;
}

/*
 * npf_rwrcksum: rewrite IPv4 and/or TCP/UDP checksum, update the cache.
 */
bool
npf_rwrcksum(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    npf_addr_t *addr, in_port_t port)
{
	const int proto = npf_cache_ipproto(npc);
	npf_addr_t *oaddr;
	in_port_t *oport;
	uint16_t *cksum;
	u_int offby;

	/* Checksum update for IPv4 header. */
	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = &npc->npc_ip.v4;
		uint16_t ipsum;

		oaddr = (di == PFIL_OUT) ? npc->npc_srcip : npc->npc_dstip;
		ipsum = npf_addr_cksum(ip->ip_sum, npc->npc_ipsz, oaddr, addr);

		/* Advance to the IPv4 checksum and rewrite it. */
		offby = offsetof(struct ip, ip_sum);
		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(ipsum), &ipsum))
			return false;

		ip->ip_sum = ipsum;
		offby = npf_cache_hlen(npc) - offby;
	} else {
		/* No checksum for IPv6. */
		KASSERT(npf_iscached(npc, NPC_IP6));
		oaddr = NULL;
		offby = 0;
		return false;	/* XXX: Not yet supported. */
	}

	/* Determine whether TCP/UDP checksum update is needed. */
	if (proto == IPPROTO_ICMP || port == 0) {
		return true;
	}
	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));

	/* Calculate TCP/UDP checksum. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = &npc->npc_l4.tcp;

		cksum = &th->th_sum;
		offby += offsetof(struct tcphdr, th_sum);
		oport = (di == PFIL_OUT) ? &th->th_sport : &th->th_dport;
	} else {
		struct udphdr *uh = &npc->npc_l4.udp;

		KASSERT(proto == IPPROTO_UDP);
		cksum = &uh->uh_sum;
		if (*cksum == 0) {
			/* No need to update. */
			return true;
		}
		offby += offsetof(struct udphdr, uh_sum);
		oport = (di == PFIL_OUT) ? &uh->uh_sport : &uh->uh_dport;
	}
	*cksum = npf_addr_cksum(*cksum, npc->npc_ipsz, oaddr, addr);
	*cksum = npf_fixup16_cksum(*cksum, *oport, port);

	/* Advance to TCP/UDP checksum and rewrite it. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(uint16_t), cksum)) {
		return false;
	}
	return true;
}
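
/*
 * A minimal sketch (not part of NPF) of the expected rewrite order:
 * npf_rwrcksum() consumes the old address and port still present in the
 * cache, so it would be called before npf_rwrip() and npf_rwrport()
 * update them.  The helper and its arguments are hypothetical.
 */
#if 0
static bool
example_translate(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr,
    const int di, npf_addr_t *addr, in_port_t port)
{
	if (!npf_rwrcksum(npc, nbuf, n_ptr, di, addr, port)) {
		return false;
	}
	if (!npf_rwrip(npc, nbuf, n_ptr, di, addr)) {
		return false;
	}
	if (port && !npf_rwrport(npc, nbuf, n_ptr, di, port)) {
		return false;
	}
	return true;
}
#endif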

static inline bool
npf_normalize_ip4(npf_cache_t *npc, nbuf_t *nbuf,
    bool rnd, bool no_df, int minttl)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	struct ip *ip = &npc->npc_ip.v4;
	uint16_t cksum = ip->ip_sum;
	uint16_t ip_off = ip->ip_off;
	uint8_t ttl = ip->ip_ttl;
	u_int offby = 0;

	KASSERT(rnd || minttl || no_df);

	/* Randomize IPv4 ID. */
	if (rnd) {
		uint16_t oid = ip->ip_id, nid;

		nid = htons(ip_randomid(ip_ids, 0));
		offby = offsetof(struct ip, ip_id);
		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(nid), &nid)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, oid, nid);
		ip->ip_id = nid;
	}

	/* IP_DF flag cleansing. */
	if (no_df && (ip_off & htons(IP_DF)) != 0) {
		uint16_t nip_off = ip_off & ~htons(IP_DF);

		if (nbuf_advstore(&nbuf, &n_ptr,
		    offsetof(struct ip, ip_off) - offby,
		    sizeof(uint16_t), &nip_off)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, ip_off, nip_off);
		ip->ip_off = nip_off;
		offby = offsetof(struct ip, ip_off);
	}

	/* Enforce minimum TTL. */
	if (minttl && ttl < minttl) {
		if (nbuf_advstore(&nbuf, &n_ptr,
		    offsetof(struct ip, ip_ttl) - offby,
		    sizeof(uint8_t), &minttl)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, ttl, minttl);
		ip->ip_ttl = minttl;
		offby = offsetof(struct ip, ip_ttl);
	}

	/* Update IP checksum. */
	offby = offsetof(struct ip, ip_sum) - offby;
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
		return false;
	}
	ip->ip_sum = cksum;
	return true;
}

bool
npf_normalize(npf_cache_t *npc, nbuf_t *nbuf,
    bool no_df, bool rnd, u_int minttl, u_int maxmss)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	struct tcphdr *th = &npc->npc_l4.tcp;
	uint16_t cksum, mss;
	u_int offby;
	int wscale;

	/* Normalize IPv4. */
	if (npf_iscached(npc, NPC_IP4) && (rnd || minttl)) {
		if (!npf_normalize_ip4(npc, nbuf, rnd, no_df, minttl)) {
			return false;
		}
	} else if (!npf_iscached(npc, NPC_IP4)) {
		/* XXX: no IPv6 */
		return false;
	}

	/*
	 * TCP Maximum Segment Size (MSS) "clamping".  Only if SYN packet.
	 * Fetch MSS and check whether rewrite to lower is needed.
	 */
	if (maxmss == 0 || !npf_iscached(npc, NPC_TCP) ||
	    (th->th_flags & TH_SYN) == 0) {
		/* Not required; done. */
		return true;
	}
	mss = 0;
	if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
		return false;
	}
	if (ntohs(mss) <= maxmss) {
		return true;
	}

	/* Calculate TCP checksum, then rewrite MSS and the checksum. */
	maxmss = htons(maxmss);
	cksum = npf_fixup16_cksum(th->th_sum, mss, maxmss);
	th->th_sum = cksum;
	mss = maxmss;
	if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
		return false;
	}
	offby = npf_cache_hlen(npc) + offsetof(struct tcphdr, th_sum);
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
		return false;
	}
	return true;
}
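
/*
 * A minimal sketch (not part of NPF): how a caller might request
 * normalization - clear IP_DF, randomize the IPv4 ID, enforce a minimum
 * TTL of 64 and clamp the TCP MSS to 1440.  The values are hypothetical.
 */
#if 0
static void
example_normalize(npf_cache_t *npc, nbuf_t *nbuf)
{
	(void)npf_normalize(npc, nbuf, true, true, 64, 1440);
}
#endif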

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_addr_dump(const npf_addr_t *addr)
{
	printf("IP[%x:%x:%x:%x]\n",
	    addr->s6_addr32[0], addr->s6_addr32[1],
	    addr->s6_addr32[2], addr->s6_addr32[3]);
}

#endif