/*	$NetBSD: npf_inet.c,v 1.16 2012/07/21 17:11:01 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Various protocol-related helper routines.
 *
 * This layer manipulates the npf_cache_t structure, i.e. it caches the
 * requested headers and records which of them were cached in the info
 * bit field.  It is also the responsibility of this layer to update or
 * invalidate the cache on rewrites (e.g. by the translation routines).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.16 2012/07/21 17:11:01 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/ethertypes.h>
#include <net/if_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#include "npf_impl.h"

/*
 * npf_fixup{16,32}_cksum: update IPv4 checksum.
 */

uint16_t
npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
{
	uint32_t sum;

	/*
	 * RFC 1624:
	 *	HC' = ~(~HC + ~m + m')
	 */
	sum = ~ntohs(cksum) & 0xffff;
	sum += (~ntohs(odatum) & 0xffff) + ntohs(ndatum);
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);

	return htons(~sum & 0xffff);
}
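
/*
 * Worked example (illustrative only, not in the original source): when a
 * 16-bit field changes from m = 0x1234 to m' = 0x5678 in a packet whose
 * checksum field currently holds HC = 0xbeef (values shown in host order):
 *
 *	HC' = ~(~HC + ~m + m')
 *	    = ~(0x4110 + 0xedcb + 0x5678)	(one's-complement sum)
 *	    = ~0x8554 = 0x7aab
 *
 * npf_fixup16_cksum(htons(0xbeef), htons(0x1234), htons(0x5678)) would
 * therefore return htons(0x7aab), without re-summing the whole packet.
 */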

uint16_t
npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
{

	cksum = npf_fixup16_cksum(cksum, odatum & 0xffff, ndatum & 0xffff);
	cksum = npf_fixup16_cksum(cksum, odatum >> 16, ndatum >> 16);
	return cksum;
}

/*
 * npf_addr_cksum: update the checksum for an address change, either
 * IPv4 or IPv6.
 */
uint16_t
npf_addr_cksum(uint16_t cksum, int sz, npf_addr_t *oaddr, npf_addr_t *naddr)
{
	uint32_t *oip32 = (uint32_t *)oaddr, *nip32 = (uint32_t *)naddr;

	KASSERT(sz % sizeof(uint32_t) == 0);
	do {
		cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
		sz -= sizeof(uint32_t);
	} while (sz);

	return cksum;
}

/*
 * npf_addr_sum: sum a pair of IP addresses into a single 32-bit integer.
 * Note: used by the hash function.
 */
uint32_t
npf_addr_sum(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
{
	uint32_t mix = 0;
	int i;

	KASSERT(sz > 0 && a1 != NULL && a2 != NULL);

	for (i = 0; i < (sz >> 2); i++) {
		mix += a1->s6_addr32[i];
		mix += a2->s6_addr32[i];
	}
	return mix;
}

/*
 * npf_addr_mask: apply the mask to a given address and store the result.
 */
void
npf_addr_mask(const npf_addr_t *addr, const npf_netmask_t mask,
    const int alen, npf_addr_t *out)
{
	const int nwords = alen >> 2;
	uint_fast8_t length = mask;

	/* Note: maximum length is 32 for IPv4 and 128 for IPv6. */
	KASSERT(length <= NPF_MAX_NETMASK);

	for (int i = 0; i < nwords; i++) {
		uint32_t wordmask;

		if (length >= 32) {
			wordmask = htonl(0xffffffff);
			length -= 32;
		} else if (length) {
			wordmask = htonl(0xffffffff << (32 - length));
			length = 0;
		} else {
			wordmask = 0;
		}
		out->s6_addr32[i] = addr->s6_addr32[i] & wordmask;
	}
}
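
/*
 * Illustrative examples (not part of the original code): with alen = 4 and
 * mask = 25, the result is addr & htonl(0xffffff80).  With alen = 16 and
 * mask = 40, the four 32-bit words are masked with htonl(0xffffffff),
 * htonl(0xff000000), 0 and 0 respectively.
 */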

/*
 * npf_addr_cmp: compare two addresses, either IPv4 or IPv6.
 *
 * => Return 0 if equal and negative/positive if less/greater accordingly.
 * => Ignore the mask, if NPF_NO_NETMASK is specified.
 */
int
npf_addr_cmp(const npf_addr_t *addr1, const npf_netmask_t mask1,
    const npf_addr_t *addr2, const npf_netmask_t mask2, const int alen)
{
	npf_addr_t realaddr1, realaddr2;

	if (mask1 != NPF_NO_NETMASK) {
		npf_addr_mask(addr1, mask1, alen, &realaddr1);
		addr1 = &realaddr1;
	}
	if (mask2 != NPF_NO_NETMASK) {
		npf_addr_mask(addr2, mask2, alen, &realaddr2);
		addr2 = &realaddr2;
	}
	return memcmp(addr1, addr2, alen);
}

/*
 * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
 *
 * => Returns all values in host byte-order.
 */
int
npf_tcpsaw(const npf_cache_t *npc, tcp_seq *seq, tcp_seq *ack, uint32_t *win)
{
	const struct tcphdr *th = &npc->npc_l4.tcp;
	u_int thlen;

	KASSERT(npf_iscached(npc, NPC_TCP));

	*seq = ntohl(th->th_seq);
	*ack = ntohl(th->th_ack);
	*win = (uint32_t)ntohs(th->th_win);
	thlen = th->th_off << 2;

	if (npf_iscached(npc, NPC_IP4)) {
		const struct ip *ip = &npc->npc_ip.v4;
		return ntohs(ip->ip_len) - npf_cache_hlen(npc) - thlen;
	} else if (npf_iscached(npc, NPC_IP6)) {
		const struct ip6_hdr *ip6 = &npc->npc_ip.v6;
		return ntohs(ip6->ip6_plen) - thlen;
	}
	return 0;
}

/*
 * npf_fetch_tcpopts: parse and return TCP options.
 */
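/*
 * Usage note (added for clarity, not in the original): the mss argument is
 * both an input and an output.  If mss is non-NULL and *mss is non-zero on
 * entry, the MSS option in the packet is overwritten with *mss; otherwise
 * the current MSS value is fetched into *mss.  The wscale argument is only
 * written when a window-scale option is present.
 */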
bool
npf_fetch_tcpopts(const npf_cache_t *npc, nbuf_t *nbuf,
    uint16_t *mss, int *wscale)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	const struct tcphdr *th = &npc->npc_l4.tcp;
	int topts_len, step;
	uint16_t val16;
	uint8_t val;

	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(npf_iscached(npc, NPC_TCP));

	/* Determine if there are any TCP options, get their length. */
	topts_len = (th->th_off << 2) - sizeof(struct tcphdr);
	if (topts_len <= 0) {
		/* No options. */
		return false;
	}
	KASSERT(topts_len <= MAX_TCPOPTLEN);

	/* First step: IP and TCP header up to options. */
	step = npf_cache_hlen(npc) + sizeof(struct tcphdr);
next:
	if (nbuf_advfetch(&nbuf, &n_ptr, step, sizeof(val), &val)) {
		return false;
	}

	switch (val) {
	case TCPOPT_EOL:
		/* Done. */
		return true;
	case TCPOPT_NOP:
		topts_len--;
		step = 1;
		break;
	case TCPOPT_MAXSEG:
		/*
		 * XXX: clean this mess.
		 */
		if (mss && *mss) {
			val16 = *mss;
			if (nbuf_advstore(&nbuf, &n_ptr, 2,
			    sizeof(val16), &val16))
				return false;
		} else if (nbuf_advfetch(&nbuf, &n_ptr, 2,
		    sizeof(val16), &val16)) {
			return false;
		}
		if (mss) {
			*mss = val16;
		}
		topts_len -= TCPOLEN_MAXSEG;
		step = sizeof(val16);
		break;
	case TCPOPT_WINDOW:
		/* TCP Window Scaling (RFC 1323). */
		if (nbuf_advfetch(&nbuf, &n_ptr, 2, sizeof(val), &val)) {
			return false;
		}
		*wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
		topts_len -= TCPOLEN_WINDOW;
		step = sizeof(val);
		break;
	default:
		if (nbuf_advfetch(&nbuf, &n_ptr, 1, sizeof(val), &val)) {
			return false;
		}
		if (val < 2 || val > topts_len) {
			return false;
		}
		topts_len -= val;
		step = val - 1;
	}

	/* Any options left? */
	if (__predict_true(topts_len > 0)) {
		goto next;
	}
	return true;
}

/*
 * npf_fetch_ip: fetch, check and cache IP header.
 */
bool
npf_fetch_ip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	uint8_t ver;

	if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(uint8_t), &ver)) {
		return false;
	}

	switch (ver >> 4) {
	case IPVERSION: {
		struct ip *ip = &npc->npc_ip.v4;

		/* Fetch IPv4 header. */
		if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(struct ip), ip)) {
			return false;
		}

		/* Check header length and fragment offset. */
		if ((u_int)(ip->ip_hl << 2) < sizeof(struct ip)) {
			return false;
		}
		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
			/* Note fragmentation. */
			npc->npc_info |= NPC_IPFRAG;
		}

		/* Cache: layer 3 - IPv4. */
		npc->npc_alen = sizeof(struct in_addr);
		npc->npc_srcip = (npf_addr_t *)&ip->ip_src;
		npc->npc_dstip = (npf_addr_t *)&ip->ip_dst;
		npc->npc_info |= NPC_IP4;
		npc->npc_hlen = ip->ip_hl << 2;
		npc->npc_next_proto = npc->npc_ip.v4.ip_p;
		break;
	}

	case (IPV6_VERSION >> 4): {
		struct ip6_hdr *ip6 = &npc->npc_ip.v6;
		size_t hlen = sizeof(struct ip6_hdr);
		struct ip6_ext ip6e;

		/* Fetch IPv6 header and set initial next-protocol value. */
		if (nbuf_fetch_datum(nbuf, n_ptr, hlen, ip6)) {
			return false;
		}
		npc->npc_next_proto = ip6->ip6_nxt;
		npc->npc_hlen = hlen;

		/*
		 * Advance by the length of the current header and
		 * prefetch the extension header.
		 */
		while (nbuf_advfetch(&nbuf, &n_ptr, hlen,
		    sizeof(struct ip6_ext), &ip6e) == 0) {
			/*
			 * Determine whether we are going to continue.
			 */
			switch (npc->npc_next_proto) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
				hlen = (ip6e.ip6e_len + 1) << 3;
				break;
			case IPPROTO_FRAGMENT:
				npc->npc_info |= NPC_IPFRAG;
				hlen = sizeof(struct ip6_frag);
				break;
			case IPPROTO_AH:
				hlen = (ip6e.ip6e_len + 2) << 2;
				break;
			default:
				hlen = 0;
				break;
			}

			if (!hlen) {
				break;
			}
			npc->npc_next_proto = ip6e.ip6e_nxt;
			npc->npc_hlen += hlen;
		}

		/* Cache: layer 3 - IPv6. */
		npc->npc_alen = sizeof(struct in6_addr);
		npc->npc_srcip = (npf_addr_t *)&ip6->ip6_src;
		npc->npc_dstip = (npf_addr_t *)&ip6->ip6_dst;
		npc->npc_info |= NPC_IP6;
		break;
	}
	default:
		return false;
	}

	return true;
}

/*
 * npf_fetch_tcp: fetch, check and cache TCP header.  If necessary,
 * fetch and cache layer 3 as well.
 */
bool
npf_fetch_tcp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	struct tcphdr *th;

	/* Must have IP header processed for its length and protocol. */
	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return false;
	}
	if (npf_cache_ipproto(npc) != IPPROTO_TCP) {
		return false;
	}
	th = &npc->npc_l4.tcp;

	/* Fetch TCP header. */
	if (nbuf_advfetch(&nbuf, &n_ptr, npf_cache_hlen(npc),
	    sizeof(struct tcphdr), th)) {
		return false;
	}

	/* Cache: layer 4 - TCP. */
	npc->npc_info |= (NPC_LAYER4 | NPC_TCP);
	return true;
}

/*
 * npf_fetch_udp: fetch, check and cache UDP header.  If necessary,
 * fetch and cache layer 3 as well.
 */
bool
npf_fetch_udp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	struct udphdr *uh;
	u_int hlen;

	/* Must have IP header processed for its length and protocol. */
	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return false;
	}
	if (npf_cache_ipproto(npc) != IPPROTO_UDP) {
		return false;
	}
	uh = &npc->npc_l4.udp;
	hlen = npf_cache_hlen(npc);

	/* Fetch UDP header. */
	if (nbuf_advfetch(&nbuf, &n_ptr, hlen, sizeof(struct udphdr), uh)) {
		return false;
	}

	/* Cache: layer 4 - UDP. */
	npc->npc_info |= (NPC_LAYER4 | NPC_UDP);
	return true;
}

/*
 * npf_fetch_icmp: fetch ICMP code, type and possible query ID.
 */
bool
npf_fetch_icmp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	struct icmp *ic;
	u_int hlen, iclen;

	/* Must have IP header processed for its length and protocol. */
	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return false;
	}
	if (npf_cache_ipproto(npc) != IPPROTO_ICMP &&
	    npf_cache_ipproto(npc) != IPPROTO_ICMPV6) {
		return false;
	}
	ic = &npc->npc_l4.icmp;
	hlen = npf_cache_hlen(npc);

	/* Fetch basic ICMP header, up to the "data" point. */
	CTASSERT(offsetof(struct icmp, icmp_void) ==
	    offsetof(struct icmp6_hdr, icmp6_data32));

	iclen = offsetof(struct icmp, icmp_void);
	if (nbuf_advfetch(&nbuf, &n_ptr, hlen, iclen, ic)) {
		return false;
	}

	/* Cache: layer 4 - ICMP. */
	npc->npc_info |= (NPC_LAYER4 | NPC_ICMP);
	return true;
}

/*
 * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
 * and TCP, UDP or ICMP headers.
 */
int
npf_cache_all(npf_cache_t *npc, nbuf_t *nbuf)
{
	void *n_ptr = nbuf_dataptr(nbuf);

	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
		return npc->npc_info;
	}
	if (npf_iscached(npc, NPC_IPFRAG)) {
		return npc->npc_info;
	}
	switch (npf_cache_ipproto(npc)) {
	case IPPROTO_TCP:
		(void)npf_fetch_tcp(npc, nbuf, n_ptr);
		break;
	case IPPROTO_UDP:
		(void)npf_fetch_udp(npc, nbuf, n_ptr);
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		(void)npf_fetch_icmp(npc, nbuf, n_ptr);
		break;
	}
	return npc->npc_info;
}
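
/*
 * Illustrative usage sketch (not part of the original code): callers start
 * with a zeroed npc_info and test the returned bit field, e.g.
 *
 *	npc.npc_info = 0;
 *	(void)npf_cache_all(&npc, nbuf);
 *	if (npf_iscached(&npc, NPC_IPFRAG))
 *		... only layer 3 was cached, handle the fragment ...
 *	else if (npf_iscached(&npc, NPC_TCP))
 *		... npc.npc_l4.tcp is now valid ...
 */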

/*
 * npf_rwrip: rewrite required IP address, update the cache.
 */
bool
npf_rwrip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    npf_addr_t *addr)
{
	npf_addr_t *oaddr;
	u_int offby;

	KASSERT(npf_iscached(npc, NPC_IP46));

	if (di == PFIL_OUT) {
		/* Rewrite source address, if outgoing. */
		offby = offsetof(struct ip, ip_src);
		oaddr = npc->npc_srcip;
	} else {
		/* Rewrite destination, if incoming. */
		offby = offsetof(struct ip, ip_dst);
		oaddr = npc->npc_dstip;
	}

	/* Advance to the address and rewrite it. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, npc->npc_alen, addr))
		return false;

	/* Cache: IP address. */
	memcpy(oaddr, addr, npc->npc_alen);
	return true;
}

/*
 * npf_rwrport: rewrite required TCP/UDP port, update the cache.
 */
bool
npf_rwrport(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    in_port_t port)
{
	const int proto = npf_cache_ipproto(npc);
	u_int offby = npf_cache_hlen(npc);
	in_port_t *oport;

	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
	KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);

	/* Offset to the port and pointer in the cache. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = &npc->npc_l4.tcp;
		if (di == PFIL_OUT) {
			CTASSERT(offsetof(struct tcphdr, th_sport) == 0);
			oport = &th->th_sport;
		} else {
			offby += offsetof(struct tcphdr, th_dport);
			oport = &th->th_dport;
		}
	} else {
		struct udphdr *uh = &npc->npc_l4.udp;
		if (di == PFIL_OUT) {
			CTASSERT(offsetof(struct udphdr, uh_sport) == 0);
			oport = &uh->uh_sport;
		} else {
			offby += offsetof(struct udphdr, uh_dport);
			oport = &uh->uh_dport;
		}
	}

	/* Advance and rewrite the port. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(in_port_t), &port))
		return false;

	/* Cache: TCP/UDP port. */
	*oport = port;
	return true;
}

/*
 * npf_rwrcksum: rewrite IPv4 and/or TCP/UDP checksum, update the cache.
 */
bool
npf_rwrcksum(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    npf_addr_t *addr, in_port_t port)
{
	const int proto = npf_cache_ipproto(npc);
	npf_addr_t *oaddr;
	in_port_t *oport;
	uint16_t *cksum;
	u_int offby;

	/* Checksum update for IPv4 header. */
	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = &npc->npc_ip.v4;
		uint16_t ipsum;

		oaddr = (di == PFIL_OUT) ? npc->npc_srcip : npc->npc_dstip;
		ipsum = npf_addr_cksum(ip->ip_sum, npc->npc_alen, oaddr, addr);

		/* Advance to the IPv4 checksum and rewrite it. */
		offby = offsetof(struct ip, ip_sum);
		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(ipsum), &ipsum))
			return false;

		ip->ip_sum = ipsum;
		offby = npf_cache_hlen(npc) - offby;
	} else {
		/* No checksum for IPv6. */
		KASSERT(npf_iscached(npc, NPC_IP6));
		oaddr = NULL;
		offby = 0;
		return false;	/* XXX: Not yet supported. */
	}

	/* Determine whether TCP/UDP checksum update is needed. */
	if (proto == IPPROTO_ICMP || port == 0) {
		return true;
	}
	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));

	/* Calculate TCP/UDP checksum. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = &npc->npc_l4.tcp;

		cksum = &th->th_sum;
		offby += offsetof(struct tcphdr, th_sum);
		oport = (di == PFIL_OUT) ? &th->th_sport : &th->th_dport;
	} else {
		struct udphdr *uh = &npc->npc_l4.udp;

		KASSERT(proto == IPPROTO_UDP);
		cksum = &uh->uh_sum;
		if (*cksum == 0) {
			/* No need to update. */
			return true;
		}
		offby += offsetof(struct udphdr, uh_sum);
		oport = (di == PFIL_OUT) ? &uh->uh_sport : &uh->uh_dport;
	}
	*cksum = npf_addr_cksum(*cksum, npc->npc_alen, oaddr, addr);
	*cksum = npf_fixup16_cksum(*cksum, *oport, port);

	/* Advance to TCP/UDP checksum and rewrite it. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(uint16_t), cksum)) {
		return false;
	}
	return true;
}

static inline bool
npf_normalize_ip4(npf_cache_t *npc, nbuf_t *nbuf,
    bool rnd, bool no_df, int minttl)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	struct ip *ip = &npc->npc_ip.v4;
	uint16_t cksum = ip->ip_sum;
	uint16_t ip_off = ip->ip_off;
	uint8_t ttl = ip->ip_ttl;
	u_int offby = 0;

	KASSERT(rnd || minttl || no_df);

	/* Randomize IPv4 ID. */
	if (rnd) {
		uint16_t oid = ip->ip_id, nid;

		nid = htons(ip_randomid(ip_ids, 0));
		offby = offsetof(struct ip, ip_id);
		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(nid), &nid)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, oid, nid);
		ip->ip_id = nid;
	}

	/* IP_DF flag cleansing. */
	if (no_df && (ip_off & htons(IP_DF)) != 0) {
		uint16_t nip_off = ip_off & ~htons(IP_DF);

		if (nbuf_advstore(&nbuf, &n_ptr,
		    offsetof(struct ip, ip_off) - offby,
		    sizeof(uint16_t), &nip_off)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, ip_off, nip_off);
		ip->ip_off = nip_off;
		offby = offsetof(struct ip, ip_off);
	}

	/* Enforce minimum TTL. */
	if (minttl && ttl < minttl) {
		if (nbuf_advstore(&nbuf, &n_ptr,
		    offsetof(struct ip, ip_ttl) - offby,
		    sizeof(uint8_t), &minttl)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, ttl, minttl);
		ip->ip_ttl = minttl;
		offby = offsetof(struct ip, ip_ttl);
	}

	/* Update IP checksum. */
	offby = offsetof(struct ip, ip_sum) - offby;
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
		return false;
	}
	ip->ip_sum = cksum;
	return true;
}

bool
npf_normalize(npf_cache_t *npc, nbuf_t *nbuf,
    bool no_df, bool rnd, u_int minttl, u_int maxmss)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	struct tcphdr *th = &npc->npc_l4.tcp;
	uint16_t cksum, mss;
	u_int offby;
	int wscale;

	/* Normalize IPv4. */
	if (npf_iscached(npc, NPC_IP4) && (rnd || minttl)) {
		if (!npf_normalize_ip4(npc, nbuf, rnd, no_df, minttl)) {
			return false;
		}
	} else if (!npf_iscached(npc, NPC_IP4)) {
		/* XXX: no IPv6 */
		return false;
	}

	/*
	 * TCP Maximum Segment Size (MSS) "clamping".  Only if SYN packet.
	 * Fetch MSS and check whether rewrite to lower is needed.
	 */
	if (maxmss == 0 || !npf_iscached(npc, NPC_TCP) ||
	    (th->th_flags & TH_SYN) == 0) {
		/* Not required; done. */
		return true;
	}
	mss = 0;
	if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
		return false;
	}
	if (ntohs(mss) <= maxmss) {
		return true;
	}

	/* Calculate TCP checksum, then rewrite MSS and the checksum. */
	maxmss = htons(maxmss);
	cksum = npf_fixup16_cksum(th->th_sum, mss, maxmss);
	th->th_sum = cksum;
	mss = maxmss;
	if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
		return false;
	}
	offby = npf_cache_hlen(npc) + offsetof(struct tcphdr, th_sum);
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
		return false;
	}
	return true;
}
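
/*
 * Note on the MSS clamping above (added for clarity, not in the original):
 * npf_fetch_tcpopts() is called twice on purpose.  The first call, with
 * *mss == 0, only fetches the current MSS option; the second call, with
 * *mss set to the clamped value, rewrites the option in the packet.  The
 * TCP checksum is patched incrementally with npf_fixup16_cksum() instead
 * of being recomputed.  For example, npf_normalize(npc, nbuf, false, false,
 * 0, 1440) would rewrite an MSS of 1460 down to 1440 on SYN packets.
 */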

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_addr_dump(const npf_addr_t *addr)
{
	printf("IP[%x:%x:%x:%x]\n",
	    addr->s6_addr32[0], addr->s6_addr32[1],
	    addr->s6_addr32[2], addr->s6_addr32[3]);
}

#endif