1 /* $NetBSD: ip_output.c,v 1.163 2006/07/12 13:11:27 tron Exp $ */
2
3 /*
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 1998 The NetBSD Foundation, Inc.
34 * All rights reserved.
35 *
36 * This code is derived from software contributed to The NetBSD Foundation
37 * by Public Access Networks Corporation ("Panix"). It was developed under
38 * contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the NetBSD
51 * Foundation, Inc. and its contributors.
52 * 4. Neither the name of The NetBSD Foundation nor the names of its
53 * contributors may be used to endorse or promote products derived
54 * from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
57 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
58 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
59 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
60 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
61 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
62 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
63 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
64 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
65 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 /*
70 * Copyright (c) 1982, 1986, 1988, 1990, 1993
71 * The Regents of the University of California. All rights reserved.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. Neither the name of the University nor the names of its contributors
82 * may be used to endorse or promote products derived from this software
83 * without specific prior written permission.
84 *
85 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
86 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
89 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
90 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
91 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
92 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
93 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
94 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
95 * SUCH DAMAGE.
96 *
97 * @(#)ip_output.c 8.3 (Berkeley) 1/21/94
98 */
99
100 #include <sys/cdefs.h>
101 __KERNEL_RCSID(0, "$NetBSD: ip_output.c,v 1.163 2006/07/12 13:11:27 tron Exp $");
102
103 #include "opt_pfil_hooks.h"
104 #include "opt_inet.h"
105 #include "opt_ipsec.h"
106 #include "opt_mrouting.h"
107
108 #include <sys/param.h>
109 #include <sys/malloc.h>
110 #include <sys/mbuf.h>
111 #include <sys/errno.h>
112 #include <sys/protosw.h>
113 #include <sys/socket.h>
114 #include <sys/socketvar.h>
115 #include <sys/kauth.h>
116 #ifdef FAST_IPSEC
117 #include <sys/domain.h>
118 #endif
119 #include <sys/systm.h>
120 #include <sys/proc.h>
121
122 #include <net/if.h>
123 #include <net/route.h>
124 #include <net/pfil.h>
125
126 #include <netinet/in.h>
127 #include <netinet/in_systm.h>
128 #include <netinet/ip.h>
129 #include <netinet/in_pcb.h>
130 #include <netinet/in_var.h>
131 #include <netinet/ip_var.h>
132 #include <netinet/in_offload.h>
133
134 #ifdef MROUTING
135 #include <netinet/ip_mroute.h>
136 #endif
137
138 #include <machine/stdarg.h>
139
140 #ifdef IPSEC
141 #include <netinet6/ipsec.h>
142 #include <netkey/key.h>
143 #include <netkey/key_debug.h>
144 #endif /*IPSEC*/
145
146 #ifdef FAST_IPSEC
147 #include <netipsec/ipsec.h>
148 #include <netipsec/key.h>
149 #include <netipsec/xform.h>
150 #endif /* FAST_IPSEC*/
151
152 #ifdef IPSEC_NAT_T
153 #include <netinet/udp.h>
154 #endif
155
156 static struct mbuf *ip_insertoptions(struct mbuf *, struct mbuf *, int *);
157 static struct ifnet *ip_multicast_if(struct in_addr *, int *);
158 static void ip_mloopback(struct ifnet *, struct mbuf *, struct sockaddr_in *);
159 static int ip_getoptval(struct mbuf *, u_int8_t *, u_int);
160
161 #ifdef PFIL_HOOKS
162 extern struct pfil_head inet_pfil_hook; /* XXX */
163 #endif
164
165 int ip_do_loopback_cksum = 0;
166
167 #define IN_NEED_CHECKSUM(ifp, csum_flags) \
168 (__predict_true(((ifp)->if_flags & IFF_LOOPBACK) == 0 || \
169 (((csum_flags) & M_CSUM_UDPv4) != 0 && udp_do_loopback_cksum) || \
170 (((csum_flags) & M_CSUM_TCPv4) != 0 && tcp_do_loopback_cksum) || \
171 (((csum_flags) & M_CSUM_IPv4) != 0 && ip_do_loopback_cksum)))
172
173 struct ip_tso_output_args {
174 struct ifnet *ifp;
175 struct sockaddr *sa;
176 struct rtentry *rt;
177 };
178
179 static int ip_tso_output_callback(void *, struct mbuf *);
180 static int ip_tso_output(struct ifnet *, struct mbuf *, struct sockaddr *,
181 struct rtentry *);
182
183 static int
184 ip_tso_output_callback(void *vp, struct mbuf *m)
185 {
186 struct ip_tso_output_args *args = vp;
187 struct ifnet *ifp = args->ifp;
188
189 return (*ifp->if_output)(ifp, m, args->sa, args->rt);
190 }
191
192 static int
193 ip_tso_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
194 struct rtentry *rt)
195 {
196 struct ip_tso_output_args args;
197
198 args.ifp = ifp;
199 args.sa = sa;
200 args.rt = rt;
201
202 return tcp4_segment(m, ip_tso_output_callback, &args);
203 }
204
205 /*
206 * IP output. The packet in mbuf chain m contains a skeletal IP
207 * header (with len, off, ttl, proto, tos, src, dst).
208 * The mbuf chain containing the packet will be freed.
209 * The mbuf opt, if present, will not be freed.
210 */
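/*
 * Illustrative calling sketch (assumes a typical protocol PCB with
 * inp_options, inp_route and inp_moptions; values are protocol
 * specific).  The variadic arguments after m0 are consumed below in
 * the order opt, ro, flags, imo, so and, when IP_RETURNMTU is set in
 * flags, a trailing "int *" that receives the path MTU, so a caller
 * might look roughly like:
 *
 *	error = ip_output(m, inp->inp_options, &inp->inp_route,
 *	    flags | IP_RETURNMTU, inp->inp_moptions, so, &mtu);
 */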
211 int
212 ip_output(struct mbuf *m0, ...)
213 {
214 struct ip *ip;
215 struct ifnet *ifp;
216 struct mbuf *m = m0;
217 int hlen = sizeof (struct ip);
218 int len, error = 0;
219 struct route iproute;
220 struct sockaddr_in *dst;
221 struct in_ifaddr *ia;
222 struct mbuf *opt;
223 struct route *ro;
224 int flags, sw_csum;
225 int *mtu_p;
226 u_long mtu;
227 struct ip_moptions *imo;
228 struct socket *so;
229 va_list ap;
230 #ifdef IPSEC_NAT_T
231 int natt_frag = 0;
232 #endif
233 #ifdef IPSEC
234 struct secpolicy *sp = NULL;
235 #endif /*IPSEC*/
236 #ifdef FAST_IPSEC
237 struct inpcb *inp;
238 struct m_tag *mtag;
239 struct secpolicy *sp = NULL;
240 struct tdb_ident *tdbi;
241 int s;
242 #endif
243 u_int16_t ip_len;
244
245 len = 0;
246 va_start(ap, m0);
247 opt = va_arg(ap, struct mbuf *);
248 ro = va_arg(ap, struct route *);
249 flags = va_arg(ap, int);
250 imo = va_arg(ap, struct ip_moptions *);
251 so = va_arg(ap, struct socket *);
252 if (flags & IP_RETURNMTU)
253 mtu_p = va_arg(ap, int *);
254 else
255 mtu_p = NULL;
256 va_end(ap);
257
258 MCLAIM(m, &ip_tx_mowner);
259 #ifdef FAST_IPSEC
260 if (so != NULL && so->so_proto->pr_domain->dom_family == AF_INET)
261 inp = (struct inpcb *)so->so_pcb;
262 else
263 inp = NULL;
264 #endif /* FAST_IPSEC */
265
266 #ifdef DIAGNOSTIC
267 if ((m->m_flags & M_PKTHDR) == 0)
268 panic("ip_output: no HDR");
269
270 if ((m->m_pkthdr.csum_flags &
271 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
272 panic("ip_output: IPv6 checksum offload flags: %d",
273 m->m_pkthdr.csum_flags);
274 }
275
276 if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) ==
277 (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
278 panic("ip_output: conflicting checksum offload flags: %d",
279 m->m_pkthdr.csum_flags);
280 }
281 #endif
282 if (opt) {
283 m = ip_insertoptions(m, opt, &len);
284 if (len >= sizeof(struct ip))
285 hlen = len;
286 }
287 ip = mtod(m, struct ip *);
288 /*
289 * Fill in IP header.
290 */
291 if ((flags & (IP_FORWARDING|IP_RAWOUTPUT)) == 0) {
292 ip->ip_v = IPVERSION;
293 ip->ip_off = htons(0);
294 if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0) {
295 ip->ip_id = ip_newid();
296 } else {
297
298 /*
299 * TSO capable interfaces (typically?) increment
300 * ip_id for each segment.
301 * "allocate" enough ids here to increase the chance
302 * for them to be unique.
303 *
304 * note that the following calculation need not
305 * be precise; wasting some ip_id values is fine.
306 */
307
308 unsigned int segsz = m->m_pkthdr.segsz;
309 unsigned int datasz = ntohs(ip->ip_len) - hlen;
310 unsigned int num = howmany(datasz, segsz);
311
312 ip->ip_id = ip_newid_range(num);
313 }
314 ip->ip_hl = hlen >> 2;
315 ipstat.ips_localout++;
316 } else {
317 hlen = ip->ip_hl << 2;
318 }
319 /*
320 * Route packet.
321 */
322 if (ro == 0) {
323 ro = &iproute;
324 bzero((caddr_t)ro, sizeof (*ro));
325 }
326 dst = satosin(&ro->ro_dst);
327 /*
328 * If there is a cached route,
329 * check that it is to the same destination
330 * and is still up. If not, free it and try again.
331 * The address family should also be checked in case of sharing the
332 * cache with IPv6.
333 */
334 if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 ||
335 dst->sin_family != AF_INET ||
336 !in_hosteq(dst->sin_addr, ip->ip_dst))) {
337 RTFREE(ro->ro_rt);
338 ro->ro_rt = (struct rtentry *)0;
339 }
340 if (ro->ro_rt == 0) {
341 bzero(dst, sizeof(*dst));
342 dst->sin_family = AF_INET;
343 dst->sin_len = sizeof(*dst);
344 dst->sin_addr = ip->ip_dst;
345 }
346 /*
347 * If routing to interface only,
348 * short circuit routing lookup.
349 */
350 if (flags & IP_ROUTETOIF) {
351 if ((ia = ifatoia(ifa_ifwithladdr(sintosa(dst)))) == 0) {
352 ipstat.ips_noroute++;
353 error = ENETUNREACH;
354 goto bad;
355 }
356 ifp = ia->ia_ifp;
357 mtu = ifp->if_mtu;
358 ip->ip_ttl = 1;
359 } else if ((IN_MULTICAST(ip->ip_dst.s_addr) ||
360 ip->ip_dst.s_addr == INADDR_BROADCAST) &&
361 imo != NULL && imo->imo_multicast_ifp != NULL) {
362 ifp = imo->imo_multicast_ifp;
363 mtu = ifp->if_mtu;
364 IFP_TO_IA(ifp, ia);
365 } else {
366 if (ro->ro_rt == 0)
367 rtalloc(ro);
368 if (ro->ro_rt == 0) {
369 ipstat.ips_noroute++;
370 error = EHOSTUNREACH;
371 goto bad;
372 }
373 ia = ifatoia(ro->ro_rt->rt_ifa);
374 ifp = ro->ro_rt->rt_ifp;
375 if ((mtu = ro->ro_rt->rt_rmx.rmx_mtu) == 0)
376 mtu = ifp->if_mtu;
377 ro->ro_rt->rt_use++;
378 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
379 dst = satosin(ro->ro_rt->rt_gateway);
380 }
381 if (IN_MULTICAST(ip->ip_dst.s_addr) ||
382 (ip->ip_dst.s_addr == INADDR_BROADCAST)) {
383 struct in_multi *inm;
384
385 m->m_flags |= (ip->ip_dst.s_addr == INADDR_BROADCAST) ?
386 M_BCAST : M_MCAST;
387 /*
388 * IP destination address is multicast. Make sure "dst"
389 * still points to the address in "ro". (It may have been
390 * changed to point to a gateway address, above.)
391 */
392 dst = satosin(&ro->ro_dst);
393 /*
394 * See if the caller provided any multicast options
395 */
396 if (imo != NULL)
397 ip->ip_ttl = imo->imo_multicast_ttl;
398 else
399 ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL;
400
401 /*
402 * if we don't know the outgoing ifp yet, we can't generate
403 * output
404 */
405 if (!ifp) {
406 ipstat.ips_noroute++;
407 error = ENETUNREACH;
408 goto bad;
409 }
410
411 /*
412 * If the packet is multicast or broadcast, confirm that
413 * the outgoing interface can transmit it.
414 */
415 if (((m->m_flags & M_MCAST) &&
416 (ifp->if_flags & IFF_MULTICAST) == 0) ||
417 ((m->m_flags & M_BCAST) &&
418 (ifp->if_flags & (IFF_BROADCAST|IFF_POINTOPOINT)) == 0)) {
419 ipstat.ips_noroute++;
420 error = ENETUNREACH;
421 goto bad;
422 }
423 /*
424 * If source address not specified yet, use an address
425 * of outgoing interface.
426 */
427 if (in_nullhost(ip->ip_src)) {
428 struct in_ifaddr *xia;
429
430 IFP_TO_IA(ifp, xia);
431 if (!xia) {
432 error = EADDRNOTAVAIL;
433 goto bad;
434 }
435 ip->ip_src = xia->ia_addr.sin_addr;
436 }
437
438 IN_LOOKUP_MULTI(ip->ip_dst, ifp, inm);
439 if (inm != NULL &&
440 (imo == NULL || imo->imo_multicast_loop)) {
441 /*
442 * If we belong to the destination multicast group
443 * on the outgoing interface, and the caller did not
444 * forbid loopback, loop back a copy.
445 */
446 ip_mloopback(ifp, m, dst);
447 }
448 #ifdef MROUTING
449 else {
450 /*
451 * If we are acting as a multicast router, perform
452 * multicast forwarding as if the packet had just
453 * arrived on the interface to which we are about
454 * to send. The multicast forwarding function
455 * recursively calls this function, using the
456 * IP_FORWARDING flag to prevent infinite recursion.
457 *
458 * Multicasts that are looped back by ip_mloopback(),
459 * above, will be forwarded by the ip_input() routine,
460 * if necessary.
461 */
462 extern struct socket *ip_mrouter;
463
464 if (ip_mrouter && (flags & IP_FORWARDING) == 0) {
465 if (ip_mforward(m, ifp) != 0) {
466 m_freem(m);
467 goto done;
468 }
469 }
470 }
471 #endif
472 /*
473 * Multicasts with a time-to-live of zero may be looped-
474 * back, above, but must not be transmitted on a network.
475 * Also, multicasts addressed to the loopback interface
476 * are not sent -- the above call to ip_mloopback() will
477 * loop back a copy if this host actually belongs to the
478 * destination group on the loopback interface.
479 */
480 if (ip->ip_ttl == 0 || (ifp->if_flags & IFF_LOOPBACK) != 0) {
481 m_freem(m);
482 goto done;
483 }
484
485 goto sendit;
486 }
487 /*
488 * If source address not specified yet, use address
489 * of outgoing interface.
490 */
491 if (in_nullhost(ip->ip_src))
492 ip->ip_src = ia->ia_addr.sin_addr;
493
494 /*
495 * Packets with a Class-D (multicast) source address are not
496 * valid per RFC 1112.
497 */
498 if (IN_MULTICAST(ip->ip_src.s_addr)) {
499 ipstat.ips_odropped++;
500 error = EADDRNOTAVAIL;
501 goto bad;
502 }
503
504 /*
505 * Look for a broadcast address and
506 * verify the user is allowed to send
507 * such a packet.
508 */
509 if (in_broadcast(dst->sin_addr, ifp)) {
510 if ((ifp->if_flags & IFF_BROADCAST) == 0) {
511 error = EADDRNOTAVAIL;
512 goto bad;
513 }
514 if ((flags & IP_ALLOWBROADCAST) == 0) {
515 error = EACCES;
516 goto bad;
517 }
518 /* don't allow broadcast messages to be fragmented */
519 if (ntohs(ip->ip_len) > ifp->if_mtu) {
520 error = EMSGSIZE;
521 goto bad;
522 }
523 m->m_flags |= M_BCAST;
524 } else
525 m->m_flags &= ~M_BCAST;
526
527 sendit:
528 /*
529 * If we're doing Path MTU Discovery, we need to set DF unless
530 * the route's MTU is locked.
531 */
532 if ((flags & IP_MTUDISC) != 0 && ro->ro_rt != NULL &&
533 (ro->ro_rt->rt_rmx.rmx_locks & RTV_MTU) == 0)
534 ip->ip_off |= htons(IP_DF);
535
536 /* Remember the current ip_len */
537 ip_len = ntohs(ip->ip_len);
538
539 #ifdef IPSEC
540 /* get SP for this packet */
541 if (so == NULL)
542 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND,
543 flags, &error);
544 else {
545 if (IPSEC_PCB_SKIP_IPSEC(sotoinpcb_hdr(so)->inph_sp,
546 IPSEC_DIR_OUTBOUND))
547 goto skip_ipsec;
548 sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
549 }
550
551 if (sp == NULL) {
552 ipsecstat.out_inval++;
553 goto bad;
554 }
555
556 error = 0;
557
558 /* check policy */
559 switch (sp->policy) {
560 case IPSEC_POLICY_DISCARD:
561 /*
562 * This packet is just discarded.
563 */
564 ipsecstat.out_polvio++;
565 goto bad;
566
567 case IPSEC_POLICY_BYPASS:
568 case IPSEC_POLICY_NONE:
569 /* no need to do IPsec. */
570 goto skip_ipsec;
571
572 case IPSEC_POLICY_IPSEC:
573 if (sp->req == NULL) {
574 /* XXX should be panic ? */
575 printf("ip_output: No IPsec request specified.\n");
576 error = EINVAL;
577 goto bad;
578 }
579 break;
580
581 case IPSEC_POLICY_ENTRUST:
582 default:
583 printf("ip_output: Invalid policy found. %d\n", sp->policy);
584 }
585
586 #ifdef IPSEC_NAT_T
587 /*
588 * NAT-T ESP fragmentation: don't do IPsec processing now;
589 * we'll do it on each fragmented packet.
590 */
591 if (sp->req->sav &&
592 ((sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP) ||
593 (sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP_NON_IKE))) {
594 if (ntohs(ip->ip_len) > sp->req->sav->esp_frag) {
595 natt_frag = 1;
596 mtu = sp->req->sav->esp_frag;
597 goto skip_ipsec;
598 }
599 }
600 #endif /* IPSEC_NAT_T */
601
602 /*
603 * ipsec4_output() expects ip_len and ip_off in network
604 * order. They have been set to network order above.
605 */
606
607 {
608 struct ipsec_output_state state;
609 bzero(&state, sizeof(state));
610 state.m = m;
611 if (flags & IP_ROUTETOIF) {
612 state.ro = &iproute;
613 bzero(&iproute, sizeof(iproute));
614 } else
615 state.ro = ro;
616 state.dst = (struct sockaddr *)dst;
617
618 /*
619 * We can't defer the checksum of payload data if
620 * we're about to encrypt/authenticate it.
621 *
622 * XXX When we support crypto offloading functions of
623 * XXX network interfaces, we need to reconsider this,
624 * XXX since it's likely that they'll support checksumming,
625 * XXX as well.
626 */
627 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
628 in_delayed_cksum(m);
629 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
630 }
631
632 error = ipsec4_output(&state, sp, flags);
633
634 m = state.m;
635 if (flags & IP_ROUTETOIF) {
636 /*
637 * if we have tunnel mode SA, we may need to ignore
638 * IP_ROUTETOIF.
639 */
640 if (state.ro != &iproute || state.ro->ro_rt != NULL) {
641 flags &= ~IP_ROUTETOIF;
642 ro = state.ro;
643 }
644 } else
645 ro = state.ro;
646 dst = (struct sockaddr_in *)state.dst;
647 if (error) {
648 /* mbuf is already reclaimed in ipsec4_output. */
649 m0 = NULL;
650 switch (error) {
651 case EHOSTUNREACH:
652 case ENETUNREACH:
653 case EMSGSIZE:
654 case ENOBUFS:
655 case ENOMEM:
656 break;
657 default:
658 printf("ip4_output (ipsec): error code %d\n", error);
659 /*fall through*/
660 case ENOENT:
661 /* don't show these error codes to the user */
662 error = 0;
663 break;
664 }
665 goto bad;
666 }
667
668 /* be sure to update variables that are affected by ipsec4_output() */
669 ip = mtod(m, struct ip *);
670 hlen = ip->ip_hl << 2;
671 ip_len = ntohs(ip->ip_len);
672
673 if (ro->ro_rt == NULL) {
674 if ((flags & IP_ROUTETOIF) == 0) {
675 printf("ip_output: "
676 "can't update route after IPsec processing\n");
677 error = EHOSTUNREACH; /*XXX*/
678 goto bad;
679 }
680 } else {
681 /* nobody uses ia beyond here */
682 if (state.encap) {
683 ifp = ro->ro_rt->rt_ifp;
684 if ((mtu = ro->ro_rt->rt_rmx.rmx_mtu) == 0)
685 mtu = ifp->if_mtu;
686 }
687 }
688 }
689 skip_ipsec:
690 #endif /*IPSEC*/
691 #ifdef FAST_IPSEC
692 /*
693 * Check the security policy (SP) for the packet and, if
694 * required, do IPsec-related processing. There are two
695 * cases here; the first time a packet is sent through
696 * it will be untagged and handled by ipsec4_checkpolicy.
697 * If the packet is resubmitted to ip_output (e.g. after
698 * AH, ESP, etc. processing), there will be a tag to bypass
699 * the lookup and related policy checking.
700 */
701 mtag = m_tag_find(m, PACKET_TAG_IPSEC_PENDING_TDB, NULL);
702 s = splsoftnet();
703 if (mtag != NULL) {
704 tdbi = (struct tdb_ident *)(mtag + 1);
705 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_OUTBOUND);
706 if (sp == NULL)
707 error = -EINVAL; /* force silent drop */
708 m_tag_delete(m, mtag);
709 } else {
710 if (inp != NULL &&
711 IPSEC_PCB_SKIP_IPSEC(inp->inp_sp, IPSEC_DIR_OUTBOUND))
712 goto spd_done;
713 sp = ipsec4_checkpolicy(m, IPSEC_DIR_OUTBOUND, flags,
714 &error, inp);
715 }
716 /*
717 * There are four return cases:
718 * sp != NULL apply IPsec policy
719 * sp == NULL, error == 0 no IPsec handling needed
720 * sp == NULL, error == -EINVAL discard packet w/o error
721 * sp == NULL, error != 0 discard packet, report error
722 */
723 if (sp != NULL) {
724 #ifdef IPSEC_NAT_T
725 /*
726 * NAT-T ESP fragmentation: don't do IPsec processing now;
727 * we'll do it on each fragmented packet.
728 */
729 if (sp->req->sav &&
730 ((sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP) ||
731 (sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP_NON_IKE))) {
732 if (ntohs(ip->ip_len) > sp->req->sav->esp_frag) {
733 natt_frag = 1;
734 mtu = sp->req->sav->esp_frag;
735 goto spd_done;
736 }
737 }
738 #endif /* IPSEC_NAT_T */
739 /* Loop detection, check if ipsec processing already done */
740 IPSEC_ASSERT(sp->req != NULL, ("ip_output: no ipsec request"));
741 for (mtag = m_tag_first(m); mtag != NULL;
742 mtag = m_tag_next(m, mtag)) {
743 #ifdef MTAG_ABI_COMPAT
744 if (mtag->m_tag_cookie != MTAG_ABI_COMPAT)
745 continue;
746 #endif
747 if (mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_DONE &&
748 mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED)
749 continue;
750 /*
751 * Check if policy has an SA associated with it.
752 * This can happen when an SP has yet to acquire
753 * an SA; e.g. on first reference. If it occurs,
754 * then we let ipsec4_process_packet do its thing.
755 */
756 if (sp->req->sav == NULL)
757 break;
758 tdbi = (struct tdb_ident *)(mtag + 1);
759 if (tdbi->spi == sp->req->sav->spi &&
760 tdbi->proto == sp->req->sav->sah->saidx.proto &&
761 bcmp(&tdbi->dst, &sp->req->sav->sah->saidx.dst,
762 sizeof (union sockaddr_union)) == 0) {
763 /*
764 * No IPsec processing is needed, free
765 * reference to SP.
766 *
767 * NB: null pointer to avoid free at
768 * done: below.
769 */
770 KEY_FREESP(&sp), sp = NULL;
771 splx(s);
772 goto spd_done;
773 }
774 }
775
776 /*
777 * Do delayed checksums now because we send before
778 * this is done in the normal processing path.
779 */
780 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
781 in_delayed_cksum(m);
782 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
783 }
784
785 #ifdef __FreeBSD__
786 ip->ip_len = htons(ip->ip_len);
787 ip->ip_off = htons(ip->ip_off);
788 #endif
789
790 /* NB: callee frees mbuf */
791 error = ipsec4_process_packet(m, sp->req, flags, 0);
792 /*
793 * Preserve KAME behaviour: ENOENT can be returned
794 * when an SA acquire is in progress. Don't propagate
795 * this to user-level; it confuses applications.
796 *
797 * XXX this will go away when the SADB is redone.
798 */
799 if (error == ENOENT)
800 error = 0;
801 splx(s);
802 goto done;
803 } else {
804 splx(s);
805
806 if (error != 0) {
807 /*
808 * Hack: -EINVAL is used to signal that a packet
809 * should be silently discarded. This is typically
810 * because we asked key management for an SA and
811 * it was delayed (e.g. kicked up to IKE).
812 */
813 if (error == -EINVAL)
814 error = 0;
815 goto bad;
816 } else {
817 /* No IPsec processing for this packet. */
818 }
819 #ifdef notyet
820 /*
821 * If deferred crypto processing is needed, check that
822 * the interface supports it.
823 */
824 mtag = m_tag_find(m, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL);
825 if (mtag != NULL && (ifp->if_capenable & IFCAP_IPSEC) == 0) {
826 /* notify IPsec to do its own crypto */
827 ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
828 error = EHOSTUNREACH;
829 goto bad;
830 }
831 #endif
832 }
833 spd_done:
834 #endif /* FAST_IPSEC */
835
836 #ifdef PFIL_HOOKS
837 /*
838 * Run through list of hooks for output packets.
839 */
840 if ((error = pfil_run_hooks(&inet_pfil_hook, &m, ifp, PFIL_OUT)) != 0)
841 goto done;
842 if (m == NULL)
843 goto done;
844
845 ip = mtod(m, struct ip *);
846 hlen = ip->ip_hl << 2;
847 #endif /* PFIL_HOOKS */
848
849 m->m_pkthdr.csum_data |= hlen << 16;
850
851 #if IFA_STATS
852 /*
853 * search for the source address structure to
854 * maintain output statistics.
855 */
856 INADDR_TO_IA(ip->ip_src, ia);
857 #endif
858
859 /* Maybe skip checksums on loopback interfaces. */
860 if (IN_NEED_CHECKSUM(ifp, M_CSUM_IPv4)) {
861 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
862 }
863 sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_csum_flags_tx;
864 /*
865 * If small enough for mtu of path, or if using TCP segmentation
866 * offload, can just send directly.
867 */
868 if (ip_len <= mtu ||
869 (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
870 #if IFA_STATS
871 if (ia)
872 ia->ia_ifa.ifa_data.ifad_outbytes += ip_len;
873 #endif
874 /*
875 * Always initialize the sum to 0! Some HW assisted
876 * checksumming requires this.
877 */
878 ip->ip_sum = 0;
879
880 if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0) {
881 /*
882 * Perform any checksums that the hardware can't do
883 * for us.
884 *
885 * XXX Does any hardware require the {th,uh}_sum
886 * XXX fields to be 0?
887 */
888 if (sw_csum & M_CSUM_IPv4) {
889 KASSERT(IN_NEED_CHECKSUM(ifp, M_CSUM_IPv4));
890 ip->ip_sum = in_cksum(m, hlen);
891 m->m_pkthdr.csum_flags &= ~M_CSUM_IPv4;
892 }
893 if (sw_csum & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
894 if (IN_NEED_CHECKSUM(ifp,
895 sw_csum & (M_CSUM_TCPv4|M_CSUM_UDPv4))) {
896 in_delayed_cksum(m);
897 }
898 m->m_pkthdr.csum_flags &=
899 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
900 }
901 }
902
903 #ifdef IPSEC
904 /* clean ipsec history once it goes out of the node */
905 ipsec_delaux(m);
906 #endif
907
908 if (__predict_true(
909 (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0 ||
910 (ifp->if_capenable & IFCAP_TSOv4) != 0)) {
911 error =
912 (*ifp->if_output)(ifp, m, sintosa(dst), ro->ro_rt);
913 } else {
914 error =
915 ip_tso_output(ifp, m, sintosa(dst), ro->ro_rt);
916 }
917 goto done;
918 }
919
920 /*
921 * We can't use HW checksumming if we're about
922 * to fragment the packet.
923 *
924 * XXX Some hardware can do this.
925 */
926 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
927 if (IN_NEED_CHECKSUM(ifp,
928 m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))) {
929 in_delayed_cksum(m);
930 }
931 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
932 }
933
934 /*
935 * Too large for interface; fragment if possible.
936 * Must be able to put at least 8 bytes per fragment.
937 */
938 if (ntohs(ip->ip_off) & IP_DF) {
939 if (flags & IP_RETURNMTU)
940 *mtu_p = mtu;
941 error = EMSGSIZE;
942 ipstat.ips_cantfrag++;
943 goto bad;
944 }
945
946 error = ip_fragment(m, ifp, mtu);
947 if (error) {
948 m = NULL;
949 goto bad;
950 }
951
952 for (; m; m = m0) {
953 m0 = m->m_nextpkt;
954 m->m_nextpkt = 0;
955 if (error == 0) {
956 #if IFA_STATS
957 if (ia)
958 ia->ia_ifa.ifa_data.ifad_outbytes +=
959 ntohs(ip->ip_len);
960 #endif
961 #ifdef IPSEC
962 /* clean ipsec history once it goes out of the node */
963 ipsec_delaux(m);
964 #endif /* IPSEC */
965
966 #ifdef IPSEC_NAT_T
967 /*
968 * If we get here, the packet has not been handled by
969 * IPsec even though it should have been. Now that it has
970 * been fragmented, re-inject it into ip_output so that
971 * IPsec processing can occur.
972 */
973 if (natt_frag) {
974 error = ip_output(m, opt,
975 ro, flags, imo, so, mtu_p);
976 } else
977 #endif /* IPSEC_NAT_T */
978 {
979 KASSERT((m->m_pkthdr.csum_flags &
980 (M_CSUM_UDPv4 | M_CSUM_TCPv4)) == 0);
981 error = (*ifp->if_output)(ifp, m, sintosa(dst),
982 ro->ro_rt);
983 }
984 } else
985 m_freem(m);
986 }
987
988 if (error == 0)
989 ipstat.ips_fragmented++;
990 done:
991 if (ro == &iproute && (flags & IP_ROUTETOIF) == 0 && ro->ro_rt) {
992 RTFREE(ro->ro_rt);
993 ro->ro_rt = 0;
994 }
995
996 #ifdef IPSEC
997 if (sp != NULL) {
998 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
999 printf("DP ip_output call free SP:%p\n", sp));
1000 key_freesp(sp);
1001 }
1002 #endif /* IPSEC */
1003 #ifdef FAST_IPSEC
1004 if (sp != NULL)
1005 KEY_FREESP(&sp);
1006 #endif /* FAST_IPSEC */
1007
1008 return (error);
1009 bad:
1010 m_freem(m);
1011 goto done;
1012 }
1013
1014 int
1015 ip_fragment(struct mbuf *m, struct ifnet *ifp, u_long mtu)
1016 {
1017 struct ip *ip, *mhip;
1018 struct mbuf *m0;
1019 int len, hlen, off;
1020 int mhlen, firstlen;
1021 struct mbuf **mnext;
1022 int sw_csum = m->m_pkthdr.csum_flags;
1023 int fragments = 0;
1024 int s;
1025 int error = 0;
1026
1027 ip = mtod(m, struct ip *);
1028 hlen = ip->ip_hl << 2;
1029 if (ifp != NULL)
1030 sw_csum &= ~ifp->if_csum_flags_tx;
1031
1032 len = (mtu - hlen) &~ 7;
1033 if (len < 8) {
1034 m_freem(m);
1035 return (EMSGSIZE);
1036 }
1037
1038 firstlen = len;
1039 mnext = &m->m_nextpkt;
1040
1041 /*
1042 * Loop through length of segment after first fragment,
1043 * make new header and copy data of each part and link onto chain.
1044 */
1045 m0 = m;
1046 mhlen = sizeof (struct ip);
1047 for (off = hlen + len; off < ntohs(ip->ip_len); off += len) {
1048 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1049 if (m == 0) {
1050 error = ENOBUFS;
1051 ipstat.ips_odropped++;
1052 goto sendorfree;
1053 }
1054 MCLAIM(m, m0->m_owner);
1055 *mnext = m;
1056 mnext = &m->m_nextpkt;
1057 m->m_data += max_linkhdr;
1058 mhip = mtod(m, struct ip *);
1059 *mhip = *ip;
1060 /* we must inherit MCAST and BCAST flags */
1061 m->m_flags |= m0->m_flags & (M_MCAST|M_BCAST);
1062 if (hlen > sizeof (struct ip)) {
1063 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1064 mhip->ip_hl = mhlen >> 2;
1065 }
1066 m->m_len = mhlen;
1067 mhip->ip_off = ((off - hlen) >> 3) +
1068 (ntohs(ip->ip_off) & ~IP_MF);
1069 if (ip->ip_off & htons(IP_MF))
1070 mhip->ip_off |= IP_MF;
1071 if (off + len >= ntohs(ip->ip_len))
1072 len = ntohs(ip->ip_len) - off;
1073 else
1074 mhip->ip_off |= IP_MF;
1075 HTONS(mhip->ip_off);
1076 mhip->ip_len = htons((u_int16_t)(len + mhlen));
1077 m->m_next = m_copy(m0, off, len);
1078 if (m->m_next == 0) {
1079 error = ENOBUFS; /* ??? */
1080 ipstat.ips_odropped++;
1081 goto sendorfree;
1082 }
1083 m->m_pkthdr.len = mhlen + len;
1084 m->m_pkthdr.rcvif = (struct ifnet *)0;
1085 mhip->ip_sum = 0;
1086 if (sw_csum & M_CSUM_IPv4) {
1087 mhip->ip_sum = in_cksum(m, mhlen);
1088 KASSERT((m->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0);
1089 } else {
1090 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1091 m->m_pkthdr.csum_data |= mhlen << 16;
1092 }
1093 ipstat.ips_ofragments++;
1094 fragments++;
1095 }
1096 /*
1097 * Update first fragment by trimming what's been copied out
1098 * and updating header, then send each fragment (in order).
1099 */
1100 m = m0;
1101 m_adj(m, hlen + firstlen - ntohs(ip->ip_len));
1102 m->m_pkthdr.len = hlen + firstlen;
1103 ip->ip_len = htons((u_int16_t)m->m_pkthdr.len);
1104 ip->ip_off |= htons(IP_MF);
1105 ip->ip_sum = 0;
1106 if (sw_csum & M_CSUM_IPv4) {
1107 ip->ip_sum = in_cksum(m, hlen);
1108 m->m_pkthdr.csum_flags &= ~M_CSUM_IPv4;
1109 } else {
1110 KASSERT(m->m_pkthdr.csum_flags & M_CSUM_IPv4);
1111 KASSERT(M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) >=
1112 sizeof(struct ip));
1113 }
1114 sendorfree:
1115 /*
1116 * If there is no room for all the fragments, don't queue
1117 * any of them.
1118 */
1119 if (ifp != NULL) {
1120 s = splnet();
1121 if (ifp->if_snd.ifq_maxlen - ifp->if_snd.ifq_len < fragments &&
1122 error == 0) {
1123 error = ENOBUFS;
1124 ipstat.ips_odropped++;
1125 IFQ_INC_DROPS(&ifp->if_snd);
1126 }
1127 splx(s);
1128 }
1129 if (error) {
1130 for (m = m0; m; m = m0) {
1131 m0 = m->m_nextpkt;
1132 m->m_nextpkt = NULL;
1133 m_freem(m);
1134 }
1135 }
1136 return (error);
1137 }
1138
1139 /*
1140 * Process a delayed payload checksum calculation.
1141 */
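/*
 * Illustrative note on the csum_data encoding consumed below (assuming
 * the usual transport header layouts): the bits extracted by
 * M_CSUM_DATA_IPv4_OFFSET() are the offset of the checksum field
 * within the transport header, e.g.
 *
 *	offsetof(struct tcphdr, th_sum) == 16
 *	offsetof(struct udphdr, uh_sum) == 6
 *
 * so the finished sum is written at (ip_hl << 2) plus that offset.
 */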
1142 void
1143 in_delayed_cksum(struct mbuf *m)
1144 {
1145 struct ip *ip;
1146 u_int16_t csum, offset;
1147
1148 ip = mtod(m, struct ip *);
1149 offset = ip->ip_hl << 2;
1150 csum = in4_cksum(m, 0, offset, ntohs(ip->ip_len) - offset);
1151 if (csum == 0 && (m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
1152 csum = 0xffff;
1153
1154 offset += M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);
1155
1156 if ((offset + sizeof(u_int16_t)) > m->m_len) {
1157 /* This happens when IP options were inserted
1158 printf("in_delayed_cksum: pullup len %d off %d proto %d\n",
1159 m->m_len, offset, ip->ip_p);
1160 */
1161 m_copyback(m, offset, sizeof(csum), (caddr_t) &csum);
1162 } else
1163 *(u_int16_t *)(mtod(m, caddr_t) + offset) = csum;
1164 }
1165
1166 /*
1167 * Determine the maximum length of the options to be inserted;
1168 * we would far rather allocate too much space than too little.
1169 */
1170
1171 u_int
1172 ip_optlen(struct inpcb *inp)
1173 {
1174 struct mbuf *m = inp->inp_options;
1175
1176 if (m && m->m_len > offsetof(struct ipoption, ipopt_dst))
1177 return (m->m_len - offsetof(struct ipoption, ipopt_dst));
1178 else
1179 return 0;
1180 }
1181
1182
1183 /*
1184 * Insert IP options into preformed packet.
1185 * Adjust IP destination as required for IP source routing,
1186 * as indicated by a non-zero in_addr at the start of the options.
1187 */
1188 static struct mbuf *
1189 ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
1190 {
1191 struct ipoption *p = mtod(opt, struct ipoption *);
1192 struct mbuf *n;
1193 struct ip *ip = mtod(m, struct ip *);
1194 unsigned optlen;
1195
1196 optlen = opt->m_len - sizeof(p->ipopt_dst);
1197 if (optlen + ntohs(ip->ip_len) > IP_MAXPACKET)
1198 return (m); /* XXX should fail */
1199 if (!in_nullhost(p->ipopt_dst))
1200 ip->ip_dst = p->ipopt_dst;
1201 if (M_READONLY(m) || M_LEADINGSPACE(m) < optlen) {
1202 MGETHDR(n, M_DONTWAIT, MT_HEADER);
1203 if (n == 0)
1204 return (m);
1205 MCLAIM(n, m->m_owner);
1206 M_MOVE_PKTHDR(n, m);
1207 m->m_len -= sizeof(struct ip);
1208 m->m_data += sizeof(struct ip);
1209 n->m_next = m;
1210 m = n;
1211 m->m_len = optlen + sizeof(struct ip);
1212 m->m_data += max_linkhdr;
1213 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
1214 } else {
1215 m->m_data -= optlen;
1216 m->m_len += optlen;
1217 memmove(mtod(m, caddr_t), ip, sizeof(struct ip));
1218 }
1219 m->m_pkthdr.len += optlen;
1220 ip = mtod(m, struct ip *);
1221 bcopy((caddr_t)p->ipopt_list, (caddr_t)(ip + 1), (unsigned)optlen);
1222 *phlen = sizeof(struct ip) + optlen;
1223 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1224 return (m);
1225 }
1226
1227 /*
1228 * Copy options from ip to jp,
1229 * omitting those not copied during fragmentation.
1230 */
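/*
 * Illustrative example: whether an option survives fragmentation is
 * decided by its "copied" bit, IPOPT_COPIED(opt) == (opt & 0x80).
 * LSRR (0x83) has it set and is replicated into every fragment;
 * record route (0x07) does not, so it appears only in the first
 * fragment.  NOPs are always kept to preserve alignment.
 */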
1231 int
1232 ip_optcopy(struct ip *ip, struct ip *jp)
1233 {
1234 u_char *cp, *dp;
1235 int opt, optlen, cnt;
1236
1237 cp = (u_char *)(ip + 1);
1238 dp = (u_char *)(jp + 1);
1239 cnt = (ip->ip_hl << 2) - sizeof (struct ip);
1240 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1241 opt = cp[0];
1242 if (opt == IPOPT_EOL)
1243 break;
1244 if (opt == IPOPT_NOP) {
1245 /* Preserve for IP mcast tunnel's LSRR alignment. */
1246 *dp++ = IPOPT_NOP;
1247 optlen = 1;
1248 continue;
1249 }
1250 #ifdef DIAGNOSTIC
1251 if (cnt < IPOPT_OLEN + sizeof(*cp))
1252 panic("malformed IPv4 option passed to ip_optcopy");
1253 #endif
1254 optlen = cp[IPOPT_OLEN];
1255 #ifdef DIAGNOSTIC
1256 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
1257 panic("malformed IPv4 option passed to ip_optcopy");
1258 #endif
1259 /* bogus lengths should have been caught by ip_dooptions */
1260 if (optlen > cnt)
1261 optlen = cnt;
1262 if (IPOPT_COPIED(opt)) {
1263 bcopy((caddr_t)cp, (caddr_t)dp, (unsigned)optlen);
1264 dp += optlen;
1265 }
1266 }
1267 for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++)
1268 *dp++ = IPOPT_EOL;
1269 return (optlen);
1270 }
1271
1272 /*
1273 * IP socket option processing.
1274 */
1275 int
1276 ip_ctloutput(int op, struct socket *so, int level, int optname,
1277 struct mbuf **mp)
1278 {
1279 struct inpcb *inp = sotoinpcb(so);
1280 struct mbuf *m = *mp;
1281 int optval = 0;
1282 int error = 0;
1283 #if defined(IPSEC) || defined(FAST_IPSEC)
1284 struct proc *p = curproc; /*XXX*/
1285 #endif
1286
1287 if (level != IPPROTO_IP) {
1288 error = EINVAL;
1289 if (op == PRCO_SETOPT && *mp)
1290 (void) m_free(*mp);
1291 } else switch (op) {
1292
1293 case PRCO_SETOPT:
1294 switch (optname) {
1295 case IP_OPTIONS:
1296 #ifdef notyet
1297 case IP_RETOPTS:
1298 return (ip_pcbopts(optname, &inp->inp_options, m));
1299 #else
1300 return (ip_pcbopts(&inp->inp_options, m));
1301 #endif
1302
1303 case IP_TOS:
1304 case IP_TTL:
1305 case IP_RECVOPTS:
1306 case IP_RECVRETOPTS:
1307 case IP_RECVDSTADDR:
1308 case IP_RECVIF:
1309 if (m == NULL || m->m_len != sizeof(int))
1310 error = EINVAL;
1311 else {
1312 optval = *mtod(m, int *);
1313 switch (optname) {
1314
1315 case IP_TOS:
1316 inp->inp_ip.ip_tos = optval;
1317 break;
1318
1319 case IP_TTL:
1320 inp->inp_ip.ip_ttl = optval;
1321 break;
1322 #define OPTSET(bit) \
1323 if (optval) \
1324 inp->inp_flags |= bit; \
1325 else \
1326 inp->inp_flags &= ~bit;
1327
1328 case IP_RECVOPTS:
1329 OPTSET(INP_RECVOPTS);
1330 break;
1331
1332 case IP_RECVRETOPTS:
1333 OPTSET(INP_RECVRETOPTS);
1334 break;
1335
1336 case IP_RECVDSTADDR:
1337 OPTSET(INP_RECVDSTADDR);
1338 break;
1339
1340 case IP_RECVIF:
1341 OPTSET(INP_RECVIF);
1342 break;
1343 }
1344 }
1345 break;
1346 #undef OPTSET
1347
1348 case IP_MULTICAST_IF:
1349 case IP_MULTICAST_TTL:
1350 case IP_MULTICAST_LOOP:
1351 case IP_ADD_MEMBERSHIP:
1352 case IP_DROP_MEMBERSHIP:
1353 error = ip_setmoptions(optname, &inp->inp_moptions, m);
1354 break;
1355
1356 case IP_PORTRANGE:
1357 if (m == 0 || m->m_len != sizeof(int))
1358 error = EINVAL;
1359 else {
1360 optval = *mtod(m, int *);
1361
1362 switch (optval) {
1363
1364 case IP_PORTRANGE_DEFAULT:
1365 case IP_PORTRANGE_HIGH:
1366 inp->inp_flags &= ~(INP_LOWPORT);
1367 break;
1368
1369 case IP_PORTRANGE_LOW:
1370 inp->inp_flags |= INP_LOWPORT;
1371 break;
1372
1373 default:
1374 error = EINVAL;
1375 break;
1376 }
1377 }
1378 break;
1379
1380 #if defined(IPSEC) || defined(FAST_IPSEC)
1381 case IP_IPSEC_POLICY:
1382 {
1383 caddr_t req = NULL;
1384 size_t len = 0;
1385 int priv = 0;
1386
1387 #ifdef __NetBSD__
1388 if (p == 0 || kauth_authorize_generic(p->p_cred, KAUTH_GENERIC_ISSUSER,
1389 &p->p_acflag))
1390 priv = 0;
1391 else
1392 priv = 1;
1393 #else
1394 priv = (in6p->in6p_socket->so_state & SS_PRIV);
1395 #endif
1396 if (m) {
1397 req = mtod(m, caddr_t);
1398 len = m->m_len;
1399 }
1400 error = ipsec4_set_policy(inp, optname, req, len, priv);
1401 break;
1402 }
1403 #endif /*IPSEC*/
1404
1405 default:
1406 error = ENOPROTOOPT;
1407 break;
1408 }
1409 if (m)
1410 (void)m_free(m);
1411 break;
1412
1413 case PRCO_GETOPT:
1414 switch (optname) {
1415 case IP_OPTIONS:
1416 case IP_RETOPTS:
1417 *mp = m = m_get(M_WAIT, MT_SOOPTS);
1418 MCLAIM(m, so->so_mowner);
1419 if (inp->inp_options) {
1420 m->m_len = inp->inp_options->m_len;
1421 bcopy(mtod(inp->inp_options, caddr_t),
1422 mtod(m, caddr_t), (unsigned)m->m_len);
1423 } else
1424 m->m_len = 0;
1425 break;
1426
1427 case IP_TOS:
1428 case IP_TTL:
1429 case IP_RECVOPTS:
1430 case IP_RECVRETOPTS:
1431 case IP_RECVDSTADDR:
1432 case IP_RECVIF:
1433 case IP_ERRORMTU:
1434 *mp = m = m_get(M_WAIT, MT_SOOPTS);
1435 MCLAIM(m, so->so_mowner);
1436 m->m_len = sizeof(int);
1437 switch (optname) {
1438
1439 case IP_TOS:
1440 optval = inp->inp_ip.ip_tos;
1441 break;
1442
1443 case IP_TTL:
1444 optval = inp->inp_ip.ip_ttl;
1445 break;
1446
1447 case IP_ERRORMTU:
1448 optval = inp->inp_errormtu;
1449 break;
1450
1451 #define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0)
1452
1453 case IP_RECVOPTS:
1454 optval = OPTBIT(INP_RECVOPTS);
1455 break;
1456
1457 case IP_RECVRETOPTS:
1458 optval = OPTBIT(INP_RECVRETOPTS);
1459 break;
1460
1461 case IP_RECVDSTADDR:
1462 optval = OPTBIT(INP_RECVDSTADDR);
1463 break;
1464
1465 case IP_RECVIF:
1466 optval = OPTBIT(INP_RECVIF);
1467 break;
1468 }
1469 *mtod(m, int *) = optval;
1470 break;
1471
1472 #if 0 /* defined(IPSEC) || defined(FAST_IPSEC) */
1473 /* XXX: code broken */
1474 case IP_IPSEC_POLICY:
1475 {
1476 caddr_t req = NULL;
1477 size_t len = 0;
1478
1479 if (m) {
1480 req = mtod(m, caddr_t);
1481 len = m->m_len;
1482 }
1483 error = ipsec4_get_policy(inp, req, len, mp);
1484 break;
1485 }
1486 #endif /*IPSEC*/
1487
1488 case IP_MULTICAST_IF:
1489 case IP_MULTICAST_TTL:
1490 case IP_MULTICAST_LOOP:
1491 case IP_ADD_MEMBERSHIP:
1492 case IP_DROP_MEMBERSHIP:
1493 error = ip_getmoptions(optname, inp->inp_moptions, mp);
1494 if (*mp)
1495 MCLAIM(*mp, so->so_mowner);
1496 break;
1497
1498 case IP_PORTRANGE:
1499 *mp = m = m_get(M_WAIT, MT_SOOPTS);
1500 MCLAIM(m, so->so_mowner);
1501 m->m_len = sizeof(int);
1502
1503 if (inp->inp_flags & INP_LOWPORT)
1504 optval = IP_PORTRANGE_LOW;
1505 else
1506 optval = IP_PORTRANGE_DEFAULT;
1507
1508 *mtod(m, int *) = optval;
1509 break;
1510
1511 default:
1512 error = ENOPROTOOPT;
1513 break;
1514 }
1515 break;
1516 }
1517 return (error);
1518 }
1519
1520 /*
1521 * Set up IP options in pcb for insertion in output packets.
1522 * Store in mbuf with pointer in pcbopt, adding pseudo-option
1523 * with destination address if source routed.
1524 */
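/*
 * Illustrative layout of the resulting mbuf (it is later consumed by
 * ip_insertoptions() as a struct ipoption): the data begins with a
 * struct in_addr holding the first hop of an LSRR/SSRR route, all
 * zeros when not source routed, followed by the remaining option
 * bytes with that first hop removed from the route data.
 */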
1525 int
1526 #ifdef notyet
1527 ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m)
1528 #else
1529 ip_pcbopts(struct mbuf **pcbopt, struct mbuf *m)
1530 #endif
1531 {
1532 int cnt, optlen;
1533 u_char *cp;
1534 u_char opt;
1535
1536 /* turn off any old options */
1537 if (*pcbopt)
1538 (void)m_free(*pcbopt);
1539 *pcbopt = 0;
1540 if (m == (struct mbuf *)0 || m->m_len == 0) {
1541 /*
1542 * Only turning off any previous options.
1543 */
1544 if (m)
1545 (void)m_free(m);
1546 return (0);
1547 }
1548
1549 #ifndef __vax__
1550 if (m->m_len % sizeof(int32_t))
1551 goto bad;
1552 #endif
1553 /*
1554 * IP first-hop destination address will be stored before
1555 * actual options; move other options back
1556 * and clear it when none present.
1557 */
1558 if (m->m_data + m->m_len + sizeof(struct in_addr) >= &m->m_dat[MLEN])
1559 goto bad;
1560 cnt = m->m_len;
1561 m->m_len += sizeof(struct in_addr);
1562 cp = mtod(m, u_char *) + sizeof(struct in_addr);
1563 memmove(cp, mtod(m, caddr_t), (unsigned)cnt);
1564 bzero(mtod(m, caddr_t), sizeof(struct in_addr));
1565
1566 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1567 opt = cp[IPOPT_OPTVAL];
1568 if (opt == IPOPT_EOL)
1569 break;
1570 if (opt == IPOPT_NOP)
1571 optlen = 1;
1572 else {
1573 if (cnt < IPOPT_OLEN + sizeof(*cp))
1574 goto bad;
1575 optlen = cp[IPOPT_OLEN];
1576 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
1577 goto bad;
1578 }
1579 switch (opt) {
1580
1581 default:
1582 break;
1583
1584 case IPOPT_LSRR:
1585 case IPOPT_SSRR:
1586 /*
1587 * user process specifies route as:
1588 * ->A->B->C->D
1589 * D must be our final destination (but we can't
1590 * check that since we may not have connected yet).
1591 * A is first hop destination, which doesn't appear in
1592 * actual IP option, but is stored before the options.
1593 */
1594 if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr))
1595 goto bad;
1596 m->m_len -= sizeof(struct in_addr);
1597 cnt -= sizeof(struct in_addr);
1598 optlen -= sizeof(struct in_addr);
1599 cp[IPOPT_OLEN] = optlen;
1600 /*
1601 * Move first hop before start of options.
1602 */
1603 bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t),
1604 sizeof(struct in_addr));
1605 /*
1606 * Then copy rest of options back
1607 * to close up the deleted entry.
1608 */
1609 (void)memmove(&cp[IPOPT_OFFSET+1],
1610 &cp[IPOPT_OFFSET+1] + sizeof(struct in_addr),
1611 (unsigned)cnt - (IPOPT_MINOFF - 1));
1612 break;
1613 }
1614 }
1615 if (m->m_len > MAX_IPOPTLEN + sizeof(struct in_addr))
1616 goto bad;
1617 *pcbopt = m;
1618 return (0);
1619
1620 bad:
1621 (void)m_free(m);
1622 return (EINVAL);
1623 }
1624
1625 /*
1626 * Following RFC 1724 section 3.3, 0.0.0.0/8 is interpreted as an interface index.
1627 */
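/*
 * Illustrative example: a value such as { .s_addr = htonl(3) } falls
 * in 0.0.0.0/8, so ifindex becomes 3 and ifindex2ifnet[3] is returned
 * (NULL if out of range or unset).  Any other value is matched against
 * the configured interface addresses instead.
 */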
1628 static struct ifnet *
1629 ip_multicast_if(struct in_addr *a, int *ifindexp)
1630 {
1631 int ifindex;
1632 struct ifnet *ifp = NULL;
1633 struct in_ifaddr *ia;
1634
1635 if (ifindexp)
1636 *ifindexp = 0;
1637 if (ntohl(a->s_addr) >> 24 == 0) {
1638 ifindex = ntohl(a->s_addr) & 0xffffff;
1639 if (ifindex < 0 || if_indexlim <= ifindex)
1640 return NULL;
1641 ifp = ifindex2ifnet[ifindex];
1642 if (!ifp)
1643 return NULL;
1644 if (ifindexp)
1645 *ifindexp = ifindex;
1646 } else {
1647 LIST_FOREACH(ia, &IN_IFADDR_HASH(a->s_addr), ia_hash) {
1648 if (in_hosteq(ia->ia_addr.sin_addr, *a) &&
1649 (ia->ia_ifp->if_flags & IFF_MULTICAST) != 0) {
1650 ifp = ia->ia_ifp;
1651 break;
1652 }
1653 }
1654 }
1655 return ifp;
1656 }
1657
1658 static int
1659 ip_getoptval(struct mbuf *m, u_int8_t *val, u_int maxval)
1660 {
1661 u_int tval;
1662
1663 if (m == NULL)
1664 return EINVAL;
1665
1666 switch (m->m_len) {
1667 case sizeof(u_char):
1668 tval = *(mtod(m, u_char *));
1669 break;
1670 case sizeof(u_int):
1671 tval = *(mtod(m, u_int *));
1672 break;
1673 default:
1674 return EINVAL;
1675 }
1676
1677 if (tval > maxval)
1678 return EINVAL;
1679
1680 *val = tval;
1681 return 0;
1682 }
1683
1684 /*
1685 * Set the IP multicast options in response to user setsockopt().
1686 */
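/*
 * Illustrative userland sketch (assumes a datagram socket s); the
 * option value reaches this routine in mbuf m, e.g. for a group join
 *
 *	struct ip_mreq mreq;
 *	mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.251");
 *	mreq.imr_interface.s_addr = INADDR_ANY;
 *	setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * which is handled by the IP_ADD_MEMBERSHIP case below.
 */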
1687 int
1688 ip_setmoptions(int optname, struct ip_moptions **imop, struct mbuf *m)
1689 {
1690 int error = 0;
1691 int i;
1692 struct in_addr addr;
1693 struct ip_mreq *mreq;
1694 struct ifnet *ifp;
1695 struct ip_moptions *imo = *imop;
1696 struct route ro;
1697 struct sockaddr_in *dst;
1698 int ifindex;
1699
1700 if (imo == NULL) {
1701 /*
1702 * No multicast option buffer attached to the pcb;
1703 * allocate one and initialize to default values.
1704 */
1705 imo = (struct ip_moptions *)malloc(sizeof(*imo), M_IPMOPTS,
1706 M_WAITOK);
1707
1708 if (imo == NULL)
1709 return (ENOBUFS);
1710 *imop = imo;
1711 imo->imo_multicast_ifp = NULL;
1712 imo->imo_multicast_addr.s_addr = INADDR_ANY;
1713 imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
1714 imo->imo_multicast_loop = IP_DEFAULT_MULTICAST_LOOP;
1715 imo->imo_num_memberships = 0;
1716 }
1717
1718 switch (optname) {
1719
1720 case IP_MULTICAST_IF:
1721 /*
1722 * Select the interface for outgoing multicast packets.
1723 */
1724 if (m == NULL || m->m_len != sizeof(struct in_addr)) {
1725 error = EINVAL;
1726 break;
1727 }
1728 addr = *(mtod(m, struct in_addr *));
1729 /*
1730 * INADDR_ANY is used to remove a previous selection.
1731 * When no interface is selected, a default one is
1732 * chosen every time a multicast packet is sent.
1733 */
1734 if (in_nullhost(addr)) {
1735 imo->imo_multicast_ifp = NULL;
1736 break;
1737 }
1738 /*
1739 * The selected interface is identified by its local
1740 * IP address. Find the interface and confirm that
1741 * it supports multicasting.
1742 */
1743 ifp = ip_multicast_if(&addr, &ifindex);
1744 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
1745 error = EADDRNOTAVAIL;
1746 break;
1747 }
1748 imo->imo_multicast_ifp = ifp;
1749 if (ifindex)
1750 imo->imo_multicast_addr = addr;
1751 else
1752 imo->imo_multicast_addr.s_addr = INADDR_ANY;
1753 break;
1754
1755 case IP_MULTICAST_TTL:
1756 /*
1757 * Set the IP time-to-live for outgoing multicast packets.
1758 */
1759 error = ip_getoptval(m, &imo->imo_multicast_ttl, MAXTTL);
1760 break;
1761
1762 case IP_MULTICAST_LOOP:
1763 /*
1764 * Set the loopback flag for outgoing multicast packets.
1765 * Must be zero or one.
1766 */
1767 error = ip_getoptval(m, &imo->imo_multicast_loop, 1);
1768 break;
1769
1770 case IP_ADD_MEMBERSHIP:
1771 /*
1772 * Add a multicast group membership.
1773 * Group must be a valid IP multicast address.
1774 */
1775 if (m == NULL || m->m_len != sizeof(struct ip_mreq)) {
1776 error = EINVAL;
1777 break;
1778 }
1779 mreq = mtod(m, struct ip_mreq *);
1780 if (!IN_MULTICAST(mreq->imr_multiaddr.s_addr)) {
1781 error = EINVAL;
1782 break;
1783 }
1784 /*
1785 * If no interface address was provided, use the interface of
1786 * the route to the given multicast address.
1787 */
1788 if (in_nullhost(mreq->imr_interface)) {
1789 bzero((caddr_t)&ro, sizeof(ro));
1790 ro.ro_rt = NULL;
1791 dst = satosin(&ro.ro_dst);
1792 dst->sin_len = sizeof(*dst);
1793 dst->sin_family = AF_INET;
1794 dst->sin_addr = mreq->imr_multiaddr;
1795 rtalloc(&ro);
1796 if (ro.ro_rt == NULL) {
1797 error = EADDRNOTAVAIL;
1798 break;
1799 }
1800 ifp = ro.ro_rt->rt_ifp;
1801 rtfree(ro.ro_rt);
1802 } else {
1803 ifp = ip_multicast_if(&mreq->imr_interface, NULL);
1804 }
1805 /*
1806 * See if we found an interface, and confirm that it
1807 * supports multicast.
1808 */
1809 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
1810 error = EADDRNOTAVAIL;
1811 break;
1812 }
1813 /*
1814 * See if the membership already exists or if all the
1815 * membership slots are full.
1816 */
1817 for (i = 0; i < imo->imo_num_memberships; ++i) {
1818 if (imo->imo_membership[i]->inm_ifp == ifp &&
1819 in_hosteq(imo->imo_membership[i]->inm_addr,
1820 mreq->imr_multiaddr))
1821 break;
1822 }
1823 if (i < imo->imo_num_memberships) {
1824 error = EADDRINUSE;
1825 break;
1826 }
1827 if (i == IP_MAX_MEMBERSHIPS) {
1828 error = ETOOMANYREFS;
1829 break;
1830 }
1831 /*
1832 * Everything looks good; add a new record to the multicast
1833 * address list for the given interface.
1834 */
1835 if ((imo->imo_membership[i] =
1836 in_addmulti(&mreq->imr_multiaddr, ifp)) == NULL) {
1837 error = ENOBUFS;
1838 break;
1839 }
1840 ++imo->imo_num_memberships;
1841 break;
1842
1843 case IP_DROP_MEMBERSHIP:
1844 /*
1845 * Drop a multicast group membership.
1846 * Group must be a valid IP multicast address.
1847 */
1848 if (m == NULL || m->m_len != sizeof(struct ip_mreq)) {
1849 error = EINVAL;
1850 break;
1851 }
1852 mreq = mtod(m, struct ip_mreq *);
1853 if (!IN_MULTICAST(mreq->imr_multiaddr.s_addr)) {
1854 error = EINVAL;
1855 break;
1856 }
1857 /*
1858 * If an interface address was specified, get a pointer
1859 * to its ifnet structure.
1860 */
1861 if (in_nullhost(mreq->imr_interface))
1862 ifp = NULL;
1863 else {
1864 ifp = ip_multicast_if(&mreq->imr_interface, NULL);
1865 if (ifp == NULL) {
1866 error = EADDRNOTAVAIL;
1867 break;
1868 }
1869 }
1870 /*
1871 * Find the membership in the membership array.
1872 */
1873 for (i = 0; i < imo->imo_num_memberships; ++i) {
1874 if ((ifp == NULL ||
1875 imo->imo_membership[i]->inm_ifp == ifp) &&
1876 in_hosteq(imo->imo_membership[i]->inm_addr,
1877 mreq->imr_multiaddr))
1878 break;
1879 }
1880 if (i == imo->imo_num_memberships) {
1881 error = EADDRNOTAVAIL;
1882 break;
1883 }
1884 /*
1885 * Give up the multicast address record to which the
1886 * membership points.
1887 */
1888 in_delmulti(imo->imo_membership[i]);
1889 /*
1890 * Remove the gap in the membership array.
1891 */
1892 for (++i; i < imo->imo_num_memberships; ++i)
1893 imo->imo_membership[i-1] = imo->imo_membership[i];
1894 --imo->imo_num_memberships;
1895 break;
1896
1897 default:
1898 error = EOPNOTSUPP;
1899 break;
1900 }
1901
1902 /*
1903 * If all options have default values, no need to keep the mbuf.
1904 */
1905 if (imo->imo_multicast_ifp == NULL &&
1906 imo->imo_multicast_ttl == IP_DEFAULT_MULTICAST_TTL &&
1907 imo->imo_multicast_loop == IP_DEFAULT_MULTICAST_LOOP &&
1908 imo->imo_num_memberships == 0) {
1909 free(*imop, M_IPMOPTS);
1910 *imop = NULL;
1911 }
1912
1913 return (error);
1914 }
1915
1916 /*
1917 * Return the IP multicast options in response to user getsockopt().
1918 */
1919 int
1920 ip_getmoptions(int optname, struct ip_moptions *imo, struct mbuf **mp)
1921 {
1922 u_char *ttl;
1923 u_char *loop;
1924 struct in_addr *addr;
1925 struct in_ifaddr *ia;
1926
1927 *mp = m_get(M_WAIT, MT_SOOPTS);
1928
1929 switch (optname) {
1930
1931 case IP_MULTICAST_IF:
1932 addr = mtod(*mp, struct in_addr *);
1933 (*mp)->m_len = sizeof(struct in_addr);
1934 if (imo == NULL || imo->imo_multicast_ifp == NULL)
1935 *addr = zeroin_addr;
1936 else if (imo->imo_multicast_addr.s_addr) {
1937 /* return the value user has set */
1938 *addr = imo->imo_multicast_addr;
1939 } else {
1940 IFP_TO_IA(imo->imo_multicast_ifp, ia);
1941 *addr = ia ? ia->ia_addr.sin_addr : zeroin_addr;
1942 }
1943 return (0);
1944
1945 case IP_MULTICAST_TTL:
1946 ttl = mtod(*mp, u_char *);
1947 (*mp)->m_len = 1;
1948 *ttl = imo ? imo->imo_multicast_ttl
1949 : IP_DEFAULT_MULTICAST_TTL;
1950 return (0);
1951
1952 case IP_MULTICAST_LOOP:
1953 loop = mtod(*mp, u_char *);
1954 (*mp)->m_len = 1;
1955 *loop = imo ? imo->imo_multicast_loop
1956 : IP_DEFAULT_MULTICAST_LOOP;
1957 return (0);
1958
1959 default:
1960 return (EOPNOTSUPP);
1961 }
1962 }
1963
1964 /*
1965 * Discard the IP multicast options.
1966 */
1967 void
1968 ip_freemoptions(struct ip_moptions *imo)
1969 {
1970 int i;
1971
1972 if (imo != NULL) {
1973 for (i = 0; i < imo->imo_num_memberships; ++i)
1974 in_delmulti(imo->imo_membership[i]);
1975 free(imo, M_IPMOPTS);
1976 }
1977 }
1978
1979 /*
1980 * Routine called from ip_output() to loop back a copy of an IP multicast
1981 * packet to the input queue of a specified interface. Note that this
1982 * calls the output routine of the loopback "driver", but with an interface
1983 * pointer that might NOT be lo0ifp -- easier than replicating that code here.
1984 */
1985 static void
1986 ip_mloopback(struct ifnet *ifp, struct mbuf *m, struct sockaddr_in *dst)
1987 {
1988 struct ip *ip;
1989 struct mbuf *copym;
1990
1991 copym = m_copy(m, 0, M_COPYALL);
1992 if (copym != NULL
1993 && (copym->m_flags & M_EXT || copym->m_len < sizeof(struct ip)))
1994 copym = m_pullup(copym, sizeof(struct ip));
1995 if (copym != NULL) {
1996 /*
1997 * We don't bother to fragment if the IP length is greater
1998 * than the interface's MTU. Can this possibly matter?
1999 */
2000 ip = mtod(copym, struct ip *);
2001
2002 if (copym->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2003 in_delayed_cksum(copym);
2004 copym->m_pkthdr.csum_flags &=
2005 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2006 }
2007
2008 ip->ip_sum = 0;
2009 ip->ip_sum = in_cksum(copym, ip->ip_hl << 2);
2010 (void) looutput(ifp, copym, sintosa(dst), NULL);
2011 }
2012 }
2013