1 /* $NetBSD: ip_output.c,v 1.176 2007/01/29 06:00:11 dyoung Exp $ */
2
3 /*
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 1998 The NetBSD Foundation, Inc.
34 * All rights reserved.
35 *
36 * This code is derived from software contributed to The NetBSD Foundation
37 * by Public Access Networks Corporation ("Panix"). It was developed under
38 * contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the NetBSD
51 * Foundation, Inc. and its contributors.
52 * 4. Neither the name of The NetBSD Foundation nor the names of its
53 * contributors may be used to endorse or promote products derived
54 * from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
57 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
58 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
59 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
60 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
61 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
62 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
63 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
64 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
65 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 /*
70 * Copyright (c) 1982, 1986, 1988, 1990, 1993
71 * The Regents of the University of California. All rights reserved.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. Neither the name of the University nor the names of its contributors
82 * may be used to endorse or promote products derived from this software
83 * without specific prior written permission.
84 *
85 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
86 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
89 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
90 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
91 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
92 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
93 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
94 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
95 * SUCH DAMAGE.
96 *
97 * @(#)ip_output.c 8.3 (Berkeley) 1/21/94
98 */
99
100 #include <sys/cdefs.h>
101 __KERNEL_RCSID(0, "$NetBSD: ip_output.c,v 1.176 2007/01/29 06:00:11 dyoung Exp $");
102
103 #include "opt_pfil_hooks.h"
104 #include "opt_inet.h"
105 #include "opt_ipsec.h"
106 #include "opt_mrouting.h"
107
108 #include <sys/param.h>
109 #include <sys/malloc.h>
110 #include <sys/mbuf.h>
111 #include <sys/errno.h>
112 #include <sys/protosw.h>
113 #include <sys/socket.h>
114 #include <sys/socketvar.h>
115 #include <sys/kauth.h>
116 #ifdef FAST_IPSEC
117 #include <sys/domain.h>
118 #endif
119 #include <sys/systm.h>
120 #include <sys/proc.h>
121
122 #include <net/if.h>
123 #include <net/route.h>
124 #include <net/pfil.h>
125
126 #include <netinet/in.h>
127 #include <netinet/in_systm.h>
128 #include <netinet/ip.h>
129 #include <netinet/in_pcb.h>
130 #include <netinet/in_var.h>
131 #include <netinet/ip_var.h>
132 #include <netinet/in_offload.h>
133
134 #ifdef MROUTING
135 #include <netinet/ip_mroute.h>
136 #endif
137
138 #include <machine/stdarg.h>
139
140 #ifdef IPSEC
141 #include <netinet6/ipsec.h>
142 #include <netkey/key.h>
143 #include <netkey/key_debug.h>
144 #endif /*IPSEC*/
145
146 #ifdef FAST_IPSEC
147 #include <netipsec/ipsec.h>
148 #include <netipsec/key.h>
149 #include <netipsec/xform.h>
150 #endif /* FAST_IPSEC*/
151
152 #ifdef IPSEC_NAT_T
153 #include <netinet/udp.h>
154 #endif
155
156 static struct mbuf *ip_insertoptions(struct mbuf *, struct mbuf *, int *);
157 static struct ifnet *ip_multicast_if(struct in_addr *, int *);
158 static void ip_mloopback(struct ifnet *, struct mbuf *, struct sockaddr_in *);
159 static int ip_getoptval(struct mbuf *, u_int8_t *, u_int);
160
161 #ifdef PFIL_HOOKS
162 extern struct pfil_head inet_pfil_hook; /* XXX */
163 #endif
164
165 int ip_do_loopback_cksum = 0;
166
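/*
 * IN_NEED_CHECKSUM() is true unless the packet is bound for a loopback
 * interface and the corresponding ip/tcp/udp "do_loopback_cksum" knob is
 * clear; i.e. checksums may only be skipped on loopback, and only when
 * those knobs allow it.
 */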
167 #define IN_NEED_CHECKSUM(ifp, csum_flags) \
168 (__predict_true(((ifp)->if_flags & IFF_LOOPBACK) == 0 || \
169 (((csum_flags) & M_CSUM_UDPv4) != 0 && udp_do_loopback_cksum) || \
170 (((csum_flags) & M_CSUM_TCPv4) != 0 && tcp_do_loopback_cksum) || \
171 (((csum_flags) & M_CSUM_IPv4) != 0 && ip_do_loopback_cksum)))
172
173 /*
174 * IP output. The packet in mbuf chain m contains a skeletal IP
175 * header (with len, off, ttl, proto, tos, src, dst).
176 * The mbuf chain containing the packet will be freed.
177 * The mbuf opt, if present, will not be freed.
178 */
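/*
 * The variadic arguments, in the order read by the va_arg() calls below,
 * are: a struct mbuf * of prebuilt IP options (or NULL), a struct route *
 * cache (or NULL), the int flags, a struct ip_moptions * (or NULL), the
 * sending struct socket * (or NULL), and, only when IP_RETURNMTU is set
 * in flags, an int * through which the relevant MTU is reported back on
 * EMSGSIZE.  A purely illustrative call (not taken from any caller in
 * the tree) might look like:
 *
 *	error = ip_output(m, opts, &route, flags | IP_RETURNMTU,
 *	    mopts, so, &pmtu);
 */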
179 int
180 ip_output(struct mbuf *m0, ...)
181 {
182 struct ip *ip;
183 struct ifnet *ifp;
184 struct mbuf *m = m0;
185 int hlen = sizeof (struct ip);
186 int len, error = 0;
187 struct route iproute;
188 struct sockaddr_in *dst;
189 struct in_ifaddr *ia;
190 struct ifaddr *xifa;
191 struct mbuf *opt;
192 struct route *ro;
193 int flags, sw_csum;
194 int *mtu_p;
195 u_long mtu;
196 struct ip_moptions *imo;
197 struct socket *so;
198 va_list ap;
199 #ifdef IPSEC_NAT_T
200 int natt_frag = 0;
201 #endif
202 #ifdef IPSEC
203 struct secpolicy *sp = NULL;
204 #endif /*IPSEC*/
205 #ifdef FAST_IPSEC
206 struct inpcb *inp;
207 struct m_tag *mtag;
208 struct secpolicy *sp = NULL;
209 struct tdb_ident *tdbi;
210 int s;
211 #endif
212 u_int16_t ip_len;
213
214 len = 0;
215 va_start(ap, m0);
216 opt = va_arg(ap, struct mbuf *);
217 ro = va_arg(ap, struct route *);
218 flags = va_arg(ap, int);
219 imo = va_arg(ap, struct ip_moptions *);
220 so = va_arg(ap, struct socket *);
221 if (flags & IP_RETURNMTU)
222 mtu_p = va_arg(ap, int *);
223 else
224 mtu_p = NULL;
225 va_end(ap);
226
227 MCLAIM(m, &ip_tx_mowner);
228 #ifdef FAST_IPSEC
229 if (so != NULL && so->so_proto->pr_domain->dom_family == AF_INET)
230 inp = (struct inpcb *)so->so_pcb;
231 else
232 inp = NULL;
233 #endif /* FAST_IPSEC */
234
235 #ifdef DIAGNOSTIC
236 if ((m->m_flags & M_PKTHDR) == 0)
237 panic("ip_output: no HDR");
238
239 if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) != 0) {
240 panic("ip_output: IPv6 checksum offload flags: %d",
241 m->m_pkthdr.csum_flags);
242 }
243
244 if ((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) ==
245 (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
246 panic("ip_output: conflicting checksum offload flags: %d",
247 m->m_pkthdr.csum_flags);
248 }
249 #endif
250 if (opt) {
251 m = ip_insertoptions(m, opt, &len);
252 if (len >= sizeof(struct ip))
253 hlen = len;
254 }
255 ip = mtod(m, struct ip *);
256 /*
257 * Fill in IP header.
258 */
259 if ((flags & (IP_FORWARDING|IP_RAWOUTPUT)) == 0) {
260 ip->ip_v = IPVERSION;
261 ip->ip_off = htons(0);
262 if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0) {
263 ip->ip_id = ip_newid();
264 } else {
265
266 /*
267 * TSO capable interfaces (typically?) increment
268 * ip_id for each segment.
269 * "allocate" enough ids here to increase the chance
270 * for them to be unique.
271 *
272 * Note that the following calculation does not need
273 * to be precise; wasting some ip_ids is fine.
274 */
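/*
 * For example (illustrative numbers only): a 32780-byte TSO packet with
 * a 20-byte header and a segment size of 1460 reserves
 * howmany(32760, 1460) = 23 consecutive ids.
 */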
275
276 unsigned int segsz = m->m_pkthdr.segsz;
277 unsigned int datasz = ntohs(ip->ip_len) - hlen;
278 unsigned int num = howmany(datasz, segsz);
279
280 ip->ip_id = ip_newid_range(num);
281 }
282 ip->ip_hl = hlen >> 2;
283 ipstat.ips_localout++;
284 } else {
285 hlen = ip->ip_hl << 2;
286 }
287 /*
288 * Route packet.
289 */
290 memset(&iproute, 0, sizeof(iproute));
291 if (ro == NULL)
292 ro = &iproute;
293 dst = satosin(&ro->ro_dst);
294 /*
295 * If there is a cached route,
296 * check that it is to the same destination
297 * and is still up. If not, free it and try again.
298 * The address family should also be checked in case of sharing the
299 * cache with IPv6.
300 */
301 if (dst->sin_family != AF_INET ||
302 !in_hosteq(dst->sin_addr, ip->ip_dst))
303 rtcache_free(ro);
304 else
305 rtcache_check(ro);
306 if (ro->ro_rt == NULL) {
307 bzero(dst, sizeof(*dst));
308 dst->sin_family = AF_INET;
309 dst->sin_len = sizeof(*dst);
310 dst->sin_addr = ip->ip_dst;
311 }
312 /*
313 * If routing to interface only,
314 * short circuit routing lookup.
315 */
316 if (flags & IP_ROUTETOIF) {
317 if ((ia = ifatoia(ifa_ifwithladdr(sintosa(dst)))) == 0) {
318 ipstat.ips_noroute++;
319 error = ENETUNREACH;
320 goto bad;
321 }
322 ifp = ia->ia_ifp;
323 mtu = ifp->if_mtu;
324 ip->ip_ttl = 1;
325 } else if ((IN_MULTICAST(ip->ip_dst.s_addr) ||
326 ip->ip_dst.s_addr == INADDR_BROADCAST) &&
327 imo != NULL && imo->imo_multicast_ifp != NULL) {
328 ifp = imo->imo_multicast_ifp;
329 mtu = ifp->if_mtu;
330 IFP_TO_IA(ifp, ia);
331 } else {
332 if (ro->ro_rt == NULL)
333 rtcache_init(ro);
334 if (ro->ro_rt == NULL) {
335 ipstat.ips_noroute++;
336 error = EHOSTUNREACH;
337 goto bad;
338 }
339 ia = ifatoia(ro->ro_rt->rt_ifa);
340 ifp = ro->ro_rt->rt_ifp;
341 if ((mtu = ro->ro_rt->rt_rmx.rmx_mtu) == 0)
342 mtu = ifp->if_mtu;
343 ro->ro_rt->rt_use++;
344 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
345 dst = satosin(ro->ro_rt->rt_gateway);
346 }
347 if (IN_MULTICAST(ip->ip_dst.s_addr) ||
348 (ip->ip_dst.s_addr == INADDR_BROADCAST)) {
349 struct in_multi *inm;
350
351 m->m_flags |= (ip->ip_dst.s_addr == INADDR_BROADCAST) ?
352 M_BCAST : M_MCAST;
353 /*
354 * IP destination address is multicast. Make sure "dst"
355 * still points to the address in "ro". (It may have been
356 * changed to point to a gateway address, above.)
357 */
358 dst = satosin(&ro->ro_dst);
359 /*
360 * See if the caller provided any multicast options
361 */
362 if (imo != NULL)
363 ip->ip_ttl = imo->imo_multicast_ttl;
364 else
365 ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL;
366
367 /*
368 * if we don't know the outgoing ifp yet, we can't generate
369 * output
370 */
371 if (!ifp) {
372 ipstat.ips_noroute++;
373 error = ENETUNREACH;
374 goto bad;
375 }
376
377 /*
378 * If the packet is multicast or broadcast, confirm that
379 * the outgoing interface can transmit it.
380 */
381 if (((m->m_flags & M_MCAST) &&
382 (ifp->if_flags & IFF_MULTICAST) == 0) ||
383 ((m->m_flags & M_BCAST) &&
384 (ifp->if_flags & (IFF_BROADCAST|IFF_POINTOPOINT)) == 0)) {
385 ipstat.ips_noroute++;
386 error = ENETUNREACH;
387 goto bad;
388 }
389 /*
390 * If the source address is not yet specified, use an address
391 * of the outgoing interface.
392 */
393 if (in_nullhost(ip->ip_src)) {
394 struct in_ifaddr *xia;
395
396 IFP_TO_IA(ifp, xia);
397 if (!xia) {
398 error = EADDRNOTAVAIL;
399 goto bad;
400 }
401 xifa = &xia->ia_ifa;
402 if (xifa->ifa_getifa != NULL) {
403 xia = ifatoia((*xifa->ifa_getifa)(xifa,
404 &ro->ro_dst));
405 }
406 ip->ip_src = xia->ia_addr.sin_addr;
407 }
408
409 IN_LOOKUP_MULTI(ip->ip_dst, ifp, inm);
410 if (inm != NULL &&
411 (imo == NULL || imo->imo_multicast_loop)) {
412 /*
413 * If we belong to the destination multicast group
414 * on the outgoing interface, and the caller did not
415 * forbid loopback, loop back a copy.
416 */
417 ip_mloopback(ifp, m, dst);
418 }
419 #ifdef MROUTING
420 else {
421 /*
422 * If we are acting as a multicast router, perform
423 * multicast forwarding as if the packet had just
424 * arrived on the interface to which we are about
425 * to send. The multicast forwarding function
426 * recursively calls this function, using the
427 * IP_FORWARDING flag to prevent infinite recursion.
428 *
429 * Multicasts that are looped back by ip_mloopback(),
430 * above, will be forwarded by the ip_input() routine,
431 * if necessary.
432 */
433 extern struct socket *ip_mrouter;
434
435 if (ip_mrouter && (flags & IP_FORWARDING) == 0) {
436 if (ip_mforward(m, ifp) != 0) {
437 m_freem(m);
438 goto done;
439 }
440 }
441 }
442 #endif
443 /*
444 * Multicasts with a time-to-live of zero may be looped-
445 * back, above, but must not be transmitted on a network.
446 * Also, multicasts addressed to the loopback interface
447 * are not sent -- the above call to ip_mloopback() will
448 * loop back a copy if this host actually belongs to the
449 * destination group on the loopback interface.
450 */
451 if (ip->ip_ttl == 0 || (ifp->if_flags & IFF_LOOPBACK) != 0) {
452 m_freem(m);
453 goto done;
454 }
455
456 goto sendit;
457 }
458 /*
459 * If the source address is not yet specified, use an address
460 * of the outgoing interface.
461 */
462 if (in_nullhost(ip->ip_src)) {
463 xifa = &ia->ia_ifa;
464 if (xifa->ifa_getifa != NULL)
465 ia = ifatoia((*xifa->ifa_getifa)(xifa, &ro->ro_dst));
466 ip->ip_src = ia->ia_addr.sin_addr;
467 }
468
469 /*
470 * Packets with a Class D (multicast) source address are not
471 * valid per RFC 1112.
472 */
473 if (IN_MULTICAST(ip->ip_src.s_addr)) {
474 ipstat.ips_odropped++;
475 error = EADDRNOTAVAIL;
476 goto bad;
477 }
478
479 /*
480 * Look for a broadcast destination address and
481 * verify that the user is allowed to send
482 * such a packet.
483 */
484 if (in_broadcast(dst->sin_addr, ifp)) {
485 if ((ifp->if_flags & IFF_BROADCAST) == 0) {
486 error = EADDRNOTAVAIL;
487 goto bad;
488 }
489 if ((flags & IP_ALLOWBROADCAST) == 0) {
490 error = EACCES;
491 goto bad;
492 }
493 /* don't allow broadcast messages to be fragmented */
494 if (ntohs(ip->ip_len) > ifp->if_mtu) {
495 error = EMSGSIZE;
496 goto bad;
497 }
498 m->m_flags |= M_BCAST;
499 } else
500 m->m_flags &= ~M_BCAST;
501
502 sendit:
503 /*
504 * If we're doing Path MTU Discovery, we need to set DF unless
505 * the route's MTU is locked.
506 */
507 if ((flags & IP_MTUDISC) != 0 && ro->ro_rt != NULL &&
508 (ro->ro_rt->rt_rmx.rmx_locks & RTV_MTU) == 0)
509 ip->ip_off |= htons(IP_DF);
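/*
 * With DF set, an oversized packet is not fragmented below; instead
 * EMSGSIZE is returned and, when IP_RETURNMTU was given, the MTU in
 * use is reported back through *mtu_p.
 */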
510
511 /* Remember the current ip_len */
512 ip_len = ntohs(ip->ip_len);
513
514 #ifdef IPSEC
515 /* get SP for this packet */
516 if (so == NULL)
517 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND,
518 flags, &error);
519 else {
520 if (IPSEC_PCB_SKIP_IPSEC(sotoinpcb_hdr(so)->inph_sp,
521 IPSEC_DIR_OUTBOUND))
522 goto skip_ipsec;
523 sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
524 }
525
526 if (sp == NULL) {
527 ipsecstat.out_inval++;
528 goto bad;
529 }
530
531 error = 0;
532
533 /* check policy */
534 switch (sp->policy) {
535 case IPSEC_POLICY_DISCARD:
536 /*
537 * This packet is just discarded.
538 */
539 ipsecstat.out_polvio++;
540 goto bad;
541
542 case IPSEC_POLICY_BYPASS:
543 case IPSEC_POLICY_NONE:
544 /* no need to do IPsec. */
545 goto skip_ipsec;
546
547 case IPSEC_POLICY_IPSEC:
548 if (sp->req == NULL) {
549 /* XXX should be panic ? */
550 printf("ip_output: No IPsec request specified.\n");
551 error = EINVAL;
552 goto bad;
553 }
554 break;
555
556 case IPSEC_POLICY_ENTRUST:
557 default:
558 printf("ip_output: Invalid policy found. %d\n", sp->policy);
559 }
560
561 #ifdef IPSEC_NAT_T
562 /*
563 * NAT-T ESP fragmentation: don't do IPsec processing now;
564 * we'll do it on each fragmented packet.
565 */
566 if (sp->req->sav &&
567 ((sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP) ||
568 (sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP_NON_IKE))) {
569 if (ntohs(ip->ip_len) > sp->req->sav->esp_frag) {
570 natt_frag = 1;
571 mtu = sp->req->sav->esp_frag;
572 goto skip_ipsec;
573 }
574 }
575 #endif /* IPSEC_NAT_T */
576
577 /*
578 * ipsec4_output() expects ip_len and ip_off in network
579 * order. They have been set to network order above.
580 */
581
582 {
583 struct ipsec_output_state state;
584 bzero(&state, sizeof(state));
585 state.m = m;
586 if (flags & IP_ROUTETOIF) {
587 state.ro = &iproute;
588 memset(&iproute, 0, sizeof(iproute));
589 } else
590 state.ro = ro;
591 state.dst = (struct sockaddr *)dst;
592
593 /*
594 * We can't defer the checksum of payload data if
595 * we're about to encrypt/authenticate it.
596 *
597 * XXX When we support crypto offloading functions of
598 * XXX network interfaces, we need to reconsider this,
599 * XXX since it's likely that they'll support checksumming,
600 * XXX as well.
601 */
602 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
603 in_delayed_cksum(m);
604 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
605 }
606
607 error = ipsec4_output(&state, sp, flags);
608
609 m = state.m;
610 if (flags & IP_ROUTETOIF) {
611 /*
612 * if we have tunnel mode SA, we may need to ignore
613 * IP_ROUTETOIF.
614 */
615 if (state.ro != &iproute || state.ro->ro_rt != NULL) {
616 flags &= ~IP_ROUTETOIF;
617 ro = state.ro;
618 }
619 } else
620 ro = state.ro;
621 dst = (struct sockaddr_in *)state.dst;
622 if (error) {
623 /* mbuf is already reclaimed in ipsec4_output. */
624 m0 = NULL;
625 switch (error) {
626 case EHOSTUNREACH:
627 case ENETUNREACH:
628 case EMSGSIZE:
629 case ENOBUFS:
630 case ENOMEM:
631 break;
632 default:
633 printf("ip4_output (ipsec): error code %d\n", error);
634 /*fall through*/
635 case ENOENT:
636 /* don't show these error codes to the user */
637 error = 0;
638 break;
639 }
640 goto bad;
641 }
642
643 /* be sure to update variables that are affected by ipsec4_output() */
644 ip = mtod(m, struct ip *);
645 hlen = ip->ip_hl << 2;
646 ip_len = ntohs(ip->ip_len);
647
648 if (ro->ro_rt == NULL) {
649 if ((flags & IP_ROUTETOIF) == 0) {
650 printf("ip_output: "
651 "can't update route after IPsec processing\n");
652 error = EHOSTUNREACH; /*XXX*/
653 goto bad;
654 }
655 } else {
656 /* nobody uses ia beyond here */
657 if (state.encap) {
658 ifp = ro->ro_rt->rt_ifp;
659 if ((mtu = ro->ro_rt->rt_rmx.rmx_mtu) == 0)
660 mtu = ifp->if_mtu;
661 }
662 }
663 }
664 skip_ipsec:
665 #endif /*IPSEC*/
666 #ifdef FAST_IPSEC
667 /*
668 * Check the security policy (SP) for the packet and, if
669 * required, do IPsec-related processing. There are two
670 * cases here; the first time a packet is sent through,
671 * it will be untagged and handled by ipsec4_checkpolicy.
672 * If the packet is resubmitted to ip_output (e.g. after
673 * AH, ESP, etc. processing), there will be a tag to bypass
674 * the lookup and related policy checking.
675 */
676 mtag = m_tag_find(m, PACKET_TAG_IPSEC_PENDING_TDB, NULL);
677 s = splsoftnet();
678 if (mtag != NULL) {
679 tdbi = (struct tdb_ident *)(mtag + 1);
680 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_OUTBOUND);
681 if (sp == NULL)
682 error = -EINVAL; /* force silent drop */
683 m_tag_delete(m, mtag);
684 } else {
685 if (inp != NULL &&
686 IPSEC_PCB_SKIP_IPSEC(inp->inp_sp, IPSEC_DIR_OUTBOUND))
687 goto spd_done;
688 sp = ipsec4_checkpolicy(m, IPSEC_DIR_OUTBOUND, flags,
689 &error, inp);
690 }
691 /*
692 * There are four return cases:
693 * sp != NULL apply IPsec policy
694 * sp == NULL, error == 0 no IPsec handling needed
695 * sp == NULL, error == -EINVAL discard packet w/o error
696 * sp == NULL, error != 0 discard packet, report error
697 */
698 if (sp != NULL) {
699 #ifdef IPSEC_NAT_T
700 /*
701 * NAT-T ESP fragmentation: don't do IPsec processing now;
702 * we'll do it on each fragmented packet.
703 */
704 if (sp->req->sav &&
705 ((sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP) ||
706 (sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP_NON_IKE))) {
707 if (ntohs(ip->ip_len) > sp->req->sav->esp_frag) {
708 natt_frag = 1;
709 mtu = sp->req->sav->esp_frag;
710 goto spd_done;
711 }
712 }
713 #endif /* IPSEC_NAT_T */
714 /* Loop detection, check if ipsec processing already done */
715 IPSEC_ASSERT(sp->req != NULL, ("ip_output: no ipsec request"));
716 for (mtag = m_tag_first(m); mtag != NULL;
717 mtag = m_tag_next(m, mtag)) {
718 #ifdef MTAG_ABI_COMPAT
719 if (mtag->m_tag_cookie != MTAG_ABI_COMPAT)
720 continue;
721 #endif
722 if (mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_DONE &&
723 mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED)
724 continue;
725 /*
726 * Check whether the policy has an SA associated with
727 * it yet; an SP may not have acquired one, e.g. on
728 * first reference. If that is the case, let
729 * ipsec4_process_packet do its thing.
730 */
731 if (sp->req->sav == NULL)
732 break;
733 tdbi = (struct tdb_ident *)(mtag + 1);
734 if (tdbi->spi == sp->req->sav->spi &&
735 tdbi->proto == sp->req->sav->sah->saidx.proto &&
736 bcmp(&tdbi->dst, &sp->req->sav->sah->saidx.dst,
737 sizeof (union sockaddr_union)) == 0) {
738 /*
739 * No IPsec processing is needed, free
740 * reference to SP.
741 *
742 * NB: null pointer to avoid free at
743 * done: below.
744 */
745 KEY_FREESP(&sp), sp = NULL;
746 splx(s);
747 goto spd_done;
748 }
749 }
750
751 /*
752 * Do delayed checksums now because we send before
753 * this is done in the normal processing path.
754 */
755 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
756 in_delayed_cksum(m);
757 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
758 }
759
760 #ifdef __FreeBSD__
761 ip->ip_len = htons(ip->ip_len);
762 ip->ip_off = htons(ip->ip_off);
763 #endif
764
765 /* NB: callee frees mbuf */
766 error = ipsec4_process_packet(m, sp->req, flags, 0);
767 /*
768 * Preserve KAME behaviour: ENOENT can be returned
769 * when an SA acquire is in progress. Don't propagate
770 * this to user-level; it confuses applications.
771 *
772 * XXX this will go away when the SADB is redone.
773 */
774 if (error == ENOENT)
775 error = 0;
776 splx(s);
777 goto done;
778 } else {
779 splx(s);
780
781 if (error != 0) {
782 /*
783 * Hack: -EINVAL is used to signal that a packet
784 * should be silently discarded. This is typically
785 * because we asked key management for an SA and
786 * it was delayed (e.g. kicked up to IKE).
787 */
788 if (error == -EINVAL)
789 error = 0;
790 goto bad;
791 } else {
792 /* No IPsec processing for this packet. */
793 }
794 #ifdef notyet
795 /*
796 * If deferred crypto processing is needed, check that
797 * the interface supports it.
798 */
799 mtag = m_tag_find(m, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL);
800 if (mtag != NULL && (ifp->if_capenable & IFCAP_IPSEC) == 0) {
801 /* notify IPsec to do its own crypto */
802 ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
803 error = EHOSTUNREACH;
804 goto bad;
805 }
806 #endif
807 }
808 spd_done:
809 #endif /* FAST_IPSEC */
810
811 #ifdef PFIL_HOOKS
812 /*
813 * Run through list of hooks for output packets.
814 */
815 if ((error = pfil_run_hooks(&inet_pfil_hook, &m, ifp, PFIL_OUT)) != 0)
816 goto done;
817 if (m == NULL)
818 goto done;
819
820 ip = mtod(m, struct ip *);
821 hlen = ip->ip_hl << 2;
822 ip_len = ntohs(ip->ip_len);
823 #endif /* PFIL_HOOKS */
824
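/*
 * Record the IP header length in the upper 16 bits of csum_data for
 * drivers that offload checksums (cf. M_CSUM_DATA_IPv4_IPHL()).
 */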
825 m->m_pkthdr.csum_data |= hlen << 16;
826
827 #if IFA_STATS
828 /*
829 * search for the source address structure to
830 * maintain output statistics.
831 */
832 INADDR_TO_IA(ip->ip_src, ia);
833 #endif
834
835 /* Maybe skip checksums on loopback interfaces. */
836 if (IN_NEED_CHECKSUM(ifp, M_CSUM_IPv4)) {
837 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
838 }
839 sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_csum_flags_tx;
840 /*
841 * If small enough for mtu of path, or if using TCP segmentation
842 * offload, can just send directly.
843 */
844 if (ip_len <= mtu ||
845 (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
846 #if IFA_STATS
847 if (ia)
848 ia->ia_ifa.ifa_data.ifad_outbytes += ip_len;
849 #endif
850 /*
851 * Always initialize the sum to 0! Some HW assisted
852 * checksumming requires this.
853 */
854 ip->ip_sum = 0;
855
856 if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0) {
857 /*
858 * Perform any checksums that the hardware can't do
859 * for us.
860 *
861 * XXX Does any hardware require the {th,uh}_sum
862 * XXX fields to be 0?
863 */
864 if (sw_csum & M_CSUM_IPv4) {
865 KASSERT(IN_NEED_CHECKSUM(ifp, M_CSUM_IPv4));
866 ip->ip_sum = in_cksum(m, hlen);
867 m->m_pkthdr.csum_flags &= ~M_CSUM_IPv4;
868 }
869 if (sw_csum & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
870 if (IN_NEED_CHECKSUM(ifp,
871 sw_csum & (M_CSUM_TCPv4|M_CSUM_UDPv4))) {
872 in_delayed_cksum(m);
873 }
874 m->m_pkthdr.csum_flags &=
875 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
876 }
877 }
878
879 #ifdef IPSEC
880 /* clean ipsec history once it goes out of the node */
881 ipsec_delaux(m);
882 #endif
883
884 if (__predict_true(
885 (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0 ||
886 (ifp->if_capenable & IFCAP_TSOv4) != 0)) {
887 error =
888 (*ifp->if_output)(ifp, m, sintosa(dst), ro->ro_rt);
889 } else {
890 error =
891 ip_tso_output(ifp, m, sintosa(dst), ro->ro_rt);
892 }
893 goto done;
894 }
895
896 /*
897 * We can't use HW checksumming if we're about to
898 * fragment the packet.
899 *
900 * XXX Some hardware can do this.
901 */
902 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
903 if (IN_NEED_CHECKSUM(ifp,
904 m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))) {
905 in_delayed_cksum(m);
906 }
907 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
908 }
909
910 /*
911 * Too large for interface; fragment if possible.
912 * Must be able to put at least 8 bytes per fragment.
913 */
914 if (ntohs(ip->ip_off) & IP_DF) {
915 if (flags & IP_RETURNMTU)
916 *mtu_p = mtu;
917 error = EMSGSIZE;
918 ipstat.ips_cantfrag++;
919 goto bad;
920 }
921
922 error = ip_fragment(m, ifp, mtu);
923 if (error) {
924 m = NULL;
925 goto bad;
926 }
927
928 for (; m; m = m0) {
929 m0 = m->m_nextpkt;
930 m->m_nextpkt = 0;
931 if (error == 0) {
932 #if IFA_STATS
933 if (ia)
934 ia->ia_ifa.ifa_data.ifad_outbytes +=
935 ntohs(ip->ip_len);
936 #endif
937 #ifdef IPSEC
938 /* clean ipsec history once it goes out of the node */
939 ipsec_delaux(m);
940 #endif /* IPSEC */
941
942 #ifdef IPSEC_NAT_T
943 /*
944 * If we get here, the packet has not been handled by
945 * IPsec even though it should have been. Now that it has
946 * been fragmented, re-inject it into ip_output so that IPsec
947 * processing can occur.
948 */
949 if (natt_frag) {
950 error = ip_output(m, opt,
951 ro, flags, imo, so, mtu_p);
952 } else
953 #endif /* IPSEC_NAT_T */
954 {
955 KASSERT((m->m_pkthdr.csum_flags &
956 (M_CSUM_UDPv4 | M_CSUM_TCPv4)) == 0);
957 error = (*ifp->if_output)(ifp, m, sintosa(dst),
958 ro->ro_rt);
959 }
960 } else
961 m_freem(m);
962 }
963
964 if (error == 0)
965 ipstat.ips_fragmented++;
966 done:
967 rtcache_free(&iproute);
968
969 #ifdef IPSEC
970 if (sp != NULL) {
971 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
972 printf("DP ip_output call free SP:%p\n", sp));
973 key_freesp(sp);
974 }
975 #endif /* IPSEC */
976 #ifdef FAST_IPSEC
977 if (sp != NULL)
978 KEY_FREESP(&sp);
979 #endif /* FAST_IPSEC */
980
981 return (error);
982 bad:
983 m_freem(m);
984 goto done;
985 }
986
987 int
988 ip_fragment(struct mbuf *m, struct ifnet *ifp, u_long mtu)
989 {
990 struct ip *ip, *mhip;
991 struct mbuf *m0;
992 int len, hlen, off;
993 int mhlen, firstlen;
994 struct mbuf **mnext;
995 int sw_csum = m->m_pkthdr.csum_flags;
996 int fragments = 0;
997 int s;
998 int error = 0;
999
1000 ip = mtod(m, struct ip *);
1001 hlen = ip->ip_hl << 2;
1002 if (ifp != NULL)
1003 sw_csum &= ~ifp->if_csum_flags_tx;
1004
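/*
 * Payload bytes per fragment: the space left after the IP header,
 * rounded down to a multiple of 8 as fragment offsets require.
 * E.g. an MTU of 1500 with a 20-byte header gives len = 1480.
 */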
1005 len = (mtu - hlen) &~ 7;
1006 if (len < 8) {
1007 m_freem(m);
1008 return (EMSGSIZE);
1009 }
1010
1011 firstlen = len;
1012 mnext = &m->m_nextpkt;
1013
1014 /*
1015 * Loop through length of segment after first fragment,
1016 * make new header and copy data of each part and link onto chain.
1017 */
1018 m0 = m;
1019 mhlen = sizeof (struct ip);
1020 for (off = hlen + len; off < ntohs(ip->ip_len); off += len) {
1021 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1022 if (m == 0) {
1023 error = ENOBUFS;
1024 ipstat.ips_odropped++;
1025 goto sendorfree;
1026 }
1027 MCLAIM(m, m0->m_owner);
1028 *mnext = m;
1029 mnext = &m->m_nextpkt;
1030 m->m_data += max_linkhdr;
1031 mhip = mtod(m, struct ip *);
1032 *mhip = *ip;
1033 /* we must inherit MCAST and BCAST flags */
1034 m->m_flags |= m0->m_flags & (M_MCAST|M_BCAST);
1035 if (hlen > sizeof (struct ip)) {
1036 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1037 mhip->ip_hl = mhlen >> 2;
1038 }
1039 m->m_len = mhlen;
1040 mhip->ip_off = ((off - hlen) >> 3) +
1041 (ntohs(ip->ip_off) & ~IP_MF);
1042 if (ip->ip_off & htons(IP_MF))
1043 mhip->ip_off |= IP_MF;
1044 if (off + len >= ntohs(ip->ip_len))
1045 len = ntohs(ip->ip_len) - off;
1046 else
1047 mhip->ip_off |= IP_MF;
1048 HTONS(mhip->ip_off);
1049 mhip->ip_len = htons((u_int16_t)(len + mhlen));
1050 m->m_next = m_copy(m0, off, len);
1051 if (m->m_next == 0) {
1052 error = ENOBUFS; /* ??? */
1053 ipstat.ips_odropped++;
1054 goto sendorfree;
1055 }
1056 m->m_pkthdr.len = mhlen + len;
1057 m->m_pkthdr.rcvif = (struct ifnet *)0;
1058 mhip->ip_sum = 0;
1059 if (sw_csum & M_CSUM_IPv4) {
1060 mhip->ip_sum = in_cksum(m, mhlen);
1061 KASSERT((m->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0);
1062 } else {
1063 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1064 m->m_pkthdr.csum_data |= mhlen << 16;
1065 }
1066 ipstat.ips_ofragments++;
1067 fragments++;
1068 }
1069 /*
1070 * Update first fragment by trimming what's been copied out
1071 * and updating header, then send each fragment (in order).
1072 */
1073 m = m0;
1074 m_adj(m, hlen + firstlen - ntohs(ip->ip_len));
1075 m->m_pkthdr.len = hlen + firstlen;
1076 ip->ip_len = htons((u_int16_t)m->m_pkthdr.len);
1077 ip->ip_off |= htons(IP_MF);
1078 ip->ip_sum = 0;
1079 if (sw_csum & M_CSUM_IPv4) {
1080 ip->ip_sum = in_cksum(m, hlen);
1081 m->m_pkthdr.csum_flags &= ~M_CSUM_IPv4;
1082 } else {
1083 KASSERT(m->m_pkthdr.csum_flags & M_CSUM_IPv4);
1084 KASSERT(M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) >=
1085 sizeof(struct ip));
1086 }
1087 sendorfree:
1088 /*
1089 * If there is no room for all the fragments, don't queue
1090 * any of them.
1091 */
1092 if (ifp != NULL) {
1093 s = splnet();
1094 if (ifp->if_snd.ifq_maxlen - ifp->if_snd.ifq_len < fragments &&
1095 error == 0) {
1096 error = ENOBUFS;
1097 ipstat.ips_odropped++;
1098 IFQ_INC_DROPS(&ifp->if_snd);
1099 }
1100 splx(s);
1101 }
1102 if (error) {
1103 for (m = m0; m; m = m0) {
1104 m0 = m->m_nextpkt;
1105 m->m_nextpkt = NULL;
1106 m_freem(m);
1107 }
1108 }
1109 return (error);
1110 }
1111
1112 /*
1113 * Process a delayed payload checksum calculation.
1114 */
1115 void
1116 in_delayed_cksum(struct mbuf *m)
1117 {
1118 struct ip *ip;
1119 u_int16_t csum, offset;
1120
1121 ip = mtod(m, struct ip *);
1122 offset = ip->ip_hl << 2;
1123 csum = in4_cksum(m, 0, offset, ntohs(ip->ip_len) - offset);
1124 if (csum == 0 && (m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
1125 csum = 0xffff;
1126
1127 offset += M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);
1128
1129 if ((offset + sizeof(u_int16_t)) > m->m_len) {
1130 /* This can happen when IP options have been inserted
1131 printf("in_delayed_cksum: pullup len %d off %d proto %d\n",
1132 m->m_len, offset, ip->ip_p);
1133 */
1134 m_copyback(m, offset, sizeof(csum), (caddr_t) &csum);
1135 } else
1136 *(u_int16_t *)(mtod(m, caddr_t) + offset) = csum;
1137 }
1138
1139 /*
1140 * Determine the maximum length of the options to be inserted;
1141 * we would far rather allocate too much space than too little.
1142 */
1143
1144 u_int
1145 ip_optlen(struct inpcb *inp)
1146 {
1147 struct mbuf *m = inp->inp_options;
1148
1149 if (m && m->m_len > offsetof(struct ipoption, ipopt_dst))
1150 return (m->m_len - offsetof(struct ipoption, ipopt_dst));
1151 else
1152 return 0;
1153 }
1154
1155
1156 /*
1157 * Insert IP options into preformed packet.
1158 * Adjust IP destination as required for IP source routing,
1159 * as indicated by a non-zero in_addr at the start of the options.
1160 */
1161 static struct mbuf *
1162 ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
1163 {
1164 struct ipoption *p = mtod(opt, struct ipoption *);
1165 struct mbuf *n;
1166 struct ip *ip = mtod(m, struct ip *);
1167 unsigned optlen;
1168
1169 optlen = opt->m_len - sizeof(p->ipopt_dst);
1170 if (optlen + ntohs(ip->ip_len) > IP_MAXPACKET)
1171 return (m); /* XXX should fail */
1172 if (!in_nullhost(p->ipopt_dst))
1173 ip->ip_dst = p->ipopt_dst;
1174 if (M_READONLY(m) || M_LEADINGSPACE(m) < optlen) {
1175 MGETHDR(n, M_DONTWAIT, MT_HEADER);
1176 if (n == 0)
1177 return (m);
1178 MCLAIM(n, m->m_owner);
1179 M_MOVE_PKTHDR(n, m);
1180 m->m_len -= sizeof(struct ip);
1181 m->m_data += sizeof(struct ip);
1182 n->m_next = m;
1183 m = n;
1184 m->m_len = optlen + sizeof(struct ip);
1185 m->m_data += max_linkhdr;
1186 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
1187 } else {
1188 m->m_data -= optlen;
1189 m->m_len += optlen;
1190 memmove(mtod(m, caddr_t), ip, sizeof(struct ip));
1191 }
1192 m->m_pkthdr.len += optlen;
1193 ip = mtod(m, struct ip *);
1194 bcopy((caddr_t)p->ipopt_list, (caddr_t)(ip + 1), (unsigned)optlen);
1195 *phlen = sizeof(struct ip) + optlen;
1196 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1197 return (m);
1198 }
1199
1200 /*
1201 * Copy options from ip to jp,
1202 * omitting those not copied during fragmentation.
1203 */
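/*
 * Per RFC 791 only options with the "copied" bit (the high bit of the
 * option type, tested by IPOPT_COPIED()) set, such as LSRR and SSRR,
 * are propagated into fragments; IPOPT_NOP is kept for alignment and
 * everything else is omitted.
 */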
1204 int
1205 ip_optcopy(struct ip *ip, struct ip *jp)
1206 {
1207 u_char *cp, *dp;
1208 int opt, optlen, cnt;
1209
1210 cp = (u_char *)(ip + 1);
1211 dp = (u_char *)(jp + 1);
1212 cnt = (ip->ip_hl << 2) - sizeof (struct ip);
1213 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1214 opt = cp[0];
1215 if (opt == IPOPT_EOL)
1216 break;
1217 if (opt == IPOPT_NOP) {
1218 /* Preserve for IP mcast tunnel's LSRR alignment. */
1219 *dp++ = IPOPT_NOP;
1220 optlen = 1;
1221 continue;
1222 }
1223 #ifdef DIAGNOSTIC
1224 if (cnt < IPOPT_OLEN + sizeof(*cp))
1225 panic("malformed IPv4 option passed to ip_optcopy");
1226 #endif
1227 optlen = cp[IPOPT_OLEN];
1228 #ifdef DIAGNOSTIC
1229 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
1230 panic("malformed IPv4 option passed to ip_optcopy");
1231 #endif
1232 /* bogus lengths should have been caught by ip_dooptions */
1233 if (optlen > cnt)
1234 optlen = cnt;
1235 if (IPOPT_COPIED(opt)) {
1236 bcopy((caddr_t)cp, (caddr_t)dp, (unsigned)optlen);
1237 dp += optlen;
1238 }
1239 }
1240 for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++)
1241 *dp++ = IPOPT_EOL;
1242 return (optlen);
1243 }
1244
1245 /*
1246 * IP socket option processing.
1247 */
1248 int
1249 ip_ctloutput(int op, struct socket *so, int level, int optname,
1250 struct mbuf **mp)
1251 {
1252 struct inpcb *inp = sotoinpcb(so);
1253 struct mbuf *m = *mp;
1254 int optval = 0;
1255 int error = 0;
1256 #if defined(IPSEC) || defined(FAST_IPSEC)
1257 struct lwp *l = curlwp; /*XXX*/
1258 #endif
1259
1260 if (level != IPPROTO_IP) {
1261 error = EINVAL;
1262 if (op == PRCO_SETOPT && *mp)
1263 (void) m_free(*mp);
1264 } else switch (op) {
1265
1266 case PRCO_SETOPT:
1267 switch (optname) {
1268 case IP_OPTIONS:
1269 #ifdef notyet
1270 case IP_RETOPTS:
1271 return (ip_pcbopts(optname, &inp->inp_options, m));
1272 #else
1273 return (ip_pcbopts(&inp->inp_options, m));
1274 #endif
1275
1276 case IP_TOS:
1277 case IP_TTL:
1278 case IP_RECVOPTS:
1279 case IP_RECVRETOPTS:
1280 case IP_RECVDSTADDR:
1281 case IP_RECVIF:
1282 if (m == NULL || m->m_len != sizeof(int))
1283 error = EINVAL;
1284 else {
1285 optval = *mtod(m, int *);
1286 switch (optname) {
1287
1288 case IP_TOS:
1289 inp->inp_ip.ip_tos = optval;
1290 break;
1291
1292 case IP_TTL:
1293 inp->inp_ip.ip_ttl = optval;
1294 break;
1295 #define OPTSET(bit) \
1296 if (optval) \
1297 inp->inp_flags |= bit; \
1298 else \
1299 inp->inp_flags &= ~bit;
1300
1301 case IP_RECVOPTS:
1302 OPTSET(INP_RECVOPTS);
1303 break;
1304
1305 case IP_RECVRETOPTS:
1306 OPTSET(INP_RECVRETOPTS);
1307 break;
1308
1309 case IP_RECVDSTADDR:
1310 OPTSET(INP_RECVDSTADDR);
1311 break;
1312
1313 case IP_RECVIF:
1314 OPTSET(INP_RECVIF);
1315 break;
1316 }
1317 }
1318 break;
1319 #undef OPTSET
1320
1321 case IP_MULTICAST_IF:
1322 case IP_MULTICAST_TTL:
1323 case IP_MULTICAST_LOOP:
1324 case IP_ADD_MEMBERSHIP:
1325 case IP_DROP_MEMBERSHIP:
1326 error = ip_setmoptions(optname, &inp->inp_moptions, m);
1327 break;
1328
1329 case IP_PORTRANGE:
1330 if (m == 0 || m->m_len != sizeof(int))
1331 error = EINVAL;
1332 else {
1333 optval = *mtod(m, int *);
1334
1335 switch (optval) {
1336
1337 case IP_PORTRANGE_DEFAULT:
1338 case IP_PORTRANGE_HIGH:
1339 inp->inp_flags &= ~(INP_LOWPORT);
1340 break;
1341
1342 case IP_PORTRANGE_LOW:
1343 inp->inp_flags |= INP_LOWPORT;
1344 break;
1345
1346 default:
1347 error = EINVAL;
1348 break;
1349 }
1350 }
1351 break;
1352
1353 #if defined(IPSEC) || defined(FAST_IPSEC)
1354 case IP_IPSEC_POLICY:
1355 {
1356 caddr_t req = NULL;
1357 size_t len = 0;
1358 int priv = 0;
1359
1360 #ifdef __NetBSD__
1361 if (l == 0 || kauth_authorize_generic(l->l_cred,
1362 KAUTH_GENERIC_ISSUSER, NULL))
1363 priv = 0;
1364 else
1365 priv = 1;
1366 #else
1367 priv = (in6p->in6p_socket->so_state & SS_PRIV);
1368 #endif
1369 if (m) {
1370 req = mtod(m, caddr_t);
1371 len = m->m_len;
1372 }
1373 error = ipsec4_set_policy(inp, optname, req, len, priv);
1374 break;
1375 }
1376 #endif /*IPSEC*/
1377
1378 default:
1379 error = ENOPROTOOPT;
1380 break;
1381 }
1382 if (m)
1383 (void)m_free(m);
1384 break;
1385
1386 case PRCO_GETOPT:
1387 switch (optname) {
1388 case IP_OPTIONS:
1389 case IP_RETOPTS:
1390 *mp = m = m_get(M_WAIT, MT_SOOPTS);
1391 MCLAIM(m, so->so_mowner);
1392 if (inp->inp_options) {
1393 m->m_len = inp->inp_options->m_len;
1394 bcopy(mtod(inp->inp_options, caddr_t),
1395 mtod(m, caddr_t), (unsigned)m->m_len);
1396 } else
1397 m->m_len = 0;
1398 break;
1399
1400 case IP_TOS:
1401 case IP_TTL:
1402 case IP_RECVOPTS:
1403 case IP_RECVRETOPTS:
1404 case IP_RECVDSTADDR:
1405 case IP_RECVIF:
1406 case IP_ERRORMTU:
1407 *mp = m = m_get(M_WAIT, MT_SOOPTS);
1408 MCLAIM(m, so->so_mowner);
1409 m->m_len = sizeof(int);
1410 switch (optname) {
1411
1412 case IP_TOS:
1413 optval = inp->inp_ip.ip_tos;
1414 break;
1415
1416 case IP_TTL:
1417 optval = inp->inp_ip.ip_ttl;
1418 break;
1419
1420 case IP_ERRORMTU:
1421 optval = inp->inp_errormtu;
1422 break;
1423
1424 #define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0)
1425
1426 case IP_RECVOPTS:
1427 optval = OPTBIT(INP_RECVOPTS);
1428 break;
1429
1430 case IP_RECVRETOPTS:
1431 optval = OPTBIT(INP_RECVRETOPTS);
1432 break;
1433
1434 case IP_RECVDSTADDR:
1435 optval = OPTBIT(INP_RECVDSTADDR);
1436 break;
1437
1438 case IP_RECVIF:
1439 optval = OPTBIT(INP_RECVIF);
1440 break;
1441 }
1442 *mtod(m, int *) = optval;
1443 break;
1444
1445 #if 0 /* defined(IPSEC) || defined(FAST_IPSEC) */
1446 /* XXX: code broken */
1447 case IP_IPSEC_POLICY:
1448 {
1449 caddr_t req = NULL;
1450 size_t len = 0;
1451
1452 if (m) {
1453 req = mtod(m, caddr_t);
1454 len = m->m_len;
1455 }
1456 error = ipsec4_get_policy(inp, req, len, mp);
1457 break;
1458 }
1459 #endif /*IPSEC*/
1460
1461 case IP_MULTICAST_IF:
1462 case IP_MULTICAST_TTL:
1463 case IP_MULTICAST_LOOP:
1464 case IP_ADD_MEMBERSHIP:
1465 case IP_DROP_MEMBERSHIP:
1466 error = ip_getmoptions(optname, inp->inp_moptions, mp);
1467 if (*mp)
1468 MCLAIM(*mp, so->so_mowner);
1469 break;
1470
1471 case IP_PORTRANGE:
1472 *mp = m = m_get(M_WAIT, MT_SOOPTS);
1473 MCLAIM(m, so->so_mowner);
1474 m->m_len = sizeof(int);
1475
1476 if (inp->inp_flags & INP_LOWPORT)
1477 optval = IP_PORTRANGE_LOW;
1478 else
1479 optval = IP_PORTRANGE_DEFAULT;
1480
1481 *mtod(m, int *) = optval;
1482 break;
1483
1484 default:
1485 error = ENOPROTOOPT;
1486 break;
1487 }
1488 break;
1489 }
1490 return (error);
1491 }
1492
1493 /*
1494 * Set up IP options in pcb for insertion in output packets.
1495 * Store in mbuf with pointer in pcbopt, adding pseudo-option
1496 * with destination address if source routed.
1497 */
1498 int
1499 #ifdef notyet
1500 ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m)
1501 #else
1502 ip_pcbopts(struct mbuf **pcbopt, struct mbuf *m)
1503 #endif
1504 {
1505 int cnt, optlen;
1506 u_char *cp;
1507 u_char opt;
1508
1509 /* turn off any old options */
1510 if (*pcbopt)
1511 (void)m_free(*pcbopt);
1512 *pcbopt = 0;
1513 if (m == (struct mbuf *)0 || m->m_len == 0) {
1514 /*
1515 * Only turning off any previous options.
1516 */
1517 if (m)
1518 (void)m_free(m);
1519 return (0);
1520 }
1521
1522 #ifndef __vax__
1523 if (m->m_len % sizeof(int32_t))
1524 goto bad;
1525 #endif
1526 /*
1527 * IP first-hop destination address will be stored before
1528 * actual options; move other options back
1529 * and clear it when none present.
1530 */
1531 if (m->m_data + m->m_len + sizeof(struct in_addr) >= &m->m_dat[MLEN])
1532 goto bad;
1533 cnt = m->m_len;
1534 m->m_len += sizeof(struct in_addr);
1535 cp = mtod(m, u_char *) + sizeof(struct in_addr);
1536 memmove(cp, mtod(m, caddr_t), (unsigned)cnt);
1537 bzero(mtod(m, caddr_t), sizeof(struct in_addr));
1538
1539 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1540 opt = cp[IPOPT_OPTVAL];
1541 if (opt == IPOPT_EOL)
1542 break;
1543 if (opt == IPOPT_NOP)
1544 optlen = 1;
1545 else {
1546 if (cnt < IPOPT_OLEN + sizeof(*cp))
1547 goto bad;
1548 optlen = cp[IPOPT_OLEN];
1549 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
1550 goto bad;
1551 }
1552 switch (opt) {
1553
1554 default:
1555 break;
1556
1557 case IPOPT_LSRR:
1558 case IPOPT_SSRR:
1559 /*
1560 * user process specifies route as:
1561 * ->A->B->C->D
1562 * D must be our final destination (but we can't
1563 * check that since we may not have connected yet).
1564 * A is first hop destination, which doesn't appear in
1565 * actual IP option, but is stored before the options.
1566 */
1567 if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr))
1568 goto bad;
1569 m->m_len -= sizeof(struct in_addr);
1570 cnt -= sizeof(struct in_addr);
1571 optlen -= sizeof(struct in_addr);
1572 cp[IPOPT_OLEN] = optlen;
1573 /*
1574 * Move first hop before start of options.
1575 */
1576 bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t),
1577 sizeof(struct in_addr));
1578 /*
1579 * Then copy rest of options back
1580 * to close up the deleted entry.
1581 */
1582 (void)memmove(&cp[IPOPT_OFFSET+1],
1583 &cp[IPOPT_OFFSET+1] + sizeof(struct in_addr),
1584 (unsigned)cnt - (IPOPT_MINOFF - 1));
1585 break;
1586 }
1587 }
1588 if (m->m_len > MAX_IPOPTLEN + sizeof(struct in_addr))
1589 goto bad;
1590 *pcbopt = m;
1591 return (0);
1592
1593 bad:
1594 (void)m_free(m);
1595 return (EINVAL);
1596 }
1597
1598 /*
1599 * Following RFC 1724 section 3.3, addresses in 0.0.0.0/8 are interpreted as interface indices.
1600 */
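/*
 * For example, passing 0.0.0.3 selects the interface with if_index 3
 * rather than selecting an interface by one of its local addresses.
 */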
1601 static struct ifnet *
1602 ip_multicast_if(struct in_addr *a, int *ifindexp)
1603 {
1604 int ifindex;
1605 struct ifnet *ifp = NULL;
1606 struct in_ifaddr *ia;
1607
1608 if (ifindexp)
1609 *ifindexp = 0;
1610 if (ntohl(a->s_addr) >> 24 == 0) {
1611 ifindex = ntohl(a->s_addr) & 0xffffff;
1612 if (ifindex < 0 || if_indexlim <= ifindex)
1613 return NULL;
1614 ifp = ifindex2ifnet[ifindex];
1615 if (!ifp)
1616 return NULL;
1617 if (ifindexp)
1618 *ifindexp = ifindex;
1619 } else {
1620 LIST_FOREACH(ia, &IN_IFADDR_HASH(a->s_addr), ia_hash) {
1621 if (in_hosteq(ia->ia_addr.sin_addr, *a) &&
1622 (ia->ia_ifp->if_flags & IFF_MULTICAST) != 0) {
1623 ifp = ia->ia_ifp;
1624 break;
1625 }
1626 }
1627 }
1628 return ifp;
1629 }
1630
1631 static int
1632 ip_getoptval(struct mbuf *m, u_int8_t *val, u_int maxval)
1633 {
1634 u_int tval;
1635
1636 if (m == NULL)
1637 return EINVAL;
1638
1639 switch (m->m_len) {
1640 case sizeof(u_char):
1641 tval = *(mtod(m, u_char *));
1642 break;
1643 case sizeof(u_int):
1644 tval = *(mtod(m, u_int *));
1645 break;
1646 default:
1647 return EINVAL;
1648 }
1649
1650 if (tval > maxval)
1651 return EINVAL;
1652
1653 *val = tval;
1654 return 0;
1655 }
1656
1657 /*
1658 * Set the IP multicast options in response to user setsockopt().
1659 */
1660 int
1661 ip_setmoptions(int optname, struct ip_moptions **imop, struct mbuf *m)
1662 {
1663 int error = 0;
1664 int i;
1665 struct in_addr addr;
1666 struct ip_mreq *mreq;
1667 struct ifnet *ifp;
1668 struct ip_moptions *imo = *imop;
1669 struct route ro;
1670 struct sockaddr_in *dst;
1671 int ifindex;
1672
1673 if (imo == NULL) {
1674 /*
1675 * No multicast option buffer attached to the pcb;
1676 * allocate one and initialize to default values.
1677 */
1678 imo = (struct ip_moptions *)malloc(sizeof(*imo), M_IPMOPTS,
1679 M_WAITOK);
1680
1681 if (imo == NULL)
1682 return (ENOBUFS);
1683 *imop = imo;
1684 imo->imo_multicast_ifp = NULL;
1685 imo->imo_multicast_addr.s_addr = INADDR_ANY;
1686 imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
1687 imo->imo_multicast_loop = IP_DEFAULT_MULTICAST_LOOP;
1688 imo->imo_num_memberships = 0;
1689 }
1690
1691 switch (optname) {
1692
1693 case IP_MULTICAST_IF:
1694 /*
1695 * Select the interface for outgoing multicast packets.
1696 */
1697 if (m == NULL || m->m_len != sizeof(struct in_addr)) {
1698 error = EINVAL;
1699 break;
1700 }
1701 addr = *(mtod(m, struct in_addr *));
1702 /*
1703 * INADDR_ANY is used to remove a previous selection.
1704 * When no interface is selected, a default one is
1705 * chosen every time a multicast packet is sent.
1706 */
1707 if (in_nullhost(addr)) {
1708 imo->imo_multicast_ifp = NULL;
1709 break;
1710 }
1711 /*
1712 * The selected interface is identified by its local
1713 * IP address. Find the interface and confirm that
1714 * it supports multicasting.
1715 */
1716 ifp = ip_multicast_if(&addr, &ifindex);
1717 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
1718 error = EADDRNOTAVAIL;
1719 break;
1720 }
1721 imo->imo_multicast_ifp = ifp;
1722 if (ifindex)
1723 imo->imo_multicast_addr = addr;
1724 else
1725 imo->imo_multicast_addr.s_addr = INADDR_ANY;
1726 break;
1727
1728 case IP_MULTICAST_TTL:
1729 /*
1730 * Set the IP time-to-live for outgoing multicast packets.
1731 */
1732 error = ip_getoptval(m, &imo->imo_multicast_ttl, MAXTTL);
1733 break;
1734
1735 case IP_MULTICAST_LOOP:
1736 /*
1737 * Set the loopback flag for outgoing multicast packets.
1738 * Must be zero or one.
1739 */
1740 error = ip_getoptval(m, &imo->imo_multicast_loop, 1);
1741 break;
1742
1743 case IP_ADD_MEMBERSHIP:
1744 /*
1745 * Add a multicast group membership.
1746 * Group must be a valid IP multicast address.
1747 */
1748 if (m == NULL || m->m_len != sizeof(struct ip_mreq)) {
1749 error = EINVAL;
1750 break;
1751 }
1752 mreq = mtod(m, struct ip_mreq *);
1753 if (!IN_MULTICAST(mreq->imr_multiaddr.s_addr)) {
1754 error = EINVAL;
1755 break;
1756 }
1757 /*
1758 * If no interface address was provided, use the interface of
1759 * the route to the given multicast address.
1760 */
1761 if (in_nullhost(mreq->imr_interface)) {
1762 memset(&ro, 0, sizeof(ro));
1763 dst = satosin(&ro.ro_dst);
1764 dst->sin_len = sizeof(*dst);
1765 dst->sin_family = AF_INET;
1766 dst->sin_addr = mreq->imr_multiaddr;
1767 rtcache_init(&ro);
1768 ifp = (ro.ro_rt != NULL) ? ro.ro_rt->rt_ifp : NULL;
1769 rtcache_free(&ro);
1770 } else {
1771 ifp = ip_multicast_if(&mreq->imr_interface, NULL);
1772 }
1773 /*
1774 * See if we found an interface, and confirm that it
1775 * supports multicast.
1776 */
1777 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
1778 error = EADDRNOTAVAIL;
1779 break;
1780 }
1781 /*
1782 * See if the membership already exists or if all the
1783 * membership slots are full.
1784 */
1785 for (i = 0; i < imo->imo_num_memberships; ++i) {
1786 if (imo->imo_membership[i]->inm_ifp == ifp &&
1787 in_hosteq(imo->imo_membership[i]->inm_addr,
1788 mreq->imr_multiaddr))
1789 break;
1790 }
1791 if (i < imo->imo_num_memberships) {
1792 error = EADDRINUSE;
1793 break;
1794 }
1795 if (i == IP_MAX_MEMBERSHIPS) {
1796 error = ETOOMANYREFS;
1797 break;
1798 }
1799 /*
1800 * Everything looks good; add a new record to the multicast
1801 * address list for the given interface.
1802 */
1803 if ((imo->imo_membership[i] =
1804 in_addmulti(&mreq->imr_multiaddr, ifp)) == NULL) {
1805 error = ENOBUFS;
1806 break;
1807 }
1808 ++imo->imo_num_memberships;
1809 break;
1810
1811 case IP_DROP_MEMBERSHIP:
1812 /*
1813 * Drop a multicast group membership.
1814 * Group must be a valid IP multicast address.
1815 */
1816 if (m == NULL || m->m_len != sizeof(struct ip_mreq)) {
1817 error = EINVAL;
1818 break;
1819 }
1820 mreq = mtod(m, struct ip_mreq *);
1821 if (!IN_MULTICAST(mreq->imr_multiaddr.s_addr)) {
1822 error = EINVAL;
1823 break;
1824 }
1825 /*
1826 * If an interface address was specified, get a pointer
1827 * to its ifnet structure.
1828 */
1829 if (in_nullhost(mreq->imr_interface))
1830 ifp = NULL;
1831 else {
1832 ifp = ip_multicast_if(&mreq->imr_interface, NULL);
1833 if (ifp == NULL) {
1834 error = EADDRNOTAVAIL;
1835 break;
1836 }
1837 }
1838 /*
1839 * Find the membership in the membership array.
1840 */
1841 for (i = 0; i < imo->imo_num_memberships; ++i) {
1842 if ((ifp == NULL ||
1843 imo->imo_membership[i]->inm_ifp == ifp) &&
1844 in_hosteq(imo->imo_membership[i]->inm_addr,
1845 mreq->imr_multiaddr))
1846 break;
1847 }
1848 if (i == imo->imo_num_memberships) {
1849 error = EADDRNOTAVAIL;
1850 break;
1851 }
1852 /*
1853 * Give up the multicast address record to which the
1854 * membership points.
1855 */
1856 in_delmulti(imo->imo_membership[i]);
1857 /*
1858 * Remove the gap in the membership array.
1859 */
1860 for (++i; i < imo->imo_num_memberships; ++i)
1861 imo->imo_membership[i-1] = imo->imo_membership[i];
1862 --imo->imo_num_memberships;
1863 break;
1864
1865 default:
1866 error = EOPNOTSUPP;
1867 break;
1868 }
1869
1870 /*
1871 * If all options have default values, no need to keep the option structure.
1872 */
1873 if (imo->imo_multicast_ifp == NULL &&
1874 imo->imo_multicast_ttl == IP_DEFAULT_MULTICAST_TTL &&
1875 imo->imo_multicast_loop == IP_DEFAULT_MULTICAST_LOOP &&
1876 imo->imo_num_memberships == 0) {
1877 free(*imop, M_IPMOPTS);
1878 *imop = NULL;
1879 }
1880
1881 return (error);
1882 }
1883
1884 /*
1885 * Return the IP multicast options in response to user getsockopt().
1886 */
1887 int
1888 ip_getmoptions(int optname, struct ip_moptions *imo, struct mbuf **mp)
1889 {
1890 u_char *ttl;
1891 u_char *loop;
1892 struct in_addr *addr;
1893 struct in_ifaddr *ia;
1894
1895 *mp = m_get(M_WAIT, MT_SOOPTS);
1896
1897 switch (optname) {
1898
1899 case IP_MULTICAST_IF:
1900 addr = mtod(*mp, struct in_addr *);
1901 (*mp)->m_len = sizeof(struct in_addr);
1902 if (imo == NULL || imo->imo_multicast_ifp == NULL)
1903 *addr = zeroin_addr;
1904 else if (imo->imo_multicast_addr.s_addr) {
1905 /* return the value user has set */
1906 *addr = imo->imo_multicast_addr;
1907 } else {
1908 IFP_TO_IA(imo->imo_multicast_ifp, ia);
1909 *addr = ia ? ia->ia_addr.sin_addr : zeroin_addr;
1910 }
1911 return (0);
1912
1913 case IP_MULTICAST_TTL:
1914 ttl = mtod(*mp, u_char *);
1915 (*mp)->m_len = 1;
1916 *ttl = imo ? imo->imo_multicast_ttl
1917 : IP_DEFAULT_MULTICAST_TTL;
1918 return (0);
1919
1920 case IP_MULTICAST_LOOP:
1921 loop = mtod(*mp, u_char *);
1922 (*mp)->m_len = 1;
1923 *loop = imo ? imo->imo_multicast_loop
1924 : IP_DEFAULT_MULTICAST_LOOP;
1925 return (0);
1926
1927 default:
1928 return (EOPNOTSUPP);
1929 }
1930 }
1931
1932 /*
1933 * Discard the IP multicast options.
1934 */
1935 void
1936 ip_freemoptions(struct ip_moptions *imo)
1937 {
1938 int i;
1939
1940 if (imo != NULL) {
1941 for (i = 0; i < imo->imo_num_memberships; ++i)
1942 in_delmulti(imo->imo_membership[i]);
1943 free(imo, M_IPMOPTS);
1944 }
1945 }
1946
1947 /*
1948 * Routine called from ip_output() to loop back a copy of an IP multicast
1949 * packet to the input queue of a specified interface. Note that this
1950 * calls the output routine of the loopback "driver", but with an interface
1951 * pointer that might NOT be lo0ifp -- easier than replicating that code here.
1952 */
1953 static void
1954 ip_mloopback(struct ifnet *ifp, struct mbuf *m, struct sockaddr_in *dst)
1955 {
1956 struct ip *ip;
1957 struct mbuf *copym;
1958
1959 copym = m_copy(m, 0, M_COPYALL);
1960 if (copym != NULL
1961 && (copym->m_flags & M_EXT || copym->m_len < sizeof(struct ip)))
1962 copym = m_pullup(copym, sizeof(struct ip));
1963 if (copym != NULL) {
1964 /*
1965 * We don't bother to fragment if the IP length is greater
1966 * than the interface's MTU. Can this possibly matter?
1967 */
1968 ip = mtod(copym, struct ip *);
1969
1970 if (copym->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1971 in_delayed_cksum(copym);
1972 copym->m_pkthdr.csum_flags &=
1973 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
1974 }
1975
1976 ip->ip_sum = 0;
1977 ip->ip_sum = in_cksum(copym, ip->ip_hl << 2);
1978 (void) looutput(ifp, copym, sintosa(dst), NULL);
1979 }
1980 }
1981