/*	$NetBSD: altq_subr.c,v 1.14.10.1 2006/10/22 06:04:30 yamt Exp $	*/
/*	$KAME: altq_subr.c,v 1.24 2005/04/13 03:44:25 suz Exp $	*/

/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_subr.c,v 1.14.10.1 2006/10/22 06:04:30 yamt Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#include "pf.h"
#endif

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>

#if NPF > 0
#include <net/pfvar.h>
#endif
#include <altq/altq.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/* machine dependent clock related includes */
#ifdef __FreeBSD__
#include "opt_cpu.h"	/* for FreeBSD-2.2.8 to get i586_ctr_freq */
#include <machine/clock.h>
#endif
#if defined(__i386__)
#include <machine/cpufunc.h>		/* for pentium tsc */
#include <machine/specialreg.h>		/* for CPUID_TSC */
#ifdef __FreeBSD__
#include <machine/md_var.h>		/* for cpu_feature */
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#include <machine/cpu.h>		/* for cpu_feature */
#endif
#endif /* __i386__ */

/*
 * internal function prototypes
 */
static void	tbr_timeout(void *);
int (*altq_input)(struct mbuf *, int) = NULL;
static int tbr_timer = 0;	/* token bucket regulator timer */
static struct callout tbr_callout = CALLOUT_INITIALIZER;

#ifdef ALTQ3_CLFIER_COMPAT
static int	extract_ports4(struct mbuf *, struct ip *,
    struct flowinfo_in *);
#ifdef INET6
static int	extract_ports6(struct mbuf *, struct ip6_hdr *,
    struct flowinfo_in6 *);
#endif
static int	apply_filter4(u_int32_t, struct flow_filter *,
    struct flowinfo_in *);
static int	apply_ppfilter4(u_int32_t, struct flow_filter *,
    struct flowinfo_in *);
#ifdef INET6
static int	apply_filter6(u_int32_t, struct flow_filter6 *,
    struct flowinfo_in6 *);
#endif
static int	apply_tosfilter4(u_int32_t, struct flow_filter *,
    struct flowinfo_in *);
static u_long	get_filt_handle(struct acc_classifier *, int);
static struct acc_filter *filth_to_filtp(struct acc_classifier *, u_long);
static u_int32_t filt2fibmask(struct flow_filter *);

static void	ip4f_cache(struct ip *, struct flowinfo_in *);
static int	ip4f_lookup(struct ip *, struct flowinfo_in *);
static int	ip4f_init(void);
static struct ip4_frag *ip4f_alloc(void);
static void	ip4f_free(struct ip4_frag *);
#endif /* ALTQ3_CLFIER_COMPAT */

/*
 * alternate queueing support routines
 */

/* look up the queue state by the interface name and the queueing type. */
void *
altq_lookup(char *name, int type)
{
	struct ifnet *ifp;

	if ((ifp = ifunit(name)) != NULL) {
		if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
			return (ifp->if_snd.altq_disc);
	}

	return NULL;
}

int
altq_attach(struct ifaltq *ifq, int type, void *discipline,
    int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *),
    struct mbuf *(*dequeue)(struct ifaltq *, int),
    int (*request)(struct ifaltq *, int, void *),
    void *clfier, void *(*classify)(void *, struct mbuf *, int))
{
	if (!ALTQ_IS_READY(ifq))
		return ENXIO;

#ifdef ALTQ3_COMPAT
	/*
	 * pfaltq can override the existing discipline, but altq3 cannot.
	 * check these if clfier is not NULL (which implies altq3).
	 */
	if (clfier != NULL) {
		if (ALTQ_IS_ENABLED(ifq))
			return EBUSY;
		if (ALTQ_IS_ATTACHED(ifq))
			return EEXIST;
	}
#endif
	ifq->altq_type     = type;
	ifq->altq_disc     = discipline;
	ifq->altq_enqueue  = enqueue;
	ifq->altq_dequeue  = dequeue;
	ifq->altq_request  = request;
	ifq->altq_clfier   = clfier;
	ifq->altq_classify = classify;
	ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_KLD
	altq_module_incref(type);
#endif
#endif
	return 0;
}

int
altq_detach(struct ifaltq *ifq)
{
	if (!ALTQ_IS_READY(ifq))
		return ENXIO;
	if (ALTQ_IS_ENABLED(ifq))
		return EBUSY;
	if (!ALTQ_IS_ATTACHED(ifq))
		return (0);
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_KLD
	altq_module_declref(ifq->altq_type);
#endif
#endif

	ifq->altq_type     = ALTQT_NONE;
	ifq->altq_disc     = NULL;
	ifq->altq_enqueue  = NULL;
	ifq->altq_dequeue  = NULL;
	ifq->altq_request  = NULL;
	ifq->altq_clfier   = NULL;
	ifq->altq_classify = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	return 0;
}

int
altq_enable(struct ifaltq *ifq)
{
	int s;

	if (!ALTQ_IS_READY(ifq))
		return ENXIO;
	if (ALTQ_IS_ENABLED(ifq))
		return 0;

	s = splnet();
	IFQ_PURGE(ifq);
	ASSERT(ifq->ifq_len == 0);
	ifq->altq_flags |= ALTQF_ENABLED;
	if (ifq->altq_clfier != NULL)
		ifq->altq_flags |= ALTQF_CLASSIFY;
	splx(s);

	return 0;
}

int
altq_disable(struct ifaltq *ifq)
{
	int s;

	if (!ALTQ_IS_ENABLED(ifq))
		return 0;

	s = splnet();
	IFQ_PURGE(ifq);
	ASSERT(ifq->ifq_len == 0);
	ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
	splx(s);
	return 0;
}

#ifdef ALTQ_DEBUG
void
altq_assert(const char *file, int line, const char *failedexpr)
{
	(void)printf("altq assertion \"%s\" failed: file \"%s\", line %d\n",
	    failedexpr, file, line);
	panic("altq assertion");
	/* NOTREACHED */
}
#endif

/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	byte << 32
 *
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
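
/*
 * Worked example (illustrative addition, not from the original source):
 * for a 1 Mbps profile on a 500 MHz machine clock,
 *	tbr_rate = TBR_SCALE(1000000 / 8) / 500000000
 *		 = (125000 << 32) / 500000000
 *		 = 1073741
 * which, unscaled, is about 2.5e-4 bytes of token per clock tick and
 * accumulates back to 125000 bytes (1 Mbps) over one second.
 */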

struct mbuf *
tbr_dequeue(struct ifaltq *ifq, int op)
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	u_int64_t now;

	tbr = ifq->altq_tbr;
	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update token only when it is non-positive */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime)
				tbr->tbr_token = tbr->tbr_depth;
			else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if token is still non-positive, don't allow dequeue */
		if (tbr->tbr_token <= 0)
			return (NULL);
	}

	if (ALTQ_IS_ENABLED(ifq))
		m = (*ifq->altq_dequeue)(ifq, op);
	else {
		if (op == ALTDQ_POLL)
			IF_POLL(ifq, m);
		else
			IF_DEQUEUE(ifq, m);
	}

	if (m != NULL && op == ALTDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;
	return (m);
}

/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
int
tbr_set(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr, *otbr;

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0) {
		printf("tbr_set: no CPU clock available!\n");
		return (ENXIO);
	}

	if (profile->rate == 0) {
		/* delete this tbr */
		if ((tbr = ifq->altq_tbr) == NULL)
			return (ENOENT);
		ifq->altq_tbr = NULL;
		free(tbr, M_DEVBUF);
		return (0);
	}

	tbr = malloc(sizeof(struct tb_regulator), M_DEVBUF, M_WAITOK|M_ZERO);
	if (tbr == NULL)
		return (ENOMEM);

	tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
	tbr->tbr_depth = TBR_SCALE(profile->depth);
	if (tbr->tbr_rate > 0)
		tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
	else
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = ALTDQ_REMOVE;

	otbr = ifq->altq_tbr;
	ifq->altq_tbr = tbr;	/* set the new tbr */

	if (otbr != NULL)
		free(otbr, M_DEVBUF);
	else {
		if (tbr_timer == 0) {
			CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
			tbr_timer = 1;
		}
	}
	return (0);
}
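
/*
 * Illustrative sketch (an addition, not part of the original source):
 * installing a 10 Mbps regulator with a burst depth of about ten
 * full-size ethernet frames.  The function name and the ALTQ_EXAMPLE
 * guard are hypothetical; tb_profile carries rate in bits per second
 * and depth in bytes, as tbr_set() and tbr_get() assume.
 */
#ifdef ALTQ_EXAMPLE
static int
example_set_tbr_10mbps(struct ifaltq *ifq)
{
	struct tb_profile profile;

	profile.rate = 10 * 1000 * 1000;  /* 10 Mbps, in bits per second */
	profile.depth = 10 * 1500;	  /* bucket depth, in bytes */
	return tbr_set(ifq, &profile);
}
#endif /* ALTQ_EXAMPLE */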

/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
static void
tbr_timeout(void *arg __unused)
{
	struct ifnet *ifp;
	int active, s;

	active = 0;
	s = splnet();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
		if (!TBR_IS_ENABLED(&ifp->if_snd))
			continue;
		active++;
		if (!IFQ_IS_EMPTY(&ifp->if_snd) && ifp->if_start != NULL)
			(*ifp->if_start)(ifp);
	}
	splx(s);
	if (active > 0)
		CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
	else
		tbr_timer = 0;	/* don't need tbr_timer anymore */
#if defined(__alpha__) && !defined(ALTQ_NOPCC)
	{
		/*
		 * XXX read out the machine dependent clock once a second
		 * to detect counter wrap-around.
		 */
		static u_int cnt;

		if (++cnt >= hz) {
			(void)read_machclk();
			cnt = 0;
		}
	}
#endif /* __alpha__ && !ALTQ_NOPCC */
}

/*
 * get token bucket regulator profile
 */
int
tbr_get(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr;

	if ((tbr = ifq->altq_tbr) == NULL) {
		profile->rate = 0;
		profile->depth = 0;
	} else {
		profile->rate =
		    (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
		profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
	}
	return (0);
}

#if NPF > 0
/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 */
int
altq_pfattach(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
	case ALTQT_NONE:
		break;
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_pfattach(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_pfattach(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_pfattach(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
int
altq_pfdetach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error = 0;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);

	/* if this discipline is no longer referenced, just return */
	if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	s = splnet();
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		error = altq_disable(&ifp->if_snd);
	if (error == 0)
		error = altq_detach(&ifp->if_snd);
	splx(s);

	return (error);
}

/*
 * add a discipline or a queue
 */
int
altq_add(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_add_queue(a));

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0)
		panic("altq_add: no CPU clock");

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a discipline or a queue
 */
int
altq_remove(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_remove_queue(a));

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * add a queue to the discipline
 */
int
altq_add_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a queue from the discipline
 */
int
altq_remove_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * get queue statistics
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_getqstats(a, ubuf, nbytes);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}
#endif /* NPF > 0 */

/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
u_int8_t
read_dsfield(struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct mbuf *m0;
	u_int8_t ds_field = 0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return ((u_int8_t)0);

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("read_dsfield: can't locate header!\n");
#endif
		return ((u_int8_t)0);
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;

		if (ip->ip_v != 4)
			return ((u_int8_t)0);	/* version mismatch! */
		ds_field = ip->ip_tos;
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return ((u_int8_t)0);	/* version mismatch! */
		ds_field = (flowlabel >> 20) & 0xff;
	}
#endif
	return (ds_field);
}
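
/*
 * Worked example (illustrative addition): the ip6_flow word is laid out
 * as version(4) | traffic class(8) | flow label(20).  For the host-order
 * value 0x6b800000, (0x6b800000 >> 28) == 6 confirms the version, and
 * ((0x6b800000 >> 20) & 0xff) == 0xb8 recovers the traffic class octet
 * (DSCP EF plus ECN bits), matching the extraction above.
 */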

void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, u_int8_t dsfield)
{
	struct mbuf *m0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return;

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("write_dsfield: can't locate header!\n");
#endif
		return;
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
		u_int8_t old;
		int32_t sum;

		if (ip->ip_v != 4)
			return;		/* version mismatch! */
		old = ip->ip_tos;
		dsfield |= old & 3;	/* leave CU bits */
		if (old == dsfield)
			return;
		ip->ip_tos = dsfield;
		/*
		 * update checksum (from RFC1624)
		 *	   HC' = ~(~HC + ~m + m')
		 */
		sum = ~ntohs(ip->ip_sum) & 0xffff;
		sum += 0xff00 + (~old & 0xff) + dsfield;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);  /* add carry */

		ip->ip_sum = htons(~sum & 0xffff);
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return;		/* version mismatch! */
		flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
		ip6->ip6_flow = htonl(flowlabel);
	}
#endif
	return;
}
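
/*
 * Illustrative sketch (an addition, not part of the original source) of
 * the RFC 1624 incremental update used in write_dsfield() above,
 *	HC' = ~(~HC + ~m + m'),
 * generalized to a full 16-bit word.  The function name and the
 * ALTQ_EXAMPLE guard are hypothetical; values are in host byte order.
 */
#ifdef ALTQ_EXAMPLE
static u_int16_t
example_cksum_update(u_int16_t cksum, u_int16_t oldw, u_int16_t neww)
{
	u_int32_t sum;

	sum = (~cksum & 0xffff) + (~oldw & 0xffff) + neww; /* ~HC + ~m + m' */
	sum = (sum >> 16) + (sum & 0xffff);	/* fold the carries back in */
	sum += (sum >> 16);
	return (~sum & 0xffff);			/* HC' */
}
#endif /* ALTQ_EXAMPLE */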


/*
 * high resolution clock support taking advantage of a machine dependent
 * high resolution time counter (e.g., timestamp counter of intel pentium).
 * we assume
 *  - 64-bit-long monotonically-increasing counter
 *  - frequency range is 100M-4GHz (CPU speed)
 */
/* if pcc is not available or disabled, emulate 256MHz using microtime() */
#define	MACHCLK_SHIFT	8

int machclk_usepcc;
u_int32_t machclk_freq = 0;
u_int32_t machclk_per_tick = 0;

#ifdef __alpha__
#ifdef __FreeBSD__
extern u_int32_t cycles_per_sec;	/* alpha cpu clock frequency */
#elif defined(__NetBSD__) || defined(__OpenBSD__)
extern u_int64_t cycles_per_usec;	/* alpha cpu clock frequency */
#endif
#endif /* __alpha__ */

void
init_machclk(void)
{
	machclk_usepcc = 1;

#if (!defined(__i386__) && !defined(__alpha__)) || defined(ALTQ_NOPCC)
	machclk_usepcc = 0;
#endif
#if defined(__FreeBSD__) && defined(SMP)
	machclk_usepcc = 0;
#endif
#if defined(__NetBSD__) && defined(MULTIPROCESSOR)
	machclk_usepcc = 0;
#endif
#ifdef __i386__
	/* check if TSC is available */
	if (machclk_usepcc == 1 && (cpu_feature & CPUID_TSC) == 0)
		machclk_usepcc = 0;
#endif

	if (machclk_usepcc == 0) {
		/* emulate 256MHz using microtime() */
		machclk_freq = 1000000 << MACHCLK_SHIFT;
		machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
		printf("altq: emulate %uHz CPU clock\n", machclk_freq);
#endif
		return;
	}

	/*
	 * if the clock frequency (of Pentium TSC or Alpha PCC) is
	 * accessible, just use it.
	 */
#ifdef __i386__
#ifdef __FreeBSD__
#if (__FreeBSD_version > 300000)
	machclk_freq = tsc_freq;
#else
	machclk_freq = i586_ctr_freq;
#endif
#elif defined(__NetBSD__)
	machclk_freq = (u_int32_t)curcpu()->ci_tsc_freq;
#elif defined(__OpenBSD__) && (defined(I586_CPU) || defined(I686_CPU))
	machclk_freq = pentium_mhz * 1000000;
#endif
#elif defined(__alpha__)
#ifdef __FreeBSD__
	machclk_freq = cycles_per_sec;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	machclk_freq = (u_int32_t)(cycles_per_usec * 1000000);
#endif
#endif /* __alpha__ */

	/*
	 * if we don't know the clock frequency, measure it.
	 */
	if (machclk_freq == 0) {
		static int wait;
		struct timeval tv_start, tv_end;
		u_int64_t start, end, diff;
		int timo;

		microtime(&tv_start);
		start = read_machclk();
		timo = hz;	/* 1 sec */
		(void)tsleep(&wait, PWAIT | PCATCH, "init_machclk", timo);
		microtime(&tv_end);
		end = read_machclk();
		diff = (u_int64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
		    + tv_end.tv_usec - tv_start.tv_usec;
		if (diff != 0)
			machclk_freq = (u_int)((end - start) * 1000000 / diff);
	}

	machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
	printf("altq: CPU clock: %uHz\n", machclk_freq);
#endif
}

#if defined(__OpenBSD__) && defined(__i386__)
static inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;
	__asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
	return (rv);
}
#endif /* __OpenBSD__ && __i386__ */

u_int64_t
read_machclk(void)
{
	u_int64_t val;

	if (machclk_usepcc) {
#if defined(__i386__)
		val = rdtsc();
#elif defined(__alpha__)
		static u_int32_t last_pcc, upper;
		u_int32_t pcc;

		/*
		 * for alpha, make a 64bit counter value out of the 32bit
		 * alpha processor cycle counter.
		 * read_machclk must be called within a half of its
		 * wrap-around cycle (about 5 sec for 400MHz cpu) to properly
		 * detect a counter wrap-around.
		 * tbr_timeout calls read_machclk once a second.
		 */
		pcc = (u_int32_t)alpha_rpcc();
		if (pcc <= last_pcc)
			upper++;
		last_pcc = pcc;
		val = ((u_int64_t)upper << 32) + pcc;
#else
		panic("read_machclk");
#endif
	} else {
		struct timeval tv;

		microtime(&tv);
		val = (((u_int64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
		    + tv.tv_usec) << MACHCLK_SHIFT);
	}
	return (val);
}
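
/*
 * Illustrative sketch (an addition, not part of the original source):
 * converting a machclk interval to microseconds once init_machclk() has
 * determined machclk_freq.  The function name and the ALTQ_EXAMPLE
 * guard are hypothetical.
 */
#ifdef ALTQ_EXAMPLE
static u_int64_t
example_machclk_to_usec(u_int64_t start, u_int64_t end)
{
	/* machclk_freq is in Hz, so ticks * 10^6 / freq yields usec */
	return ((end - start) * 1000000 / machclk_freq);
}
#endif /* ALTQ_EXAMPLE */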

#ifdef ALTQ3_CLFIER_COMPAT

#ifndef IPPROTO_ESP
#define	IPPROTO_ESP	50		/* encapsulating security payload */
#endif
#ifndef IPPROTO_AH
#define	IPPROTO_AH	51		/* authentication header */
#endif

/*
 * extract flow information from a given packet.
 * filt_mask shows flowinfo fields required.
 * we assume the ip header is in one mbuf, and addresses and ports are
 * in network byte order.
 */
int
altq_extractflow(struct mbuf *m, int af, struct flowinfo *flow,
    u_int32_t filt_bmask)
{

	switch (af) {
	case PF_INET: {
		struct flowinfo_in *fin;
		struct ip *ip;

		ip = mtod(m, struct ip *);

		if (ip->ip_v != 4)
			break;

		fin = (struct flowinfo_in *)flow;
		fin->fi_len = sizeof(struct flowinfo_in);
		fin->fi_family = AF_INET;

		fin->fi_proto = ip->ip_p;
		fin->fi_tos = ip->ip_tos;

		fin->fi_src.s_addr = ip->ip_src.s_addr;
		fin->fi_dst.s_addr = ip->ip_dst.s_addr;

		if (filt_bmask & FIMB4_PORTS)
			/* if port info is required, extract port numbers */
			extract_ports4(m, ip, fin);
		else {
			fin->fi_sport = 0;
			fin->fi_dport = 0;
			fin->fi_gpi = 0;
		}
		return (1);
	}

#ifdef INET6
	case PF_INET6: {
		struct flowinfo_in6 *fin6;
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		/* should we check the ip version? */

		fin6 = (struct flowinfo_in6 *)flow;
		fin6->fi6_len = sizeof(struct flowinfo_in6);
		fin6->fi6_family = AF_INET6;

		fin6->fi6_proto = ip6->ip6_nxt;
		fin6->fi6_tclass = (ntohl(ip6->ip6_flow) >> 20) & 0xff;

		fin6->fi6_flowlabel = ip6->ip6_flow & htonl(0x000fffff);
		fin6->fi6_src = ip6->ip6_src;
		fin6->fi6_dst = ip6->ip6_dst;

		if ((filt_bmask & FIMB6_PORTS) ||
		    ((filt_bmask & FIMB6_PROTO)
		     && ip6->ip6_nxt > IPPROTO_IPV6))
			/*
			 * if port info is required, or proto is required
			 * but there are option headers, extract port
			 * and protocol numbers.
			 */
			extract_ports6(m, ip6, fin6);
		else {
			fin6->fi6_sport = 0;
			fin6->fi6_dport = 0;
			fin6->fi6_gpi = 0;
		}
		return (1);
	}
#endif /* INET6 */

	default:
		break;
	}

	/* failed */
	flow->fi_len = sizeof(struct flowinfo);
	flow->fi_family = AF_UNSPEC;
	return (0);
}

/*
 * helper routine to extract port numbers
 */
/* structure for ipsec and ipv6 option header template */
struct _opt6 {
	u_int8_t	opt6_nxt;	/* next header */
	u_int8_t	opt6_hlen;	/* header extension length */
	u_int16_t	_pad;
	u_int32_t	ah_spi;		/* security parameter index
					   for authentication header */
};

/*
 * extract port numbers from an ipv4 packet.
 */
static int
extract_ports4(struct mbuf *m, struct ip *ip, struct flowinfo_in *fin)
{
	struct mbuf *m0;
	u_short ip_off;
	u_int8_t proto;
	int off;

	fin->fi_sport = 0;
	fin->fi_dport = 0;
	fin->fi_gpi = 0;

	ip_off = ntohs(ip->ip_off);
	/* if it is a fragment, try cached fragment info */
	if (ip_off & IP_OFFMASK) {
		ip4f_lookup(ip, fin);
		return (1);
	}

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip >= m0->m_data) &&
		    ((caddr_t)ip < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports4: can't locate header! ip=%p\n", ip);
#endif
		return (0);
	}
	off = ((caddr_t)ip - m0->m_data) + (ip->ip_hl << 2);
	proto = ip->ip_p;

#ifdef ALTQ_IPSEC
 again:
#endif
	while (off >= m0->m_len) {
		off -= m0->m_len;
		m0 = m0->m_next;
		if (m0 == NULL)
			return (0);  /* bogus ip_hl! */
	}
	if (m0->m_len < off + 4)
		return (0);

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP: {
		struct udphdr *udp;

		udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
		fin->fi_sport = udp->uh_sport;
		fin->fi_dport = udp->uh_dport;
		fin->fi_proto = proto;
		}
		break;

#ifdef ALTQ_IPSEC
	case IPPROTO_ESP:
		if (fin->fi_gpi == 0) {
			u_int32_t *gpi;

			gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
			fin->fi_gpi = *gpi;
		}
		fin->fi_proto = proto;
		break;

	case IPPROTO_AH: {
		/* get next header and header length */
		struct _opt6 *opt6;

		opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
		proto = opt6->opt6_nxt;
		off += 8 + (opt6->opt6_hlen * 4);
		if (fin->fi_gpi == 0 && m0->m_len >= off + 8)
			fin->fi_gpi = opt6->ah_spi;
		}
		/* goto the next header */
		goto again;
#endif /* ALTQ_IPSEC */

	default:
		fin->fi_proto = proto;
		return (0);
	}

	/* if this is the first fragment, cache it. */
	if (ip_off & IP_MF)
		ip4f_cache(ip, fin);

	return (1);
}

#ifdef INET6
static int
extract_ports6(struct mbuf *m, struct ip6_hdr *ip6, struct flowinfo_in6 *fin6)
{
	struct mbuf *m0;
	int	off;
	u_int8_t proto;

	fin6->fi6_gpi   = 0;
	fin6->fi6_sport = 0;
	fin6->fi6_dport = 0;

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip6 >= m0->m_data) &&
		    ((caddr_t)ip6 < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports6: can't locate header! ip6=%p\n", ip6);
#endif
		return (0);
	}
	off = ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr);

	proto = ip6->ip6_nxt;
	do {
		while (off >= m0->m_len) {
			off -= m0->m_len;
			m0 = m0->m_next;
			if (m0 == NULL)
				return (0);
		}
		if (m0->m_len < off + 4)
			return (0);

		switch (proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP: {
			struct udphdr *udp;

			udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
			fin6->fi6_sport = udp->uh_sport;
			fin6->fi6_dport = udp->uh_dport;
			fin6->fi6_proto = proto;
			}
			return (1);

		case IPPROTO_ESP:
			if (fin6->fi6_gpi == 0) {
				u_int32_t *gpi;

				gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
				fin6->fi6_gpi = *gpi;
			}
			fin6->fi6_proto = proto;
			return (1);

		case IPPROTO_AH: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			if (fin6->fi6_gpi == 0 && m0->m_len >= off + 8)
				fin6->fi6_gpi = opt6->ah_spi;
			proto = opt6->opt6_nxt;
			off += 8 + (opt6->opt6_hlen * 4);
			/* goto the next header */
			break;
			}

		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			proto = opt6->opt6_nxt;
			off += (opt6->opt6_hlen + 1) * 8;
			/* goto the next header */
			break;
			}

		case IPPROTO_FRAGMENT:
			/* ipv6 fragmentations are not supported yet */
		default:
			fin6->fi6_proto = proto;
			return (0);
		}
	} while (1);
	/*NOTREACHED*/
}
#endif /* INET6 */

/*
 * altq common classifier
 */
int
acc_add_filter(struct acc_classifier *classifier, struct flow_filter *filter,
    void *class, u_long *phandle)
{
	struct acc_filter *afp, *prev, *tmp;
	int	i, s;

#ifdef INET6
	if (filter->ff_flow.fi_family != AF_INET &&
	    filter->ff_flow.fi_family != AF_INET6)
		return (EINVAL);
#else
	if (filter->ff_flow.fi_family != AF_INET)
		return (EINVAL);
#endif

	afp = malloc(sizeof(struct acc_filter), M_DEVBUF, M_WAITOK|M_ZERO);
	if (afp == NULL)
		return (ENOMEM);

	afp->f_filter = *filter;
	afp->f_class = class;

	i = ACC_WILDCARD_INDEX;
	if (filter->ff_flow.fi_family == AF_INET) {
		struct flow_filter *filter4 = &afp->f_filter;

		/*
		 * if address is 0, it's a wildcard.  if address mask
		 * isn't set, use full mask.
		 */
		if (filter4->ff_flow.fi_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0;
		else if (filter4->ff_mask.mask_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0xffffffff;
		if (filter4->ff_flow.fi_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0;
		else if (filter4->ff_mask.mask_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0xffffffff;

		/* clear extra bits in addresses */
		filter4->ff_flow.fi_dst.s_addr &=
		    filter4->ff_mask.mask_dst.s_addr;
		filter4->ff_flow.fi_src.s_addr &=
		    filter4->ff_mask.mask_src.s_addr;

		/*
		 * if dst address is a wildcard, use hash-entry
		 * ACC_WILDCARD_INDEX.
		 */
		if (filter4->ff_mask.mask_dst.s_addr != 0xffffffff)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter4->ff_flow.fi_dst.s_addr);
	}
#ifdef INET6
	else if (filter->ff_flow.fi_family == AF_INET6) {
		struct flow_filter6 *filter6 =
		    (struct flow_filter6 *)&afp->f_filter;
#ifndef IN6MASK0 /* taken from kame ipv6 */
#define	IN6MASK0	{{{ 0, 0, 0, 0 }}}
#define	IN6MASK128	{{{ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }}}
		const struct in6_addr in6mask0 = IN6MASK0;
		const struct in6_addr in6mask128 = IN6MASK128;
#endif

		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_dst))
			filter6->ff_mask6.mask6_dst = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_dst))
			filter6->ff_mask6.mask6_dst = in6mask128;
		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_src))
			filter6->ff_mask6.mask6_src = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_src))
			filter6->ff_mask6.mask6_src = in6mask128;

		/* clear extra bits in addresses */
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_dst.s6_addr[i] &=
			    filter6->ff_mask6.mask6_dst.s6_addr[i];
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_src.s6_addr[i] &=
			    filter6->ff_mask6.mask6_src.s6_addr[i];

		if (filter6->ff_flow6.fi6_flowlabel == 0)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter6->ff_flow6.fi6_flowlabel);
	}
#endif /* INET6 */

	afp->f_handle = get_filt_handle(classifier, i);

	/* update filter bitmask */
	afp->f_fbmask = filt2fibmask(filter);
	classifier->acc_fbmask |= afp->f_fbmask;

	/*
	 * add this filter to the filter list.
	 * filters are ordered from the highest rule number.
	 */
	s = splnet();
	prev = NULL;
	LIST_FOREACH(tmp, &classifier->acc_filters[i], f_chain) {
		if (tmp->f_filter.ff_ruleno > afp->f_filter.ff_ruleno)
			prev = tmp;
		else
			break;
	}
	if (prev == NULL)
		LIST_INSERT_HEAD(&classifier->acc_filters[i], afp, f_chain);
	else
		LIST_INSERT_AFTER(prev, afp, f_chain);
	splx(s);

	*phandle = afp->f_handle;
	return (0);
}
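
/*
 * Illustrative sketch (an addition, not part of the original source):
 * registering a filter that matches IPv4 TCP traffic to port 80.  The
 * function name and the ALTQ_EXAMPLE guard are hypothetical; note that
 * ports are supplied in network byte order, as altq_extractflow()
 * assumes, and that an unset address with an unset mask is treated as a
 * wildcard by acc_add_filter() above.
 */
#ifdef ALTQ_EXAMPLE
static int
example_add_http_filter(struct acc_classifier *clfier, void *class,
    u_long *phandle)
{
	struct flow_filter filt;

	(void)memset(&filt, 0, sizeof(filt));
	filt.ff_flow.fi_family = AF_INET;
	filt.ff_flow.fi_proto = IPPROTO_TCP;
	filt.ff_flow.fi_dport = htons(80);
	return acc_add_filter(clfier, &filt, class, phandle);
}
#endif /* ALTQ_EXAMPLE */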

int
acc_delete_filter(struct acc_classifier *classifier, u_long handle)
{
	struct acc_filter *afp;
	int	s;

	if ((afp = filth_to_filtp(classifier, handle)) == NULL)
		return (EINVAL);

	s = splnet();
	LIST_REMOVE(afp, f_chain);
	splx(s);

	free(afp, M_DEVBUF);

	/* todo: update filt_bmask */

	return (0);
}

/*
 * delete filters referencing the specified class.
 * if the all flag is not 0, delete all the filters.
 */
int
acc_discard_filters(struct acc_classifier *classifier, void *class, int all)
{
	struct acc_filter *afp;
	int	i, s;

	s = splnet();
	for (i = 0; i < ACC_FILTER_TABLESIZE; i++) {
		do {
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (all || afp->f_class == class) {
					LIST_REMOVE(afp, f_chain);
					free(afp, M_DEVBUF);
					/* start again from the head */
					break;
				}
		} while (afp != NULL);
	}
	splx(s);

	if (all)
		classifier->acc_fbmask = 0;

	return (0);
}

void *
acc_classify(void *clfier, struct mbuf *m, int af)
{
	struct acc_classifier *classifier;
	struct flowinfo flow;
	struct acc_filter *afp;
	int	i;

	classifier = (struct acc_classifier *)clfier;
	altq_extractflow(m, af, &flow, classifier->acc_fbmask);

	if (flow.fi_family == AF_INET) {
		struct flowinfo_in *fp = (struct flowinfo_in *)&flow;

		if ((classifier->acc_fbmask & FIMB4_ALL) == FIMB4_TOS) {
			/* only tos is used */
			LIST_FOREACH(afp,
			    &classifier->acc_filters[ACC_WILDCARD_INDEX],
			    f_chain)
				if (apply_tosfilter4(afp->f_fbmask,
				    &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else if ((classifier->acc_fbmask &
		    (~(FIMB4_PROTO|FIMB4_SPORT|FIMB4_DPORT) & FIMB4_ALL))
		    == 0) {
			/* only proto and ports are used */
			LIST_FOREACH(afp,
			    &classifier->acc_filters[ACC_WILDCARD_INDEX],
			    f_chain)
				if (apply_ppfilter4(afp->f_fbmask,
				    &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else {
			/* get the filter hash entry from its dest address */
			i = ACC_GET_HASH_INDEX(fp->fi_dst.s_addr);
			do {
				/*
				 * go through this loop twice.  first for dst
				 * hash, second for wildcards.
				 */
				LIST_FOREACH(afp, &classifier->acc_filters[i],
				    f_chain)
					if (apply_filter4(afp->f_fbmask,
					    &afp->f_filter, fp))
						/* filter matched */
						return (afp->f_class);

				/*
				 * check again for filters with a dst addr
				 * wildcard.
				 * (daddr == 0 || dmask != 0xffffffff).
				 */
				if (i != ACC_WILDCARD_INDEX)
					i = ACC_WILDCARD_INDEX;
				else
					break;
			} while (1);
		}
	}
#ifdef INET6
	else if (flow.fi_family == AF_INET6) {
		struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)&flow;

		/* get the filter hash entry from its flow ID */
		if (fp6->fi6_flowlabel != 0)
			i = ACC_GET_HASH_INDEX(fp6->fi6_flowlabel);
		else
			/* flowlabel can be zero */
			i = ACC_WILDCARD_INDEX;

		/* go through this loop twice.  first for flow hash, second
		   for wildcards. */
		do {
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (apply_filter6(afp->f_fbmask,
				    (struct flow_filter6 *)&afp->f_filter,
				    fp6))
					/* filter matched */
					return (afp->f_class);

			/*
			 * check again for filters with a wildcard.
			 */
			if (i != ACC_WILDCARD_INDEX)
				i = ACC_WILDCARD_INDEX;
			else
				break;
		} while (1);
	}
#endif /* INET6 */

	/* no filter matched */
	return (NULL);
}

static int
apply_filter4(u_int32_t fbmask, struct flow_filter *filt,
    struct flowinfo_in *pkt)
{
	if (filt->ff_flow.fi_family != AF_INET)
		return (0);
	if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport)
		return (0);
	if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport)
		return (0);
	if ((fbmask & FIMB4_DADDR) &&
	    filt->ff_flow.fi_dst.s_addr !=
	    (pkt->fi_dst.s_addr & filt->ff_mask.mask_dst.s_addr))
		return (0);
	if ((fbmask & FIMB4_SADDR) &&
	    filt->ff_flow.fi_src.s_addr !=
	    (pkt->fi_src.s_addr & filt->ff_mask.mask_src.s_addr))
		return (0);
	if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto)
		return (0);
	if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos !=
	    (pkt->fi_tos & filt->ff_mask.mask_tos))
		return (0);
	if ((fbmask & FIMB4_GPI) && filt->ff_flow.fi_gpi != (pkt->fi_gpi))
		return (0);
	/* match */
	return (1);
}

/*
 * filter matching function optimized for a common case that checks
 * only protocol and port numbers
 */
static int
apply_ppfilter4(u_int32_t fbmask, struct flow_filter *filt,
    struct flowinfo_in *pkt)
{
	if (filt->ff_flow.fi_family != AF_INET)
		return (0);
	if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport)
		return (0);
	if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport)
		return (0);
	if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto)
		return (0);
	/* match */
	return (1);
}

/*
 * filter matching function only for tos field.
 */
static int
apply_tosfilter4(u_int32_t fbmask, struct flow_filter *filt,
    struct flowinfo_in *pkt)
{
	if (filt->ff_flow.fi_family != AF_INET)
		return (0);
	if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos !=
	    (pkt->fi_tos & filt->ff_mask.mask_tos))
		return (0);
	/* match */
	return (1);
}

#ifdef INET6
static int
apply_filter6(u_int32_t fbmask, struct flow_filter6 *filt,
    struct flowinfo_in6 *pkt)
{
	int i;

	if (filt->ff_flow6.fi6_family != AF_INET6)
		return (0);
	if ((fbmask & FIMB6_FLABEL) &&
	    filt->ff_flow6.fi6_flowlabel != pkt->fi6_flowlabel)
		return (0);
	if ((fbmask & FIMB6_PROTO) &&
	    filt->ff_flow6.fi6_proto != pkt->fi6_proto)
		return (0);
	if ((fbmask & FIMB6_SPORT) &&
	    filt->ff_flow6.fi6_sport != pkt->fi6_sport)
		return (0);
	if ((fbmask & FIMB6_DPORT) &&
	    filt->ff_flow6.fi6_dport != pkt->fi6_dport)
		return (0);
	if (fbmask & FIMB6_SADDR) {
		for (i = 0; i < 4; i++)
			if (filt->ff_flow6.fi6_src.s6_addr32[i] !=
			    (pkt->fi6_src.s6_addr32[i] &
			     filt->ff_mask6.mask6_src.s6_addr32[i]))
				return (0);
	}
	if (fbmask & FIMB6_DADDR) {
		for (i = 0; i < 4; i++)
			if (filt->ff_flow6.fi6_dst.s6_addr32[i] !=
			    (pkt->fi6_dst.s6_addr32[i] &
			     filt->ff_mask6.mask6_dst.s6_addr32[i]))
				return (0);
	}
	if ((fbmask & FIMB6_TCLASS) &&
	    filt->ff_flow6.fi6_tclass !=
	    (pkt->fi6_tclass & filt->ff_mask6.mask6_tclass))
		return (0);
	if ((fbmask & FIMB6_GPI) &&
	    filt->ff_flow6.fi6_gpi != pkt->fi6_gpi)
		return (0);
	/* match */
	return (1);
}
#endif /* INET6 */

/*
 * filter handle:
 *	bit 20-28: index to the filter hash table
 *	bit  0-19: unique id in the hash bucket.
 */
static u_long
get_filt_handle(struct acc_classifier *classifier, int i)
{
	static u_long handle_number = 1;
	u_long	handle;
	struct acc_filter *afp;

	while (1) {
		handle = handle_number++ & 0x000fffff;

		if (LIST_EMPTY(&classifier->acc_filters[i]))
			break;

		LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
			if ((afp->f_handle & 0x000fffff) == handle)
				break;
		if (afp == NULL)
			break;
		/* this handle is already used, try again */
	}

	return ((i << 20) | handle);
}

/* convert filter handle to filter pointer */
static struct acc_filter *
filth_to_filtp(struct acc_classifier *classifier, u_long handle)
{
	struct acc_filter *afp;
	int	i;

	i = ACC_GET_HINDEX(handle);

	LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
		if (afp->f_handle == handle)
			return (afp);

	return (NULL);
}
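
/*
 * Worked example (illustrative addition): for hash index i == 5 and
 * bucket-local id 42, get_filt_handle() returns (5 << 20) | 42 ==
 * 0x50002a; ACC_GET_HINDEX() then recovers 5 from bits 20-28, so
 * filth_to_filtp() searches the correct bucket.
 */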

/* create flowinfo bitmask */
static u_int32_t
filt2fibmask(struct flow_filter *filt)
{
	u_int32_t mask = 0;
#ifdef INET6
	struct flow_filter6 *filt6;
#endif

	switch (filt->ff_flow.fi_family) {
	case AF_INET:
		if (filt->ff_flow.fi_proto != 0)
			mask |= FIMB4_PROTO;
		if (filt->ff_flow.fi_tos != 0)
			mask |= FIMB4_TOS;
		if (filt->ff_flow.fi_dst.s_addr != 0)
			mask |= FIMB4_DADDR;
		if (filt->ff_flow.fi_src.s_addr != 0)
			mask |= FIMB4_SADDR;
		if (filt->ff_flow.fi_sport != 0)
			mask |= FIMB4_SPORT;
		if (filt->ff_flow.fi_dport != 0)
			mask |= FIMB4_DPORT;
		if (filt->ff_flow.fi_gpi != 0)
			mask |= FIMB4_GPI;
		break;
#ifdef INET6
	case AF_INET6:
		filt6 = (struct flow_filter6 *)filt;

		if (filt6->ff_flow6.fi6_proto != 0)
			mask |= FIMB6_PROTO;
		if (filt6->ff_flow6.fi6_tclass != 0)
			mask |= FIMB6_TCLASS;
		if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_dst))
			mask |= FIMB6_DADDR;
		if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_src))
			mask |= FIMB6_SADDR;
		if (filt6->ff_flow6.fi6_sport != 0)
			mask |= FIMB6_SPORT;
		if (filt6->ff_flow6.fi6_dport != 0)
			mask |= FIMB6_DPORT;
		if (filt6->ff_flow6.fi6_gpi != 0)
			mask |= FIMB6_GPI;
		if (filt6->ff_flow6.fi6_flowlabel != 0)
			mask |= FIMB6_FLABEL;
		break;
#endif /* INET6 */
	}
	return (mask);
}


/*
 * helper functions to handle IPv4 fragments.
 * currently only in-sequence fragments are handled.
 *	- fragment info is cached in a LRU list.
 *	- when a first fragment is found, cache its flow info.
 *	- when a non-first fragment is found, lookup the cache.
 */

struct ip4_frag {
	TAILQ_ENTRY(ip4_frag) ip4f_chain;
	char    ip4f_valid;
	u_short ip4f_id;
	struct flowinfo_in ip4f_info;
};

static TAILQ_HEAD(ip4f_list, ip4_frag) ip4f_list; /* IPv4 fragment cache */

#define	IP4F_TABSIZE		16	/* IPv4 fragment cache size */


static void
ip4f_cache(struct ip *ip, struct flowinfo_in *fin)
{
	struct ip4_frag *fp;

	if (TAILQ_EMPTY(&ip4f_list)) {
		/* first time call, allocate fragment cache entries. */
		if (ip4f_init() < 0)
			/* allocation failed! */
			return;
	}

	fp = ip4f_alloc();
	fp->ip4f_id = ip->ip_id;
	fp->ip4f_info.fi_proto = ip->ip_p;
	fp->ip4f_info.fi_src.s_addr = ip->ip_src.s_addr;
	fp->ip4f_info.fi_dst.s_addr = ip->ip_dst.s_addr;

	/* save port numbers */
	fp->ip4f_info.fi_sport = fin->fi_sport;
	fp->ip4f_info.fi_dport = fin->fi_dport;
	fp->ip4f_info.fi_gpi   = fin->fi_gpi;
}

static int
ip4f_lookup(struct ip *ip, struct flowinfo_in *fin)
{
	struct ip4_frag *fp;

	for (fp = TAILQ_FIRST(&ip4f_list); fp != NULL && fp->ip4f_valid;
	     fp = TAILQ_NEXT(fp, ip4f_chain))
		if (ip->ip_id == fp->ip4f_id &&
		    ip->ip_src.s_addr == fp->ip4f_info.fi_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ip4f_info.fi_dst.s_addr &&
		    ip->ip_p == fp->ip4f_info.fi_proto) {

			/* found the matching entry */
			fin->fi_sport = fp->ip4f_info.fi_sport;
			fin->fi_dport = fp->ip4f_info.fi_dport;
			fin->fi_gpi   = fp->ip4f_info.fi_gpi;

			if ((ntohs(ip->ip_off) & IP_MF) == 0)
				/* this is the last fragment,
				   release the entry. */
				ip4f_free(fp);

			return (1);
		}

	/* no matching entry found */
	return (0);
}

static int
ip4f_init(void)
{
	struct ip4_frag *fp;
	int i;

	TAILQ_INIT(&ip4f_list);
	for (i = 0; i < IP4F_TABSIZE; i++) {
		fp = malloc(sizeof(struct ip4_frag), M_DEVBUF, M_NOWAIT);
		if (fp == NULL) {
			printf("ip4f_init: can't alloc %dth entry!\n", i);
			if (i == 0)
				return (-1);
			return (0);
		}
		fp->ip4f_valid = 0;
		TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain);
	}
	return (0);
}

static struct ip4_frag *
ip4f_alloc(void)
{
	struct ip4_frag *fp;

	/* reclaim an entry at the tail, put it at the head */
	fp = TAILQ_LAST(&ip4f_list, ip4f_list);
	TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain);
	fp->ip4f_valid = 1;
	TAILQ_INSERT_HEAD(&ip4f_list, fp, ip4f_chain);
	return (fp);
}

static void
ip4f_free(struct ip4_frag *fp)
{
	TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain);
	fp->ip4f_valid = 0;
	TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain);
}

#endif /* ALTQ3_CLFIER_COMPAT */