/*	$NetBSD: bpf_filter.c,v 1.60 2013/10/05 22:38:52 rmind Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.60 2013/10/05 22:38:52 rmind Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#include <net/bpf.h>

#ifdef _KERNEL

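/*
 * BPF context: holds the table of coprocessor functions that a filter
 * program may invoke via the BPF_COP/BPF_COPX instructions.
 */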
struct bpf_ctx {
	const bpf_copfunc_t *	copfuncs;
	size_t			nfuncs;
};

/* Default BPF context (zeroed). */
static bpf_ctx_t bpf_def_ctx;

bpf_ctx_t *
bpf_create(void)
{
	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
	kmem_free(bc, sizeof(bpf_ctx_t));
}

int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
	bc->copfuncs = funcs;
	bc->nfuncs = n;
	return 0;
}

#endif

#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
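/*
 * MINDEX(): walk the mbuf chain until the mbuf holding byte offset 'k'
 * is found, rewriting 'm' and 'k' to be relative to that mbuf and
 * loading its length into 'len'.  If the chain is exhausted first, the
 * enclosing function returns 0.
 */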
#define MINDEX(len, m, k)		\
{					\
	len = m->m_len;			\
	while (k >= len) {		\
		k -= len;		\
		m = m->m_next;		\
		if (m == 0)		\
			return 0;	\
		len = m->m_len;		\
	}				\
}

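/*
 * m_xword()/m_xhalf()/m_xbyte() extract a 32-bit word, a 16-bit halfword
 * or a byte at offset 'k' from an mbuf chain.  Word and halfword loads
 * may straddle an mbuf boundary; for those, '*err' is set non-zero when
 * the chain is too short to satisfy the load.
 */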
uint32_t m_xword (const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf (const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte (const struct mbuf *, uint32_t, int *);

uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	*err = 0;
	MINDEX(len, m, k);
	return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet;
 * buflen is the amount of data present.
 */
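/*
 * The return value is the number of bytes of the packet to accept:
 * 0 rejects the packet, and (u_int)-1 accepts it in full.
 */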
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return bpf_filter_ext(&bpf_def_ctx, NULL, pc, p, wirelen, buflen);
}

u_int
bpf_filter_ext(bpf_ctx_t *bc, void *arg, const struct bpf_insn *pc,
    const u_char *p, u_int wirelen, u_int buflen)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;
	uint32_t mem[BPF_MEMWORDS];
#ifdef _KERNEL
	const struct mbuf * const m0 = (const struct mbuf *)p;

	KASSERT(bc != NULL);
#endif

	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: safe to leave memwords uninitialised, as the validation
	 * step ensures that it will not be read if it was not written.
	 */
	A = 0;
	X = 0;
	--pc;

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

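		/*
		 * Absolute loads: in the kernel, 'p' is either a flat buffer
		 * (buflen != 0) or the head of an mbuf chain (buflen == 0).
		 * Loads beyond 'buflen' fall back to the m_x*() helpers only
		 * in the mbuf case; otherwise the packet is rejected.
		 */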
		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword(m0, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf(m0, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xbyte(m0, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword(m0, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf(m0, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xbyte(m0, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

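		/*
		 * BPF_LDX|BPF_MSH|BPF_B loads X with 4 * (P[k] & 0xf), the
		 * classic idiom for fetching the length in bytes of an IPv4
		 * header from its IHL field.
		 */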
		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				X = (m_xbyte(m0, k, &merr) & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

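		/*
		 * No divide-by-zero check is needed for BPF_DIV|BPF_K:
		 * bpf_validate() rejects programs with a constant zero
		 * divisor.
		 */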
		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

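		/*
		 * BPF_COP/BPF_COPX: call a kernel "coprocessor" function
		 * registered with bpf_set_cop(), selected by the immediate
		 * operand (COP) or by the X register (COPX).  The function
		 * may inspect the packet and the scratch memory and returns
		 * a new value for A.  An out-of-range index rejects the
		 * packet.
		 */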
		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn((const struct mbuf *)p, arg, A, mem);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn((const struct mbuf *)p, arg, A, mem);
				continue;
			}
#endif
			return 0;
		}
	}
}

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
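/*
 * The kernel validator tracks which of the BPF_MEMWORDS scratch memory
 * words have been initialised using one bit per word in a uint16_t
 * bitmask, hence the assertion below.
 */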
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);

#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(&bpf_def_ctx, f, signed_len);
}

int
bpf_validate_ext(bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	uint16_t *mem, invalid;
	size_t size;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~0;	/* All is invalid on startup */
#endif

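	/*
	 * 'invalid' carries the not-yet-initialised memory words along the
	 * fall-through path; mem[i] accumulates the state that can reach
	 * instruction i via jumps.  Stores clear bits, and loads of a word
	 * whose bit is still set are rejected.
	 */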
	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * More strict check with actual packet length
				 * is done runtime.
				 */
				if (p->k >= BPF_MEMWORDS)
					goto out;
				/* check for current memory invalid */
				if (invalid & (1 << p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~(1 << p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
#if defined(KERNEL) || defined(_KERNEL)
			switch (BPF_MISCOP(p->code)) {
			case BPF_COP:
			case BPF_COPX:
				/* In-kernel COP use only. */
				if (bc->copfuncs) {
					invalid = 0;
				}
				break;
			default:
				break;
			}
#endif
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}