/* $NetBSD: bpf_filter.c,v 1.68 2014/11/19 19:35:21 christos Exp $ */

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.68 2014/11/19 19:35:21 christos Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#define __BPF_PRIVATE
#include <net/bpf.h>

#ifdef _KERNEL

bpf_ctx_t *
bpf_create(void)
{
        return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
        kmem_free(bc, sizeof(bpf_ctx_t));
}

int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
        bc->copfuncs = funcs;
        bc->nfuncs = n;
        return 0;
}

int
bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
{
        if (nwords > BPF_MAX_MEMWORDS || (preinited >> nwords) != 0) {
                return EINVAL;
        }
        bc->extwords = nwords;
        bc->preinited = preinited;
        return 0;
}
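
/*
 * Example (hypothetical usage sketch, not part of this file): a kernel
 * subsystem would typically allocate a context, register its coprocessor
 * functions and external memory store, and destroy the context when done.
 * The callback name "my_copfunc" and the array "my_copfuncs" are
 * illustrative only; the bpf_copfunc_t prototype is inferred from how
 * the callbacks are invoked in bpf_filter_ext() below.
 *
 *	static uint32_t
 *	my_copfunc(const bpf_ctx_t *bc, bpf_args_t *args, uint32_t A)
 *	{
 *		return A;	(trivial coprocessor: pass the accumulator through)
 *	}
 *
 *	static const bpf_copfunc_t my_copfuncs[] = { my_copfunc };
 *
 *	bpf_ctx_t *bc = bpf_create();
 *	bpf_set_cop(bc, my_copfuncs, __arraycount(my_copfuncs));
 *	bpf_set_extmem(bc, 2, BPF_MEMWORD_INIT(0));	(2 words, word 0 pre-initialised)
 *	...
 *	bpf_destroy(bc);
 */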

#endif

#define EXTRACT_SHORT(p) be16dec(p)
#define EXTRACT_LONG(p) be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
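
/*
 * MINDEX(): advance m along the mbuf chain until offset k falls within
 * the current mbuf, reducing k to an offset relative to that mbuf and
 * setting len to that mbuf's length.  If the chain is exhausted first,
 * the enclosing function returns 0.
 */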
#define MINDEX(len, m, k) \
{ \
        len = m->m_len; \
        while (k >= len) { \
                k -= len; \
                m = m->m_next; \
                if (m == 0) \
                        return 0; \
                len = m->m_len; \
        } \
}

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))

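/*
 * m_xword(), m_xhalf() and m_xbyte() fetch a 32-bit word, a 16-bit
 * halfword or a single byte located at offset k of an mbuf chain,
 * converting multi-byte values from network byte order.  m_xword() and
 * m_xhalf() handle values that straddle an mbuf boundary.  On success
 * *err is cleared; if the offset lies beyond the chain or not enough
 * data is present, *err is set to 1 and 0 is returned.
 */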
uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
        int len;
        u_char *cp, *np;
        struct mbuf *m0;

        *err = 1;
        MINDEX(len, m, k);
        cp = mtod(m, u_char *) + k;
        if (len - k >= 4) {
                *err = 0;
                return EXTRACT_LONG(cp);
        }
        m0 = m->m_next;
        if (m0 == 0 || (len - k) + m0->m_len < 4)
                return 0;
        *err = 0;
        np = mtod(m0, u_char *);

        switch (len - k) {
        case 1:
                return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
        case 2:
                return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
        default:
                return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
        }
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
        int len;
        u_char *cp;
        struct mbuf *m0;

        *err = 1;
        MINDEX(len, m, k);
        cp = mtod(m, u_char *) + k;
        if (len - k >= 2) {
                *err = 0;
                return EXTRACT_SHORT(cp);
        }
        m0 = m->m_next;
        if (m0 == 0)
                return 0;
        *err = 0;
        return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
        int len;

        *err = 1;
        MINDEX(len, m, k);
        *err = 0;
        return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p
 * wirelen is the length of the original packet
 * buflen is the amount of data present
 */
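/*
 * Example (hypothetical usage sketch): run the classic single-instruction
 * "accept every packet, snapshot 64 bytes" program over a captured buffer.
 * The names "pkt" and "pktlen" are illustrative, not symbols defined here.
 *
 *	static const struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 64),
 *	};
 *
 *	u_int snap = bpf_filter(insns, pkt, pktlen, pktlen);
 *	(snap == 0 means "reject"; otherwise it is the snapshot length)
 */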
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
        uint32_t mem[BPF_MEMWORDS];
        bpf_args_t args = {
                .pkt = p,
                .wirelen = wirelen,
                .buflen = buflen,
                .mem = mem,
                .arg = NULL
        };

        return bpf_filter_ext(NULL, pc, &args);
}

u_int
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
        uint32_t A, X, k;
#ifndef _KERNEL
        uint32_t mem[BPF_MEMWORDS];
        bpf_args_t args_store = {
                .pkt = p,
                .wirelen = wirelen,
                .buflen = buflen,
                .mem = mem,
                .arg = NULL
        };
        bpf_args_t * const args = &args_store;
#else
        const uint8_t * const p = args->pkt;
#endif
        if (pc == 0) {
                /*
                 * No filter means accept all.
                 */
                return (u_int)-1;
        }

        /*
         * Note: safe to leave memwords uninitialised, as the validation
         * step ensures that it will not be read, if it was not written.
         */
        A = 0;
        X = 0;
        --pc;

        for (;;) {
                ++pc;
                switch (pc->code) {

                default:
#ifdef _KERNEL
                        return 0;
#else
                        abort();
                        /*NOTREACHED*/
#endif
                case BPF_RET|BPF_K:
                        return (u_int)pc->k;

                case BPF_RET|BPF_A:
                        return (u_int)A;

                case BPF_LD|BPF_W|BPF_ABS:
                        k = pc->k;
                        if (k > args->buflen ||
                            sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
                                int merr;

                                if (args->buflen != 0)
                                        return 0;
                                A = xword(args->pkt, k, &merr);
                                if (merr != 0)
                                        return 0;
                                continue;
#else
                                return 0;
#endif
                        }
                        A = EXTRACT_LONG(&p[k]);
                        continue;

                case BPF_LD|BPF_H|BPF_ABS:
                        k = pc->k;
                        if (k > args->buflen ||
                            sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
                                int merr;

                                if (args->buflen != 0)
                                        return 0;
                                A = xhalf(args->pkt, k, &merr);
                                if (merr != 0)
                                        return 0;
                                continue;
#else
                                return 0;
#endif
                        }
                        A = EXTRACT_SHORT(&p[k]);
                        continue;

                case BPF_LD|BPF_B|BPF_ABS:
                        k = pc->k;
                        if (k >= args->buflen) {
#ifdef _KERNEL
                                int merr;

                                if (args->buflen != 0)
                                        return 0;
                                A = xbyte(args->pkt, k, &merr);
                                if (merr != 0)
                                        return 0;
                                continue;
#else
                                return 0;
#endif
                        }
                        A = p[k];
                        continue;

                case BPF_LD|BPF_W|BPF_LEN:
                        A = args->wirelen;
                        continue;

                case BPF_LDX|BPF_W|BPF_LEN:
                        X = args->wirelen;
                        continue;

                case BPF_LD|BPF_W|BPF_IND:
                        k = X + pc->k;
                        if (k < X || k >= args->buflen ||
                            sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
                                int merr;

                                if (k < X || args->buflen != 0)
                                        return 0;
                                A = xword(args->pkt, k, &merr);
                                if (merr != 0)
                                        return 0;
                                continue;
#else
                                return 0;
#endif
                        }
                        A = EXTRACT_LONG(&p[k]);
                        continue;

                case BPF_LD|BPF_H|BPF_IND:
                        k = X + pc->k;
                        if (k < X || k >= args->buflen ||
                            sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
                                int merr;

                                if (k < X || args->buflen != 0)
                                        return 0;
                                A = xhalf(args->pkt, k, &merr);
                                if (merr != 0)
                                        return 0;
                                continue;
#else
                                return 0;
#endif
                        }
                        A = EXTRACT_SHORT(&p[k]);
                        continue;

                case BPF_LD|BPF_B|BPF_IND:
                        k = X + pc->k;
                        if (k < X || k >= args->buflen) {
#ifdef _KERNEL
                                int merr;

                                if (k < X || args->buflen != 0)
                                        return 0;
                                A = xbyte(args->pkt, k, &merr);
                                if (merr != 0)
                                        return 0;
                                continue;
#else
                                return 0;
#endif
                        }
                        A = p[k];
                        continue;

                case BPF_LDX|BPF_MSH|BPF_B:
                        k = pc->k;
                        if (k >= args->buflen) {
#ifdef _KERNEL
                                int merr;

                                if (args->buflen != 0)
                                        return 0;
                                X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
                                if (merr != 0)
                                        return 0;
                                continue;
#else
                                return 0;
#endif
                        }
                        X = (p[pc->k] & 0xf) << 2;
                        continue;

                case BPF_LD|BPF_IMM:
                        A = pc->k;
                        continue;

                case BPF_LDX|BPF_IMM:
                        X = pc->k;
                        continue;

                case BPF_LD|BPF_MEM:
                        A = args->mem[pc->k];
                        continue;

                case BPF_LDX|BPF_MEM:
                        X = args->mem[pc->k];
                        continue;

                case BPF_ST:
                        args->mem[pc->k] = A;
                        continue;

                case BPF_STX:
                        args->mem[pc->k] = X;
                        continue;

                case BPF_JMP|BPF_JA:
                        pc += pc->k;
                        continue;

                case BPF_JMP|BPF_JGT|BPF_K:
                        pc += (A > pc->k) ? pc->jt : pc->jf;
                        continue;

                case BPF_JMP|BPF_JGE|BPF_K:
                        pc += (A >= pc->k) ? pc->jt : pc->jf;
                        continue;

                case BPF_JMP|BPF_JEQ|BPF_K:
                        pc += (A == pc->k) ? pc->jt : pc->jf;
                        continue;

                case BPF_JMP|BPF_JSET|BPF_K:
                        pc += (A & pc->k) ? pc->jt : pc->jf;
                        continue;

                case BPF_JMP|BPF_JGT|BPF_X:
                        pc += (A > X) ? pc->jt : pc->jf;
                        continue;

                case BPF_JMP|BPF_JGE|BPF_X:
                        pc += (A >= X) ? pc->jt : pc->jf;
                        continue;

                case BPF_JMP|BPF_JEQ|BPF_X:
                        pc += (A == X) ? pc->jt : pc->jf;
                        continue;

                case BPF_JMP|BPF_JSET|BPF_X:
                        pc += (A & X) ? pc->jt : pc->jf;
                        continue;

                case BPF_ALU|BPF_ADD|BPF_X:
                        A += X;
                        continue;

                case BPF_ALU|BPF_SUB|BPF_X:
                        A -= X;
                        continue;

                case BPF_ALU|BPF_MUL|BPF_X:
                        A *= X;
                        continue;

                case BPF_ALU|BPF_DIV|BPF_X:
                        if (X == 0)
                                return 0;
                        A /= X;
                        continue;

                case BPF_ALU|BPF_MOD|BPF_X:
                        if (X == 0)
                                return 0;
                        A %= X;
                        continue;

                case BPF_ALU|BPF_AND|BPF_X:
                        A &= X;
                        continue;

                case BPF_ALU|BPF_OR|BPF_X:
                        A |= X;
                        continue;

                case BPF_ALU|BPF_XOR|BPF_X:
                        A ^= X;
                        continue;

                case BPF_ALU|BPF_LSH|BPF_X:
                        A <<= X;
                        continue;

                case BPF_ALU|BPF_RSH|BPF_X:
                        A >>= X;
                        continue;

                case BPF_ALU|BPF_ADD|BPF_K:
                        A += pc->k;
                        continue;

                case BPF_ALU|BPF_SUB|BPF_K:
                        A -= pc->k;
                        continue;

                case BPF_ALU|BPF_MUL|BPF_K:
                        A *= pc->k;
                        continue;

                case BPF_ALU|BPF_DIV|BPF_K:
                        A /= pc->k;
                        continue;

                case BPF_ALU|BPF_MOD|BPF_K:
                        A %= pc->k;
                        continue;

                case BPF_ALU|BPF_AND|BPF_K:
                        A &= pc->k;
                        continue;

                case BPF_ALU|BPF_OR|BPF_K:
                        A |= pc->k;
                        continue;

                case BPF_ALU|BPF_XOR|BPF_K:
                        A ^= pc->k;
                        continue;

                case BPF_ALU|BPF_LSH|BPF_K:
                        A <<= pc->k;
                        continue;

                case BPF_ALU|BPF_RSH|BPF_K:
                        A >>= pc->k;
                        continue;

                case BPF_ALU|BPF_NEG:
                        A = -A;
                        continue;

                case BPF_MISC|BPF_TAX:
                        X = A;
                        continue;

                case BPF_MISC|BPF_TXA:
                        A = X;
                        continue;

                case BPF_MISC|BPF_COP:
#ifdef _KERNEL
                        if (pc->k < bc->nfuncs) {
                                const bpf_copfunc_t fn = bc->copfuncs[pc->k];
                                A = fn(bc, args, A);
                                continue;
                        }
#endif
                        return 0;

                case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
                        if (X < bc->nfuncs) {
                                const bpf_copfunc_t fn = bc->copfuncs[X];
                                A = fn(bc, args, A);
                                continue;
                        }
#endif
                        return 0;
                }
        }
}

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
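/*
 * Example (hypothetical usage sketch): a minimal program that passes
 * validation -- load the 16-bit EtherType at offset 12, accept the whole
 * packet if it is IPv4 (0x0800), otherwise reject it.  Every jump is
 * forward and the program ends with BPF_RET instructions, so
 * bpf_validate() returns 1.
 *
 *	static const struct bpf_insn prog[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *
 *	int ok = bpf_validate(prog, __arraycount(prog));
 */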

#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
        return bpf_validate_ext(NULL, f, signed_len);
}

int
bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
        u_int i, from, len, ok = 0;
        const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
        bpf_memword_init_t *mem, invalid;
        size_t size;
        const size_t extwords = bc ? bc->extwords : 0;
        const size_t memwords = extwords ? extwords : BPF_MEMWORDS;
        const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;
#else
        const size_t memwords = BPF_MEMWORDS;
#endif

        len = (u_int)signed_len;
        if (len < 1)
                return 0;
#if defined(KERNEL) || defined(_KERNEL)
        if (len > BPF_MAXINSNS)
                return 0;
#endif
        if (BPF_CLASS(f[len - 1].code) != BPF_RET)
                return 0;

#if defined(KERNEL) || defined(_KERNEL)
        /* Note: only the pre-initialised memory words are valid on startup. */
        mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
        invalid = ~preinited;
#endif

        for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
                /* blend in any invalid bits for current pc */
                invalid |= mem[i];
#endif
                p = &f[i];
                switch (BPF_CLASS(p->code)) {
                /*
                 * Check that memory operations use valid addresses.
                 */
                case BPF_LD:
                case BPF_LDX:
                        switch (BPF_MODE(p->code)) {
                        case BPF_MEM:
                                /*
                                 * There's no maximum packet data size
                                 * in userland.  The runtime packet length
                                 * check suffices.
                                 */
#if defined(KERNEL) || defined(_KERNEL)
                                /*
                                 * A stricter check against the actual
                                 * packet length is done at run time.
                                 */
                                if (p->k >= memwords)
                                        goto out;
                                /* check for current memory invalid */
                                if (invalid & BPF_MEMWORD_INIT(p->k))
                                        goto out;
#endif
                                break;
                        case BPF_ABS:
                        case BPF_IND:
                        case BPF_MSH:
                        case BPF_IMM:
                        case BPF_LEN:
                                break;
                        default:
                                goto out;
                        }
                        break;
                case BPF_ST:
                case BPF_STX:
                        if (p->k >= memwords)
                                goto out;
#if defined(KERNEL) || defined(_KERNEL)
                        /* validate the memory word */
                        invalid &= ~BPF_MEMWORD_INIT(p->k);
#endif
                        break;
                case BPF_ALU:
                        switch (BPF_OP(p->code)) {
                        case BPF_ADD:
                        case BPF_SUB:
                        case BPF_MUL:
                        case BPF_OR:
                        case BPF_XOR:
                        case BPF_AND:
                        case BPF_LSH:
                        case BPF_RSH:
                        case BPF_NEG:
                                break;
                        case BPF_DIV:
                        case BPF_MOD:
                                /*
                                 * Check for constant division by 0.
                                 */
                                if (BPF_SRC(p->code) == BPF_K && p->k == 0)
                                        goto out;
                                break;
                        default:
                                goto out;
                        }
                        break;
                case BPF_JMP:
                        /*
                         * Check that jumps are within the code block,
                         * and that unconditional branches don't go
                         * backwards as a result of an overflow.
                         * Unconditional branches have a 32-bit offset,
                         * so they could overflow; we check to make
                         * sure they don't.  Conditional branches have
                         * an 8-bit offset, and the from address is <=
                         * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
                         * is sufficiently small that adding 255 to it
                         * won't overflow.
                         *
                         * We know that len is <= BPF_MAXINSNS, and we
                         * assume that BPF_MAXINSNS is < the maximum size
                         * of a u_int, so that i + 1 doesn't overflow.
                         *
                         * For userland, we don't know that the from
                         * or len are <= BPF_MAXINSNS, but we know that
                         * from <= len, and, except on a 64-bit system,
                         * it's unlikely that len, if it truly reflects
                         * the size of the program we've been handed,
                         * will be anywhere near the maximum size of
                         * a u_int.  We also don't check for backward
                         * branches, as we currently support them in
                         * userland for the protochain operation.
                         */
                        from = i + 1;
                        switch (BPF_OP(p->code)) {
                        case BPF_JA:
                                if (from + p->k >= len)
                                        goto out;
#if defined(KERNEL) || defined(_KERNEL)
                                if (from + p->k < from)
                                        goto out;
                                /*
                                 * mark the currently invalid bits for the
                                 * destination
                                 */
                                mem[from + p->k] |= invalid;
                                invalid = 0;
#endif
                                break;
                        case BPF_JEQ:
                        case BPF_JGT:
                        case BPF_JGE:
                        case BPF_JSET:
                                if (from + p->jt >= len || from + p->jf >= len)
                                        goto out;
#if defined(KERNEL) || defined(_KERNEL)
                                /*
                                 * mark the currently invalid bits for both
                                 * possible jump destinations
                                 */
                                mem[from + p->jt] |= invalid;
                                mem[from + p->jf] |= invalid;
                                invalid = 0;
#endif
                                break;
                        default:
                                goto out;
                        }
                        break;
                case BPF_RET:
                        break;
                case BPF_MISC:
                        switch (BPF_MISCOP(p->code)) {
                        case BPF_COP:
                        case BPF_COPX:
                                /* In-kernel COP use only. */
#if defined(KERNEL) || defined(_KERNEL)
                                if (bc == NULL || bc->copfuncs == NULL)
                                        goto out;
                                if (BPF_MISCOP(p->code) == BPF_COP &&
                                    p->k >= bc->nfuncs) {
                                        goto out;
                                }
                                break;
#else
                                goto out;
#endif
                        default:
                                break;
                        }
                        break;
                default:
                        goto out;
                }
        }
        ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
        kmem_free(mem, size);
#endif
        return ok;
}