bpf_filter.c revision 1.62 1 /* $NetBSD: bpf_filter.c,v 1.62 2014/06/24 10:53:30 alnsn Exp $ */
2
3 /*-
4 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from the Stanford/CMU enet packet filter,
8 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
9 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10 * Berkeley Laboratory.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)bpf_filter.c 8.1 (Berkeley) 6/10/93
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.62 2014/06/24 10:53:30 alnsn Exp $");
41
42 #if 0
43 #if !(defined(lint) || defined(KERNEL))
44 static const char rcsid[] =
45 "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp (LBL)";
46 #endif
47 #endif
48
49 #include <sys/param.h>
50 #include <sys/time.h>
51 #include <sys/kmem.h>
52 #include <sys/endian.h>
53
54 #define __BPF_PRIVATE
55 #include <net/bpf.h>
56
57 #ifdef _KERNEL
58
59 bpf_ctx_t *
60 bpf_create(void)
61 {
62 return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
63 }
64
65 void
66 bpf_destroy(bpf_ctx_t *bc)
67 {
68 kmem_free(bc, sizeof(bpf_ctx_t));
69 }
70
71 int
72 bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
73 {
74 bc->copfuncs = funcs;
75 bc->nfuncs = n;
76 return 0;
77 }
78
79 int
80 bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
81 {
82 /* XXX check arguments */
83
84 bc->extwords = nwords;
85 bc->noinit = preinited;
86 return 0;
87 }
88
89 #endif
90
91 #define EXTRACT_SHORT(p) be16dec(p)
92 #define EXTRACT_LONG(p) be32dec(p)
93
94 #ifdef _KERNEL
95 #include <sys/mbuf.h>
/*
 * MINDEX: walk the mbuf chain so that (m, k) designates the mbuf
 * holding byte offset k, rewriting k to the offset within that mbuf
 * and len to that mbuf's length.  Modifies len, m and k in place.
 * If the chain is exhausted first, this returns 0 FROM THE ENCLOSING
 * FUNCTION (non-local control flow — callers rely on it).
 */
#define MINDEX(len, m, k) \
{ \
	len = m->m_len; \
	while (k >= len) { \
		k -= len; \
		m = m->m_next; \
		if (m == 0) \
			return 0; \
		len = m->m_len; \
	} \
}

/* Big-endian 32/16/8-bit extractors that may span mbuf boundaries. */
uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

/* Generic names used by the interpreter's slow-path loads below. */
#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))
115
/*
 * m_xword: extract the 32-bit big-endian word at byte offset k of
 * mbuf chain m.  On success *err is 0 and the word is returned; if
 * the chain does not contain four bytes at that offset, *err is 1
 * and 0 is returned.
 */
uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);	/* may return 0 with *err == 1 */
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		/* Fast path: all four bytes live in this mbuf. */
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	/*
	 * The word straddles two mbufs; stitch it together from the
	 * 1-3 tail bytes of this mbuf and the head of the next.
	 */
	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}
145
/*
 * m_xhalf: extract the 16-bit big-endian halfword at byte offset k
 * of mbuf chain m.  On success *err is 0; if the chain is too short,
 * *err is 1 and 0 is returned.
 */
uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);	/* may return 0 with *err == 1 */
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		/* Both bytes in this mbuf. */
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	/*
	 * NOTE(review): the second byte is taken from the head of the
	 * next mbuf without checking m0->m_len > 0 — assumes no
	 * zero-length mbufs in the chain; confirm against callers.
	 */
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}
166
/*
 * m_xbyte: extract the byte at offset k of mbuf chain m.
 *
 * Unlike m_xword()/m_xhalf(), *err is set to 0 up front, so a short
 * chain returns 0 (via MINDEX) with *err still 0 — the interpreter
 * below matches this by never checking merr after byte loads.
 */
uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	*err = 0;
	MINDEX(len, m, k);	/* may return 0 without flagging *err */
	return mtod(m, u_char *)[k];
}
176 #else /* _KERNEL */
177 #include <stdlib.h>
178 #endif /* !_KERNEL */
179
180 #include <net/bpf.h>
181
182 /*
183 * Execute the filter program starting at pc on the packet p
184 * wirelen is the length of the original packet
185 * buflen is the amount of data present
186 */
187 #ifdef _KERNEL
188
189 u_int
190 bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
191 u_int buflen)
192 {
193 uint32_t mem[BPF_MEMWORDS];
194 bpf_args_t args = {
195 .pkt = p,
196 .wirelen = wirelen,
197 .buflen = buflen,
198 .mem = mem,
199 .arg = NULL
200 };
201
202 return bpf_filter_ext(NULL, pc, &args);
203 }
204
/*
 * bpf_filter_ext: execute the filter program starting at pc.
 *
 * In the kernel this is the common interpreter, taking an optional
 * context bc (needed only for BPF_COP/BPF_COPX) and a caller-built
 * bpf_args_t.  In userland the classic bpf_filter() signature is
 * compiled instead and the argument block is assembled locally.
 *
 * Returns the program's BPF_RET value (the snapshot length; 0 means
 * reject), or 0 on any runtime error.
 */
u_int
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;	/* accumulator, index register, scratch offset */
#ifndef _KERNEL
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args_store = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bpf_args_t * const args = &args_store;
#else
	const uint8_t * const p = args->pkt;
#endif
	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: safe to leave memwords uninitialised, as the validation
	 * step ensures that it will not be read, if it was not written.
	 */
	A = 0;
	X = 0;
	--pc;			/* pre-decrement; loop advances first */

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
			/* Unknown opcode: reject (kernel) or abort (user). */
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		/*
		 * Absolute loads.  The bounds tests are written to be
		 * overflow-safe (compare against buflen before
		 * subtracting).  When the access does not fit in the
		 * contiguous buffer, the kernel falls back to the
		 * mbuf-walking helpers — but only if buflen is 0,
		 * i.e. args->pkt is really an mbuf chain.
		 */
		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				/*
				 * Historical behaviour: merr is not
				 * checked for byte loads; a short
				 * chain yields A = 0 (see m_xbyte()).
				 */
				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = args->wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = args->wirelen;
			continue;

		/*
		 * Indirect loads: offset is X + k.  The extra tests on
		 * pc->k and X guard against u_int overflow of the sum.
		 */
		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > args->buflen ||
			    X > args->buflen - pc->k ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > args->buflen ||
			    X > args->buflen - pc->k ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= args->buflen ||
			    X >= args->buflen - pc->k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		/* ldxb 4*([k]&0xf): classic IP-header-length idiom. */
		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		/*
		 * Scratch-memory access: indices were range-checked by
		 * bpf_validate(), so no bounds test is done here.
		 */
		case BPF_LD|BPF_MEM:
			A = args->mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = args->mem[pc->k];
			continue;

		case BPF_ST:
			args->mem[pc->k] = A;
			continue;

		case BPF_STX:
			args->mem[pc->k] = X;
			continue;

		/*
		 * Jumps: pc already points at the current instruction;
		 * the ++pc at loop top supplies the implicit +1.
		 */
		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		/* ALU: 32-bit unsigned arithmetic (wraps silently). */
		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			/* Runtime divide-by-zero rejects the packet. */
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			/* k == 0 was rejected by bpf_validate(). */
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

		/*
		 * Coprocessor calls.  NOTE(review): bc is dereferenced
		 * without a NULL check — relies on bpf_validate_ext()
		 * having rejected COP/COPX when no context was given;
		 * confirm all callers validate before executing.
		 */
		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;
		}
	}
}
561
562 /*
563 * Return true if the 'fcode' is a valid filter program.
564 * The constraints are that each jump be forward and to a valid
565 * code, that memory accesses are within valid ranges (to the
566 * extent that this can be checked statically; loads of packet
567 * data have to be, and are, also checked at run time), and that
568 * the code terminates with either an accept or reject.
569 *
570 * The kernel needs to be able to verify an application's filter code.
571 * Otherwise, a bogus program could easily crash the system.
572 */
573
574 #if defined(KERNEL) || defined(_KERNEL)
575
/*
 * bpf_validate: validate a program with no external context —
 * default BPF_MEMWORDS scratch store, no coprocessor functions.
 */
int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(NULL, f, signed_len);
}
581
582 int
583 bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
584 #else
585 int
586 bpf_validate(const struct bpf_insn *f, int signed_len)
587 #endif
588 {
589 u_int i, from, len, ok = 0;
590 const struct bpf_insn *p;
591 #if defined(KERNEL) || defined(_KERNEL)
592 bpf_memword_init_t *mem, invalid;
593 size_t size;
594 const size_t extwords = (bc != NULL) ? bc->extwords : 0;
595 const size_t memwords = (extwords != 0) ? extwords : BPF_MEMWORDS;
596 const bpf_memword_init_t noinit = (extwords != 0) ? bc->noinit : 0;
597 #else
598 const size_t memwords = BPF_MEMWORDS;
599 #endif
600
601 len = (u_int)signed_len;
602 if (len < 1)
603 return 0;
604 #if defined(KERNEL) || defined(_KERNEL)
605 if (len > BPF_MAXINSNS)
606 return 0;
607 #endif
608 if (BPF_CLASS(f[len - 1].code) != BPF_RET)
609 return 0;
610
611 #if defined(KERNEL) || defined(_KERNEL)
612 mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
613 invalid = ~noinit; /* Only pre-initialised memory is valid on startup */
614 #endif
615
616 for (i = 0; i < len; ++i) {
617 #if defined(KERNEL) || defined(_KERNEL)
618 /* blend in any invalid bits for current pc */
619 invalid |= mem[i];
620 #endif
621 p = &f[i];
622 switch (BPF_CLASS(p->code)) {
623 /*
624 * Check that memory operations use valid addresses.
625 */
626 case BPF_LD:
627 case BPF_LDX:
628 switch (BPF_MODE(p->code)) {
629 case BPF_MEM:
630 /*
631 * There's no maximum packet data size
632 * in userland. The runtime packet length
633 * check suffices.
634 */
635 #if defined(KERNEL) || defined(_KERNEL)
636 /*
637 * More strict check with actual packet length
638 * is done runtime.
639 */
640 if (p->k >= memwords)
641 goto out;
642 /* check for current memory invalid */
643 if (invalid & BPF_MEMWORD_INIT(p->k))
644 goto out;
645 #endif
646 break;
647 case BPF_ABS:
648 case BPF_IND:
649 case BPF_MSH:
650 case BPF_IMM:
651 case BPF_LEN:
652 break;
653 default:
654 goto out;
655 }
656 break;
657 case BPF_ST:
658 case BPF_STX:
659 if (p->k >= memwords)
660 goto out;
661 #if defined(KERNEL) || defined(_KERNEL)
662 /* validate the memory word */
663 invalid &= ~BPF_MEMWORD_INIT(1 << p->k);
664 #endif
665 break;
666 case BPF_ALU:
667 switch (BPF_OP(p->code)) {
668 case BPF_ADD:
669 case BPF_SUB:
670 case BPF_MUL:
671 case BPF_OR:
672 case BPF_AND:
673 case BPF_LSH:
674 case BPF_RSH:
675 case BPF_NEG:
676 break;
677 case BPF_DIV:
678 /*
679 * Check for constant division by 0.
680 */
681 if (BPF_SRC(p->code) == BPF_K && p->k == 0)
682 goto out;
683 break;
684 default:
685 goto out;
686 }
687 break;
688 case BPF_JMP:
689 /*
690 * Check that jumps are within the code block,
691 * and that unconditional branches don't go
692 * backwards as a result of an overflow.
693 * Unconditional branches have a 32-bit offset,
694 * so they could overflow; we check to make
695 * sure they don't. Conditional branches have
696 * an 8-bit offset, and the from address is <=
697 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
698 * is sufficiently small that adding 255 to it
699 * won't overflow.
700 *
701 * We know that len is <= BPF_MAXINSNS, and we
702 * assume that BPF_MAXINSNS is < the maximum size
703 * of a u_int, so that i + 1 doesn't overflow.
704 *
705 * For userland, we don't know that the from
706 * or len are <= BPF_MAXINSNS, but we know that
707 * from <= len, and, except on a 64-bit system,
708 * it's unlikely that len, if it truly reflects
709 * the size of the program we've been handed,
710 * will be anywhere near the maximum size of
711 * a u_int. We also don't check for backward
712 * branches, as we currently support them in
713 * userland for the protochain operation.
714 */
715 from = i + 1;
716 switch (BPF_OP(p->code)) {
717 case BPF_JA:
718 if (from + p->k >= len)
719 goto out;
720 #if defined(KERNEL) || defined(_KERNEL)
721 if (from + p->k < from)
722 goto out;
723 /*
724 * mark the currently invalid bits for the
725 * destination
726 */
727 mem[from + p->k] |= invalid;
728 invalid = 0;
729 #endif
730 break;
731 case BPF_JEQ:
732 case BPF_JGT:
733 case BPF_JGE:
734 case BPF_JSET:
735 if (from + p->jt >= len || from + p->jf >= len)
736 goto out;
737 #if defined(KERNEL) || defined(_KERNEL)
738 /*
739 * mark the currently invalid bits for both
740 * possible jump destinations
741 */
742 mem[from + p->jt] |= invalid;
743 mem[from + p->jf] |= invalid;
744 invalid = 0;
745 #endif
746 break;
747 default:
748 goto out;
749 }
750 break;
751 case BPF_RET:
752 break;
753 case BPF_MISC:
754 switch (BPF_MISCOP(p->code)) {
755 case BPF_COP:
756 case BPF_COPX:
757 /* In-kernel COP use only. */
758 #if defined(KERNEL) || defined(_KERNEL)
759 if (bc == NULL || bc->copfuncs == NULL)
760 goto out;
761 if (BPF_MISCOP(p->code) == BPF_COP &&
762 p->k >= bc->nfuncs) {
763 goto out;
764 }
765 break;
766 #else
767 goto out;
768 #endif
769 default:
770 break;
771 }
772 break;
773 default:
774 goto out;
775 }
776 }
777 ok = 1;
778 out:
779 #if defined(KERNEL) || defined(_KERNEL)
780 kmem_free(mem, size);
781 #endif
782 return ok;
783 }
784