/*	$NetBSD: bpf_filter.c,v 1.48.2.2 2012/10/30 17:22:42 yamt Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.48.2.2 2012/10/30 17:22:42 yamt Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

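/*
 * Fetch 16- and 32-bit packet fields in network (big-endian) byte order.
 */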
#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
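/*
 * MINDEX() advances to the mbuf that contains byte offset "k" of the
 * packet, rewriting "k" to be relative to that mbuf and "len" to that
 * mbuf's length.  If the chain is exhausted first, it returns 0 from
 * the enclosing function.
 */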
#define MINDEX(len, m, k) 		\
{					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	}				\
}

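/*
 * m_xword(), m_xhalf() and m_xbyte() return the 32-bit, 16-bit and 8-bit
 * value, respectively, found at byte offset "k" in the mbuf chain "m",
 * reassembling a value that straddles an mbuf boundary.  On failure they
 * return 0 and set *err to 1; on success *err is cleared to 0.
 */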
uint32_t m_xword (const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf (const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte (const struct mbuf *, uint32_t, int *);

uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	/* As above: flag an error if MINDEX() runs off the chain. */
	*err = 1;
	MINDEX(len, m, k);
	*err = 0;
	return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

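#if 0
/*
 * Illustrative sketch only (never compiled): a small program in the form
 * accepted by bpf_filter() and bpf_validate() below.  It loads the
 * Ethernet type field and accepts up to 96 bytes of each IPv4 packet,
 * rejecting everything else; the offset and constant assume a DLT_EN10MB
 * header.  The array name is purely hypothetical.
 */
static const struct bpf_insn example_ipv4_prog[] = {
	BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),		/* A <- ethertype */
	BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),	/* IPv4? */
	BPF_STMT(BPF_RET|BPF_K, 96),			/* accept 96 bytes */
	BPF_STMT(BPF_RET|BPF_K, 0),			/* reject */
};
#endif
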
/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount
 * of data present.  In the kernel, a buflen of 0 means that p is
 * actually an mbuf chain rather than a contiguous buffer.
 */
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t A, X, k;
	uint32_t mem[BPF_MEMWORDS];

	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: it is safe to leave mem[] uninitialised, as the validation
	 * step ensures that no memory word is read before it is written.
	 */
	A = 0;
	X = 0;
	--pc;

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

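		/*
		 * BPF_MSH is the "IP header length" hack: X is loaded
		 * with 4 * (P[k] & 0xf), i.e. the length in bytes of an
		 * IP header whose first byte is at offset k.
		 */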
		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				X = (mtod(m, char *)[k] & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}
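
#if 0
/*
 * Illustrative sketch only (never compiled): how a userland caller might
 * drive the two routines in this file over a contiguous packet buffer.
 * The program (insns/ninsns) and the packet (pkt/pktlen) are assumed to
 * be supplied by the caller; the function name is hypothetical.
 */
static u_int
example_run_filter(const struct bpf_insn *insns, int ninsns,
    const u_char *pkt, u_int pktlen)
{

	if (!bpf_validate(insns, ninsns))
		return 0;	/* reject everything on an invalid program */
	/* For a contiguous buffer, wirelen and buflen are both pktlen. */
	return bpf_filter(insns, pkt, pktlen, pktlen);
}
#endif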

/*
 * Return true if 'f' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the program terminates with either an accept or a reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
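/*
 * The "invalid" masks below track one bit per scratch memory word in a
 * uint16_t, so BPF_MEMWORDS must be exactly the number of bits in one.
 */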
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	uint16_t *mem, invalid;
	size_t size;
#endif

	/* Reject empty and negative lengths before converting to unsigned. */
	if (signed_len < 1)
		return 0;
	len = (u_int)signed_len;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~0;	/* All is invalid on startup */
#endif

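	/*
	 * Walk the program once.  In the kernel, "invalid" holds a bit for
	 * every scratch memory word that may still be unwritten on some
	 * path reaching the current instruction: stores clear the word's
	 * bit, loads from a word whose bit is set are rejected, and at
	 * every jump the current mask is merged into mem[] for each
	 * possible destination (and then cleared, since control transfers);
	 * the merged bits are picked up again via mem[i] below.
	 */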
	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * The scratch memory index must be within
				 * mem[] both in the kernel and in userland.
				 */
				if (p->k >= BPF_MEMWORDS)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/* reject loads from a word not yet written */
				if (invalid & (1 << p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				/*
				 * There's no maximum packet data size in
				 * userland; the stricter check against the
				 * actual packet length is done at run time.
				 */
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~(1 << p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}