bpf_filter.c revision 1.58
      1 /*	$NetBSD: bpf_filter.c,v 1.58 2013/09/18 23:34:55 rmind Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from the Stanford/CMU enet packet filter,
      8  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
      9  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
     10  * Berkeley Laboratory.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
     37  */
     38 
     39 #include <sys/cdefs.h>
     40 __KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.58 2013/09/18 23:34:55 rmind Exp $");
     41 
     42 #if 0
     43 #if !(defined(lint) || defined(KERNEL))
     44 static const char rcsid[] =
     45     "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
     46 #endif
     47 #endif
     48 
     49 #include <sys/param.h>
     50 #include <sys/time.h>
     51 #include <sys/kmem.h>
     52 #include <sys/endian.h>
     53 
     54 #include <net/bpf.h>
     55 
     56 #ifdef _KERNEL
     57 
     58 struct bpf_ctx {
     59 	const bpf_copfunc_t *	copfuncs;
     60 	size_t			nfuncs;
     61 };
     62 
     63 /* Default BPF context (zeroed). */
     64 static bpf_ctx_t		bpf_def_ctx;
     65 
     66 bpf_ctx_t *
     67 bpf_create(void)
     68 {
     69 	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
     70 }
     71 
     72 void
     73 bpf_destroy(bpf_ctx_t *bc)
     74 {
     75 	kmem_free(bc, sizeof(bpf_ctx_t));
     76 }
     77 
     78 int
     79 bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
     80 {
     81 	bc->copfuncs = funcs;
     82 	bc->nfuncs = n;
     83 	return 0;
     84 }
     85 
     86 #endif
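
        /*
         * Example (sketch only, not compiled): a kernel user of the context API
         * above would register its coprocessor functions once and hand the
         * context to bpf_filter_ext().  The function my_cop() is hypothetical;
         * its signature is inferred from the BPF_COP call site further below.
         */
        #if 0
        static uint32_t
        my_cop(const struct mbuf *pkt, void *arg, uint32_t A, uint32_t *mem)
        {
        	/* The value returned here ends up in the filter's A register. */
        	return A;
        }

        static const bpf_copfunc_t my_copfuncs[] = { my_cop };

        static bpf_ctx_t *
        my_bpf_init(void)
        {
        	bpf_ctx_t *bc = bpf_create();

        	/* Attach the coprocessor table to the context. */
        	bpf_set_cop(bc, my_copfuncs, __arraycount(my_copfuncs));
        	return bc;
        }
        #endif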
     87 
     88 #define EXTRACT_SHORT(p)	be16dec(p)
     89 #define EXTRACT_LONG(p)		be32dec(p)
     90 
     91 #ifdef _KERNEL
     92 #include <sys/mbuf.h>
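        /*
         * MINDEX() walks the mbuf chain so that, on return, m points to the
         * mbuf that contains byte offset k and k has been rebased relative to
         * that mbuf; if the chain is exhausted first, it returns 0 from the
         * enclosing function.
         */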
     93 #define MINDEX(len, m, k) 		\
     94 {					\
     95 	len = m->m_len; 		\
     96 	while (k >= len) { 		\
     97 		k -= len; 		\
     98 		m = m->m_next; 		\
     99 		if (m == 0) 		\
    100 			return 0; 	\
    101 		len = m->m_len; 	\
    102 	}				\
    103 }
    104 
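        /*
         * Extract a 32-bit word, a 16-bit halfword or a byte, in network byte
         * order, starting at byte offset k within the mbuf chain m.  m_xword()
         * and m_xhalf() handle values that straddle an mbuf boundary and set
         * *err to non-zero when the chain is too short; m_xbyte() always
         * clears *err and simply returns 0 when the offset is out of range.
         */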
    105 uint32_t m_xword(const struct mbuf *, uint32_t, int *);
    106 uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
    107 uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);
    108 
    109 uint32_t
    110 m_xword(const struct mbuf *m, uint32_t k, int *err)
    111 {
    112 	int len;
    113 	u_char *cp, *np;
    114 	struct mbuf *m0;
    115 
    116 	*err = 1;
    117 	MINDEX(len, m, k);
    118 	cp = mtod(m, u_char *) + k;
    119 	if (len >= k + 4) {
    120 		*err = 0;
    121 		return EXTRACT_LONG(cp);
    122 	}
    123 	m0 = m->m_next;
    124 	if (m0 == 0 || m0->m_len + len - k < 4)
    125 		return 0;
    126 	*err = 0;
    127 	np = mtod(m0, u_char *);
    128 
    129 	switch (len - k) {
    130 	case 1:
    131 		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
    132 	case 2:
    133 		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
    134 	default:
    135 		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
    136 	}
    137 }
    138 
    139 uint32_t
    140 m_xhalf(const struct mbuf *m, uint32_t k, int *err)
    141 {
    142 	int len;
    143 	u_char *cp;
    144 	struct mbuf *m0;
    145 
    146 	*err = 1;
    147 	MINDEX(len, m, k);
    148 	cp = mtod(m, u_char *) + k;
    149 	if (len >= k + 2) {
    150 		*err = 0;
    151 		return EXTRACT_SHORT(cp);
    152 	}
    153 	m0 = m->m_next;
    154 	if (m0 == 0)
    155 		return 0;
    156 	*err = 0;
    157 	return (cp[0] << 8) | mtod(m0, u_char *)[0];
    158 }
    159 
    160 uint32_t
    161 m_xbyte(const struct mbuf *m, uint32_t k, int *err)
    162 {
    163 	int len;
    164 
    165 	*err = 0;
    166 	MINDEX(len, m, k);
    167 	return mtod(m, u_char *)[k];
    168 }
    169 #else /* _KERNEL */
    170 #include <stdlib.h>
    171 #endif /* !_KERNEL */
    172 
    173 #include <net/bpf.h>
    174 
    175 /*
    176  * Execute the filter program starting at pc on the packet p.
    177  * wirelen is the length of the original packet; buflen is the amount of
    178  * data present (in the kernel, buflen == 0 means p is an mbuf chain).
    179  */
    180 #ifdef _KERNEL
    181 
    182 u_int
    183 bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    184     u_int buflen)
    185 {
    186 	return bpf_filter_ext(&bpf_def_ctx, NULL, pc, p, wirelen, buflen);
    187 }
    188 
    189 u_int
    190 bpf_filter_ext(bpf_ctx_t *bc, void *arg, const struct bpf_insn *pc,
    191     const u_char *p, u_int wirelen, u_int buflen)
    192 #else
    193 u_int
    194 bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    195     u_int buflen)
    196 #endif
    197 {
    198 	uint32_t A, X, k;
    199 	uint32_t mem[BPF_MEMWORDS];
    200 
    201 #ifdef _KERNEL
    202 	KASSERT(bc != NULL);
    203 #endif
    204 
    205 	if (pc == 0) {
    206 		/*
    207 		 * No filter means accept all.
    208 		 */
    209 		return (u_int)-1;
    210 	}
    211 
    212 	/*
    213 	 * Note: it is safe to leave the scratch memory uninitialised, as the
    214 	 * validation step ensures that no word is read before it is written.
    215 	 */
    216 	A = 0;
    217 	X = 0;
    218 	--pc;
    219 
    220 	for (;;) {
    221 		++pc;
    222 		switch (pc->code) {
    223 
    224 		default:
    225 #ifdef _KERNEL
    226 			return 0;
    227 #else
    228 			abort();
    229 			/*NOTREACHED*/
    230 #endif
    231 		case BPF_RET|BPF_K:
    232 			return (u_int)pc->k;
    233 
    234 		case BPF_RET|BPF_A:
    235 			return (u_int)A;
    236 
    237 		case BPF_LD|BPF_W|BPF_ABS:
    238 			k = pc->k;
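        			/*
        			 * The bounds check is split into two comparisons to
        			 * avoid unsigned overflow in k + sizeof(int32_t); a
        			 * buflen of zero means p is an mbuf chain, handled by
        			 * the mbuf-aware helpers.
        			 */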
    239 			if (k > buflen || sizeof(int32_t) > buflen - k) {
    240 #ifdef _KERNEL
    241 				int merr;
    242 
    243 				if (buflen != 0)
    244 					return 0;
    245 				A = m_xword((const struct mbuf *)p, k, &merr);
    246 				if (merr != 0)
    247 					return 0;
    248 				continue;
    249 #else
    250 				return 0;
    251 #endif
    252 			}
    253 			A = EXTRACT_LONG(&p[k]);
    254 			continue;
    255 
    256 		case BPF_LD|BPF_H|BPF_ABS:
    257 			k = pc->k;
    258 			if (k > buflen || sizeof(int16_t) > buflen - k) {
    259 #ifdef _KERNEL
    260 				int merr;
    261 
    262 				if (buflen != 0)
    263 					return 0;
    264 				A = m_xhalf((const struct mbuf *)p, k, &merr);
    265 				if (merr != 0)
    266 					return 0;
    267 				continue;
    268 #else
    269 				return 0;
    270 #endif
    271 			}
    272 			A = EXTRACT_SHORT(&p[k]);
    273 			continue;
    274 
    275 		case BPF_LD|BPF_B|BPF_ABS:
    276 			k = pc->k;
    277 			if (k >= buflen) {
    278 #ifdef _KERNEL
    279 				const struct mbuf *m;
    280 				int len;
    281 
    282 				if (buflen != 0)
    283 					return 0;
    284 				m = (const struct mbuf *)p;
    285 				MINDEX(len, m, k);
    286 				A = mtod(m, u_char *)[k];
    287 				continue;
    288 #else
    289 				return 0;
    290 #endif
    291 			}
    292 			A = p[k];
    293 			continue;
    294 
    295 		case BPF_LD|BPF_W|BPF_LEN:
    296 			A = wirelen;
    297 			continue;
    298 
    299 		case BPF_LDX|BPF_W|BPF_LEN:
    300 			X = wirelen;
    301 			continue;
    302 
    303 		case BPF_LD|BPF_W|BPF_IND:
    304 			k = X + pc->k;
    305 			if (pc->k > buflen || X > buflen - pc->k ||
    306 			    sizeof(int32_t) > buflen - k) {
    307 #ifdef _KERNEL
    308 				int merr;
    309 
    310 				if (buflen != 0)
    311 					return 0;
    312 				A = m_xword((const struct mbuf *)p, k, &merr);
    313 				if (merr != 0)
    314 					return 0;
    315 				continue;
    316 #else
    317 				return 0;
    318 #endif
    319 			}
    320 			A = EXTRACT_LONG(&p[k]);
    321 			continue;
    322 
    323 		case BPF_LD|BPF_H|BPF_IND:
    324 			k = X + pc->k;
    325 			if (pc->k > buflen || X > buflen - pc->k ||
    326 			    sizeof(int16_t) > buflen - k) {
    327 #ifdef _KERNEL
    328 				int merr;
    329 
    330 				if (buflen != 0)
    331 					return 0;
    332 				A = m_xhalf((const struct mbuf *)p, k, &merr);
    333 				if (merr != 0)
    334 					return 0;
    335 				continue;
    336 #else
    337 				return 0;
    338 #endif
    339 			}
    340 			A = EXTRACT_SHORT(&p[k]);
    341 			continue;
    342 
    343 		case BPF_LD|BPF_B|BPF_IND:
    344 			k = X + pc->k;
    345 			if (pc->k >= buflen || X >= buflen - pc->k) {
    346 #ifdef _KERNEL
    347 				const struct mbuf *m;
    348 				int len;
    349 
    350 				if (buflen != 0)
    351 					return 0;
    352 				m = (const struct mbuf *)p;
    353 				MINDEX(len, m, k);
    354 				A = mtod(m, u_char *)[k];
    355 				continue;
    356 #else
    357 				return 0;
    358 #endif
    359 			}
    360 			A = p[k];
    361 			continue;
    362 
    363 		case BPF_LDX|BPF_MSH|BPF_B:
    364 			k = pc->k;
    365 			if (k >= buflen) {
    366 #ifdef _KERNEL
    367 				const struct mbuf *m;
    368 				int len;
    369 
    370 				if (buflen != 0)
    371 					return 0;
    372 				m = (const struct mbuf *)p;
    373 				MINDEX(len, m, k);
    374 				X = (mtod(m, char *)[k] & 0xf) << 2;
    375 				continue;
    376 #else
    377 				return 0;
    378 #endif
    379 			}
    380 			X = (p[pc->k] & 0xf) << 2;
    381 			continue;
    382 
    383 		case BPF_LD|BPF_IMM:
    384 			A = pc->k;
    385 			continue;
    386 
    387 		case BPF_LDX|BPF_IMM:
    388 			X = pc->k;
    389 			continue;
    390 
    391 		case BPF_LD|BPF_MEM:
    392 			A = mem[pc->k];
    393 			continue;
    394 
    395 		case BPF_LDX|BPF_MEM:
    396 			X = mem[pc->k];
    397 			continue;
    398 
    399 		case BPF_ST:
    400 			mem[pc->k] = A;
    401 			continue;
    402 
    403 		case BPF_STX:
    404 			mem[pc->k] = X;
    405 			continue;
    406 
    407 		case BPF_JMP|BPF_JA:
    408 			pc += pc->k;
    409 			continue;
    410 
    411 		case BPF_JMP|BPF_JGT|BPF_K:
    412 			pc += (A > pc->k) ? pc->jt : pc->jf;
    413 			continue;
    414 
    415 		case BPF_JMP|BPF_JGE|BPF_K:
    416 			pc += (A >= pc->k) ? pc->jt : pc->jf;
    417 			continue;
    418 
    419 		case BPF_JMP|BPF_JEQ|BPF_K:
    420 			pc += (A == pc->k) ? pc->jt : pc->jf;
    421 			continue;
    422 
    423 		case BPF_JMP|BPF_JSET|BPF_K:
    424 			pc += (A & pc->k) ? pc->jt : pc->jf;
    425 			continue;
    426 
    427 		case BPF_JMP|BPF_JGT|BPF_X:
    428 			pc += (A > X) ? pc->jt : pc->jf;
    429 			continue;
    430 
    431 		case BPF_JMP|BPF_JGE|BPF_X:
    432 			pc += (A >= X) ? pc->jt : pc->jf;
    433 			continue;
    434 
    435 		case BPF_JMP|BPF_JEQ|BPF_X:
    436 			pc += (A == X) ? pc->jt : pc->jf;
    437 			continue;
    438 
    439 		case BPF_JMP|BPF_JSET|BPF_X:
    440 			pc += (A & X) ? pc->jt : pc->jf;
    441 			continue;
    442 
    443 		case BPF_ALU|BPF_ADD|BPF_X:
    444 			A += X;
    445 			continue;
    446 
    447 		case BPF_ALU|BPF_SUB|BPF_X:
    448 			A -= X;
    449 			continue;
    450 
    451 		case BPF_ALU|BPF_MUL|BPF_X:
    452 			A *= X;
    453 			continue;
    454 
    455 		case BPF_ALU|BPF_DIV|BPF_X:
    456 			if (X == 0)
    457 				return 0;
    458 			A /= X;
    459 			continue;
    460 
    461 		case BPF_ALU|BPF_AND|BPF_X:
    462 			A &= X;
    463 			continue;
    464 
    465 		case BPF_ALU|BPF_OR|BPF_X:
    466 			A |= X;
    467 			continue;
    468 
    469 		case BPF_ALU|BPF_LSH|BPF_X:
    470 			A <<= X;
    471 			continue;
    472 
    473 		case BPF_ALU|BPF_RSH|BPF_X:
    474 			A >>= X;
    475 			continue;
    476 
    477 		case BPF_ALU|BPF_ADD|BPF_K:
    478 			A += pc->k;
    479 			continue;
    480 
    481 		case BPF_ALU|BPF_SUB|BPF_K:
    482 			A -= pc->k;
    483 			continue;
    484 
    485 		case BPF_ALU|BPF_MUL|BPF_K:
    486 			A *= pc->k;
    487 			continue;
    488 
    489 		case BPF_ALU|BPF_DIV|BPF_K:
    490 			A /= pc->k;
    491 			continue;
    492 
    493 		case BPF_ALU|BPF_AND|BPF_K:
    494 			A &= pc->k;
    495 			continue;
    496 
    497 		case BPF_ALU|BPF_OR|BPF_K:
    498 			A |= pc->k;
    499 			continue;
    500 
    501 		case BPF_ALU|BPF_LSH|BPF_K:
    502 			A <<= pc->k;
    503 			continue;
    504 
    505 		case BPF_ALU|BPF_RSH|BPF_K:
    506 			A >>= pc->k;
    507 			continue;
    508 
    509 		case BPF_ALU|BPF_NEG:
    510 			A = -A;
    511 			continue;
    512 
    513 		case BPF_MISC|BPF_TAX:
    514 			X = A;
    515 			continue;
    516 
    517 		case BPF_MISC|BPF_TXA:
    518 			A = X;
    519 			continue;
    520 
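        		/*
        		 * BPF coprocessor call (a NetBSD extension): invoke the
        		 * function registered at index pc->k (or X) via bpf_set_cop()
        		 * and replace A with its return value.  If the index is out
        		 * of range, or in the userland build, the filter returns 0.
        		 */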
    521 		case BPF_MISC|BPF_COP:
    522 #ifdef _KERNEL
    523 			if (pc->k < bc->nfuncs) {
    524 				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
    525 				A = fn((const struct mbuf *)p, arg, A, mem);
    526 				continue;
    527 			}
    528 #endif
    529 			return 0;
    530 
    531 		case BPF_MISC|BPF_COPX:
    532 #ifdef _KERNEL
    533 			if (X < bc->nfuncs) {
    534 				const bpf_copfunc_t fn = bc->copfuncs[X];
    535 				A = fn((const struct mbuf *)p, arg, A, mem);
    536 				continue;
    537 			}
    538 #endif
    539 			return 0;
    540 		}
    541 	}
    542 }
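
        /*
         * Example (sketch only, not compiled): a minimal filter that accepts
         * IPv4 packets on an Ethernet-like link and drops everything else.
         * The instructions are built with the BPF_STMT()/BPF_JUMP() macros
         * from <net/bpf.h>; offset 12 and the value 0x0800 (ETHERTYPE_IP)
         * are assumptions about the link-layer header, not something this
         * file defines.
         */
        #if 0
        static const struct bpf_insn ipv4_prog[] = {
        	/* A <- big-endian halfword at byte offset 12 (Ethernet type) */
        	BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
        	/* if (A == 0x0800) fall through to accept, else skip to reject */
        	BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
        	/* accept: keep up to 128 bytes of the packet */
        	BPF_STMT(BPF_RET|BPF_K, 128),
        	/* reject: keep nothing */
        	BPF_STMT(BPF_RET|BPF_K, 0),
        };

        static u_int
        match_ipv4(const u_char *pkt, u_int wirelen, u_int buflen)
        {
        	/* Returns the snapshot length (128) for IPv4 and 0 otherwise. */
        	return bpf_filter(ipv4_prog, pkt, wirelen, buflen);
        }
        #endif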
    543 
    544 /*
    545  * Return true if the 'fcode' is a valid filter program.
    546  * The constraints are that each jump be forward and to a valid
    547  * instruction, that memory accesses are within valid ranges (to the
    548  * extent that this can be checked statically; loads of packet
    549  * data have to be, and are, also checked at run time), and that
    550  * the code terminates with either an accept or reject.
    551  *
    552  * The kernel needs to be able to verify an application's filter code.
    553  * Otherwise, a bogus program could easily crash the system.
    554  */
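        /* The 16-bit 'invalid' bitmap used below needs one bit per memory word. */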
    555 __CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);
    556 
    557 #if defined(KERNEL) || defined(_KERNEL)
    558 
    559 int
    560 bpf_validate(const struct bpf_insn *f, int signed_len)
    561 {
    562 	return bpf_validate_ext(&bpf_def_ctx, f, signed_len);
    563 }
    564 
    565 int
    566 bpf_validate_ext(bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
    567 #else
    568 int
    569 bpf_validate(const struct bpf_insn *f, int signed_len)
    570 #endif
    571 {
    572 	u_int i, from, len, ok = 0;
    573 	const struct bpf_insn *p;
    574 #if defined(KERNEL) || defined(_KERNEL)
    575 	uint16_t *mem, invalid;
    576 	size_t size;
    577 #endif
    578 
    579 	len = (u_int)signed_len;
    580 	if (len < 1)
    581 		return 0;
    582 #if defined(KERNEL) || defined(_KERNEL)
    583 	if (len > BPF_MAXINSNS)
    584 		return 0;
    585 #endif
    586 	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
    587 		return 0;
    588 
    589 #if defined(KERNEL) || defined(_KERNEL)
    590 	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
    591 	invalid = ~0;	/* All is invalid on startup */
    592 #endif
    593 
    594 	for (i = 0; i < len; ++i) {
    595 #if defined(KERNEL) || defined(_KERNEL)
    596 		/* blend in any invalid bits for current pc */
    597 		invalid |= mem[i];
    598 #endif
    599 		p = &f[i];
    600 		switch (BPF_CLASS(p->code)) {
    601 		/*
    602 		 * Check that memory operations use valid addresses.
    603 		 */
    604 		case BPF_LD:
    605 		case BPF_LDX:
    606 			switch (BPF_MODE(p->code)) {
    607 			case BPF_MEM:
    608 				/*
    609 				 * There is no static limit on packet data
    610 				 * size in userland; the run-time packet
    611 				 * length check suffices for packet loads.
    612 				 */
    613 #if defined(KERNEL) || defined(_KERNEL)
    614 				/*
    615 				 * A stricter check against the actual packet
    616 				 * length is done at run time.
    617 				 */
    618 				if (p->k >= BPF_MEMWORDS)
    619 					goto out;
    620 				/* the scratch memory word must have been written */
    621 				if (invalid & (1 << p->k))
    622 					goto out;
    623 #endif
    624 				break;
    625 			case BPF_ABS:
    626 			case BPF_IND:
    627 			case BPF_MSH:
    628 			case BPF_IMM:
    629 			case BPF_LEN:
    630 				break;
    631 			default:
    632 				goto out;
    633 			}
    634 			break;
    635 		case BPF_ST:
    636 		case BPF_STX:
    637 			if (p->k >= BPF_MEMWORDS)
    638 				goto out;
    639 #if defined(KERNEL) || defined(_KERNEL)
    640 			/* validate the memory word */
    641 			invalid &= ~(1 << p->k);
    642 #endif
    643 			break;
    644 		case BPF_ALU:
    645 			switch (BPF_OP(p->code)) {
    646 			case BPF_ADD:
    647 			case BPF_SUB:
    648 			case BPF_MUL:
    649 			case BPF_OR:
    650 			case BPF_AND:
    651 			case BPF_LSH:
    652 			case BPF_RSH:
    653 			case BPF_NEG:
    654 				break;
    655 			case BPF_DIV:
    656 				/*
    657 				 * Check for constant division by 0.
    658 				 */
    659 				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
    660 					goto out;
    661 				break;
    662 			default:
    663 				goto out;
    664 			}
    665 			break;
    666 		case BPF_JMP:
    667 			/*
    668 			 * Check that jumps are within the code block,
    669 			 * and that unconditional branches don't go
    670 			 * backwards as a result of an overflow.
    671 			 * Unconditional branches have a 32-bit offset,
    672 			 * so they could overflow; we check to make
    673 			 * sure they don't.  Conditional branches have
    674 			 * an 8-bit offset, and the from address is <=
    675 			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
    676 			 * is sufficiently small that adding 255 to it
    677 			 * won't overflow.
    678 			 *
    679 			 * We know that len is <= BPF_MAXINSNS, and we
    680 			 * assume that BPF_MAXINSNS is < the maximum size
    681 			 * of a u_int, so that i + 1 doesn't overflow.
    682 			 *
    683 			 * For userland, we don't know that the from
    684 			 * or len are <= BPF_MAXINSNS, but we know that
    685 			 * from <= len, and, except on a 64-bit system,
    686 			 * it's unlikely that len, if it truly reflects
    687 			 * the size of the program we've been handed,
    688 			 * will be anywhere near the maximum size of
    689 			 * a u_int.  We also don't check for backward
    690 			 * branches, as we currently support them in
    691 			 * userland for the protochain operation.
    692 			 */
    693 			from = i + 1;
    694 			switch (BPF_OP(p->code)) {
    695 			case BPF_JA:
    696 				if (from + p->k >= len)
    697 					goto out;
    698 #if defined(KERNEL) || defined(_KERNEL)
    699 				if (from + p->k < from)
    700 					goto out;
    701 				/*
    702 				 * mark the currently invalid bits for the
    703 				 * destination
    704 				 */
    705 				mem[from + p->k] |= invalid;
    706 				invalid = 0;
    707 #endif
    708 				break;
    709 			case BPF_JEQ:
    710 			case BPF_JGT:
    711 			case BPF_JGE:
    712 			case BPF_JSET:
    713 				if (from + p->jt >= len || from + p->jf >= len)
    714 					goto out;
    715 #if defined(KERNEL) || defined(_KERNEL)
    716 				/*
    717 				 * mark the currently invalid bits for both
    718 				 * possible jump destinations
    719 				 */
    720 				mem[from + p->jt] |= invalid;
    721 				mem[from + p->jf] |= invalid;
    722 				invalid = 0;
    723 #endif
    724 				break;
    725 			default:
    726 				goto out;
    727 			}
    728 			break;
    729 		case BPF_RET:
    730 			break;
    731 		case BPF_MISC:
    732 			break;
    733 		default:
    734 			goto out;
    735 		}
    736 	}
    737 	ok = 1;
    738 out:
    739 #if defined(KERNEL) || defined(_KERNEL)
    740 	kmem_free(mem, size);
    741 #endif
    742 	return ok;
    743 }
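
        /*
         * Example (sketch only, not compiled): bpf_validate() should be run on
         * a program before it is installed.  The second program below is
         * rejected because it divides by the constant zero, one of the static
         * checks performed above.
         */
        #if 0
        static void
        validate_examples(void)
        {
        	static struct bpf_insn ok_prog[] = {
        		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),	/* accept everything */
        	};
        	static struct bpf_insn bad_prog[] = {
        		BPF_STMT(BPF_ALU|BPF_DIV|BPF_K, 0),	/* constant division by 0 */
        		BPF_STMT(BPF_RET|BPF_K, 0),
        	};

        	KASSERT(bpf_validate(ok_prog, __arraycount(ok_prog)) == 1);
        	KASSERT(bpf_validate(bad_prog, __arraycount(bad_prog)) == 0);
        }
        #endif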
    744