/*	$NetBSD: bpf_filter.c,v 1.55.2.1 2014/05/18 17:46:12 rmind Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.55.2.1 2014/05/18 17:46:12 rmind Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#define	__BPF_PRIVATE
#include <net/bpf.h>

#ifdef _KERNEL

/* Default BPF context (zeroed). */
static bpf_ctx_t		bpf_def_ctx;

bpf_ctx_t *
bpf_create(void)
{
	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
	kmem_free(bc, sizeof(bpf_ctx_t));
}

int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
	bc->copfuncs = funcs;
	bc->nfuncs = n;
	return 0;
}

bpf_ctx_t *
bpf_default_ctx(void)
{
	return &bpf_def_ctx;
}

#endif

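/*
 * Illustrative sketch (not part of the build): how a kernel subsystem
 * might use the context API above to register BPF coprocessor functions.
 * The bpf_copfunc_t signature shown here is inferred from the call sites
 * in bpf_filter_ext() below and may differ from the exact typedef in
 * <net/bpf.h>; example_cop and example_funcs are hypothetical names.
 *
 *	static uint32_t
 *	example_cop(bpf_ctx_t *bc, bpf_args_t *args, uint32_t A)
 *	{
 *		return A + 1;
 *	}
 *	static const bpf_copfunc_t example_funcs[] = { example_cop };
 *
 *	bpf_ctx_t *bc = bpf_create();
 *	bpf_set_cop(bc, example_funcs, __arraycount(example_funcs));
 *	...
 *	bpf_destroy(bc);
 */
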
#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
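/*
 * MINDEX: walk the mbuf chain until offset 'k' falls within the current
 * mbuf.  On exit, 'm' points at that mbuf, 'k' is the offset within it and
 * 'len' is m->m_len.  If the chain is exhausted first, the enclosing
 * function returns 0.
 */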
#define MINDEX(len, m, k) 		\
{					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	}				\
}

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

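	/*
	 * The word straddles an mbuf boundary: take the remaining
	 * (len - k) bytes from the current mbuf and the rest from the
	 * start of the next one.
	 */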
	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	*err = 0;
	MINDEX(len, m, k);
	return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet and buflen is the
 * amount of data present.  In the kernel, a zero buflen indicates
 * that p is really an mbuf chain rather than a contiguous buffer.
 */
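/*
 * Illustrative sketch (not compiled): a minimal program that accepts
 * IPv4 packets on an Ethernet-like link and rejects everything else,
 * run over a contiguous buffer.  Apart from the bpf(4) macros and the
 * functions defined in this file, the identifiers (pkt, pktlen,
 * deliver) are hypothetical.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET|BPF_K, 0),
 *	};
 *
 *	if (bpf_validate(insns, __arraycount(insns)) &&
 *	    bpf_filter(insns, pkt, pktlen, pktlen) != 0)
 *		deliver(pkt);	(the return value is the snapshot length)
 */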
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	bpf_args_t args = {
		.pkt = (const struct mbuf *)p,
		.wirelen = wirelen,
		.buflen = buflen,
		.arg = NULL
	};
	return bpf_filter_ext(&bpf_def_ctx, pc, &args);
}

u_int
bpf_filter_ext(bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;
#ifndef _KERNEL
	bpf_args_t args_store = {
		.pkt = (const struct mbuf *)p,
		.wirelen = wirelen,
		.buflen = buflen,
		.arg = NULL
	};
	bpf_args_t * const args = &args_store;
#else
	const uint8_t * const p = (const uint8_t *)args->pkt;
#endif
	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: it is safe to leave the scratch memory words uninitialised,
	 * as the validation step ensures that no word is read unless it has
	 * previously been written.
	 */
	A = 0;
	X = 0;
	--pc;

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = m_xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = m_xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = m_xbyte(args->pkt, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = args->wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = args->wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > args->buflen ||
			    X > args->buflen - pc->k ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = m_xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > args->buflen ||
			    X > args->buflen - pc->k ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = m_xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= args->buflen ||
			    X >= args->buflen - pc->k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = m_xbyte(args->pkt, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

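		/*
		 * BPF_LDX|BPF_MSH|BPF_B is a helper for parsing IPv4
		 * headers: it takes the low four bits of the byte at
		 * offset k (the IP header length in 32-bit words) and
		 * multiplies them by four, leaving the header length in
		 * bytes in X.
		 */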
		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				X = (m_xbyte(args->pkt, k, &merr) & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = args->mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = args->mem[pc->k];
			continue;

		case BPF_ST:
			args->mem[pc->k] = A;
			continue;

		case BPF_STX:
			args->mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

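		/*
		 * BPF coprocessor call: invoke the pc->k'th (or, for
		 * BPF_COPX, the X'th) function registered with
		 * bpf_set_cop().  The function receives the context, the
		 * packet arguments and the accumulator, and its return
		 * value becomes the new accumulator.  An out-of-range
		 * index rejects the packet.
		 */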
		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;
		}
	}
}

/*
 * Return true if the filter program 'f' is valid.
 * The constraints are that each jump be forward and land on a valid
 * instruction, that memory accesses be within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the program terminate with either an accept or a reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
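/*
 * The in-kernel validation below tracks scratch memory initialisation
 * with one uint16_t bitmask per instruction, so BPF_MEMWORDS is required
 * to match the number of bits in that mask.
 */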
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);

#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(&bpf_def_ctx, f, signed_len);
}

int
bpf_validate_ext(bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	uint16_t *mem, invalid;
	size_t size;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~0;	/* All is invalid on startup */
#endif

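	/*
	 * 'invalid' carries one bit per scratch memory word; a set bit
	 * means the word may not have been written on some path reaching
	 * the current instruction.  Stores clear bits, loads are rejected
	 * if their bit is still set, and jumps propagate the current mask
	 * to their targets through the per-instruction 'mem' array.
	 */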
	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * A stricter check against the actual
				 * packet length is done at run time.
				 */
				if (p->k >= BPF_MEMWORDS)
					goto out;
				/* check for current memory invalid */
				if (invalid & (1 << p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~(1 << p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
#if defined(KERNEL) || defined(_KERNEL)
			switch (BPF_MISCOP(p->code)) {
			case BPF_COP:
			case BPF_COPX:
				/* In-kernel COP use only. */
				if (bc->copfuncs) {
					invalid = 0;
				}
				break;
			default:
				break;
			}
#endif
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}