Home | History | Annotate | Line # | Download | only in net
bpfjit.c revision 1.35
      1 /*	$NetBSD: bpfjit.c,v 1.35 2014/11/20 19:18:52 alnsn Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2011-2014 Alexander Nasonov.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  *
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in
     15  *    the documentation and/or other materials provided with the
     16  *    distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
     22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     23  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
     24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     26  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     27  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     28  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 #ifdef _KERNEL
     34 __KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.35 2014/11/20 19:18:52 alnsn Exp $");
     35 #else
     36 __RCSID("$NetBSD: bpfjit.c,v 1.35 2014/11/20 19:18:52 alnsn Exp $");
     37 #endif
     38 
     39 #include <sys/types.h>
     40 #include <sys/queue.h>
     41 
     42 #ifndef _KERNEL
     43 #include <assert.h>
     44 #define BJ_ASSERT(c) assert(c)
     45 #else
     46 #define BJ_ASSERT(c) KASSERT(c)
     47 #endif
     48 
     49 #ifndef _KERNEL
     50 #include <stdlib.h>
     51 #define BJ_ALLOC(sz) malloc(sz)
     52 #define BJ_FREE(p, sz) free(p)
     53 #else
     54 #include <sys/kmem.h>
     55 #define BJ_ALLOC(sz) kmem_alloc(sz, KM_SLEEP)
     56 #define BJ_FREE(p, sz) kmem_free(p, sz)
     57 #endif
     58 
     59 #ifndef _KERNEL
     60 #include <limits.h>
     61 #include <stdbool.h>
     62 #include <stddef.h>
     63 #include <stdint.h>
     64 #else
     65 #include <sys/atomic.h>
     66 #include <sys/module.h>
     67 #endif
     68 
     69 #define	__BPF_PRIVATE
     70 #include <net/bpf.h>
     71 #include <net/bpfjit.h>
     72 #include <sljitLir.h>
     73 
     74 #if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
     75 #include <stdio.h> /* for stderr */
     76 #endif
     77 
     78 /*
     79  * XXX: Until we support SLJIT_UMOD properly
     80  */
     81 #undef BPFJIT_USE_UDIV
     82 
     83 /*
     84  * Arguments of generated bpfjit_func_t.
     85  * The first argument is reassigned upon entry
     86  * to a more frequently used buf argument.
     87  */
     88 #define BJ_CTX_ARG	SLJIT_SAVED_REG1
     89 #define BJ_ARGS		SLJIT_SAVED_REG2
     90 
     91 /*
     92  * Permanent register assignments.
     93  */
     94 #define BJ_BUF		SLJIT_SAVED_REG1
     95 //#define BJ_ARGS	SLJIT_SAVED_REG2
     96 #define BJ_BUFLEN	SLJIT_SAVED_REG3
     97 #define BJ_AREG		SLJIT_SCRATCH_REG1
     98 #define BJ_TMP1REG	SLJIT_SCRATCH_REG2
     99 #define BJ_TMP2REG	SLJIT_SCRATCH_REG3
    100 #define BJ_XREG		SLJIT_TEMPORARY_EREG1
    101 #define BJ_TMP3REG	SLJIT_TEMPORARY_EREG2
    102 
    103 #ifdef _KERNEL
    104 #define MAX_MEMWORDS BPF_MAX_MEMWORDS
    105 #else
    106 #define MAX_MEMWORDS BPF_MEMWORDS
    107 #endif
    108 
    109 #define BJ_INIT_NOBITS  ((bpf_memword_init_t)0)
    110 #define BJ_INIT_MBIT(k) BPF_MEMWORD_INIT(k)
    111 #define BJ_INIT_ABIT    BJ_INIT_MBIT(MAX_MEMWORDS)
    112 #define BJ_INIT_XBIT    BJ_INIT_MBIT(MAX_MEMWORDS + 1)
    113 
    114 /*
    115  * Get a number of memwords and external memwords from a bpf_ctx object.
    116  */
    117 #define GET_EXTWORDS(bc) ((bc) ? (bc)->extwords : 0)
    118 #define GET_MEMWORDS(bc) (GET_EXTWORDS(bc) ? GET_EXTWORDS(bc) : BPF_MEMWORDS)
    119 
    120 /*
    121  * Optimization hints.
    122  */
    123 typedef unsigned int bpfjit_hint_t;
    124 #define BJ_HINT_ABS  0x01 /* packet read at absolute offset   */
    125 #define BJ_HINT_IND  0x02 /* packet read at variable offset   */
    126 #define BJ_HINT_MSH  0x04 /* BPF_MSH instruction              */
    127 #define BJ_HINT_COP  0x08 /* BPF_COP or BPF_COPX instruction  */
    128 #define BJ_HINT_COPX 0x10 /* BPF_COPX instruction             */
    129 #define BJ_HINT_XREG 0x20 /* BJ_XREG is needed                */
    130 #define BJ_HINT_LDX  0x40 /* BPF_LDX instruction              */
    131 #define BJ_HINT_PKT  (BJ_HINT_ABS|BJ_HINT_IND|BJ_HINT_MSH)
    132 
    133 /*
    134  * Datatype for Array Bounds Check Elimination (ABC) pass.
    135  */
    136 typedef uint64_t bpfjit_abc_length_t;
    137 #define MAX_ABC_LENGTH (UINT32_MAX + UINT64_C(4)) /* max. width is 4 */
    138 
/*
 * Per-call stack frame of a generated bpfjit function.
 * Generated code addresses these fields relative to SLJIT_LOCALS_REG
 * (see offsetof() uses throughout this file).
 */
struct bpfjit_stack
{
	bpf_ctx_t *ctx; /* bpf context; read back by BPF_COP/BPF_COPX code */
	uint32_t *extmem; /* pointer to external memory store */
	uint32_t reg; /* saved A or X register */
#ifdef _KERNEL
	int err; /* 3rd argument for m_xword/m_xhalf/m_xbyte function call */
#endif
	uint32_t mem[BPF_MEMWORDS]; /* internal memory store */
};
    149 
    150 /*
    151  * Data for BPF_JMP instruction.
    152  * Forward declaration for struct bpfjit_jump.
    153  */
    154 struct bpfjit_jump_data;
    155 
/*
 * Node of bjumps list: a single forward sljit jump that still needs
 * its target label to be resolved.
 */
struct bpfjit_jump {
	struct sljit_jump *sjump; /* the underlying unresolved sljit jump */
	SLIST_ENTRY(bpfjit_jump) entries; /* link in the target insn's bjumps list */
	struct bpfjit_jump_data *jdata; /* back pointer to the enclosing jump data (see jtf[]) */
};
    164 
/*
 * Data for BPF_JMP instruction.
 */
struct bpfjit_jump_data {
	/*
	 * These entries make up bjumps list:
	 * jtf[0] - when coming from jt path,
	 * jtf[1] - when coming from jf path.
	 */
	struct bpfjit_jump jtf[2];
	/*
	 * Length calculated by Array Bounds Check Elimination (ABC) pass.
	 * Note: bpfjit_abc_length_t is 64-bit and may exceed UINT32_MAX.
	 */
	bpfjit_abc_length_t abc_length;
	/*
	 * Length checked by the last out-of-bounds check.
	 */
	bpfjit_abc_length_t checked_length;
};
    184 
/*
 * Data for "read from packet" instructions.
 * See also read_pkt_insn() function below.
 */
struct bpfjit_read_pkt_data {
	/*
	 * Length calculated by Array Bounds Check Elimination (ABC) pass.
	 */
	bpfjit_abc_length_t abc_length;
	/*
	 * If positive, emit "if (buflen < check_length) return 0"
	 * out-of-bounds check.
	 * Values greater than UINT32_MAX generate unconditional "return 0".
	 */
	bpfjit_abc_length_t check_length;
};
    201 
/*
 * Additional (optimization-related) data for bpf_insn.
 */
struct bpfjit_insn_data {
	/* List of jumps to this insn. */
	SLIST_HEAD(, bpfjit_jump) bjumps;

	union {
		/* Used when this insn is a BPF_JMP. */
		struct bpfjit_jump_data     jdata;
		/* Used when this insn reads from the packet. */
		struct bpfjit_read_pkt_data rdata;
	} u;

	/*
	 * Bitmask of memwords, plus the A and X bits (see BJ_INIT_ABIT
	 * and BJ_INIT_XBIT), that hold no initialized value at this insn.
	 * NOTE(review): inferred from the BJ_INIT_* macros above — confirm
	 * against the pass that computes it (not visible in this chunk).
	 */
	bpf_memword_init_t invalid;
	/* True when this insn can never be executed. */
	bool unreachable;
};
    217 
    218 #ifdef _KERNEL
    219 
    220 uint32_t m_xword(const struct mbuf *, uint32_t, int *);
    221 uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
    222 uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);
    223 
    224 MODULE(MODULE_CLASS_MISC, bpfjit, "sljit")
    225 
/*
 * Kernel module control entry point: publishes the jit hooks into
 * bpfjit_module_ops on load.  Unloading is not supported.
 */
static int
bpfjit_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		/*
		 * Publish bj_free_code before bj_generate_code, with a
		 * producer barrier in between, so that a reader who
		 * observes a non-NULL bj_generate_code is guaranteed to
		 * also see a valid bj_free_code.
		 */
		bpfjit_module_ops.bj_free_code = &bpfjit_free_code;
		membar_producer();
		bpfjit_module_ops.bj_generate_code = &bpfjit_generate_code;
		membar_producer();
		return 0;

	case MODULE_CMD_FINI:
		/* Module unload is not supported. */
		return EOPNOTSUPP;

	default:
		return ENOTTY;
	}
}
    245 #endif
    246 
    247 /*
    248  * Return a number of scratch registers to pass
    249  * to sljit_emit_enter() function.
    250  */
    251 static sljit_si
    252 nscratches(bpfjit_hint_t hints)
    253 {
    254 	sljit_si rv = 2;
    255 
    256 #ifdef _KERNEL
    257 	if (hints & BJ_HINT_PKT)
    258 		rv = 3; /* xcall with three arguments */
    259 #endif
    260 
    261 	if (hints & BJ_HINT_IND)
    262 		rv = 3; /* uses BJ_TMP2REG */
    263 
    264 	if (hints & BJ_HINT_COP)
    265 		rv = 3; /* calls copfunc with three arguments */
    266 
    267 	if (hints & BJ_HINT_XREG)
    268 		rv = 4; /* uses BJ_XREG */
    269 
    270 #ifdef _KERNEL
    271 	if (hints & BJ_HINT_LDX)
    272 		rv = 5; /* uses BJ_TMP3REG */
    273 #endif
    274 
    275 	if (hints & BJ_HINT_COPX)
    276 		rv = 5; /* uses BJ_TMP3REG */
    277 
    278 	return rv;
    279 }
    280 
    281 /*
    282  * Return a number of saved registers to pass
    283  * to sljit_emit_enter() function.
    284  */
    285 static sljit_si
    286 nsaveds(bpfjit_hint_t hints)
    287 {
    288 	sljit_si rv = 3;
    289 
    290 	return rv;
    291 }
    292 
    293 static uint32_t
    294 read_width(const struct bpf_insn *pc)
    295 {
    296 
    297 	switch (BPF_SIZE(pc->code)) {
    298 	case BPF_W:
    299 		return 4;
    300 	case BPF_H:
    301 		return 2;
    302 	case BPF_B:
    303 		return 1;
    304 	default:
    305 		BJ_ASSERT(false);
    306 		return 0;
    307 	}
    308 }
    309 
    310 /*
    311  * Copy buf and buflen members of bpf_args from BJ_ARGS
    312  * pointer to BJ_BUF and BJ_BUFLEN registers.
    313  */
    314 static int
    315 load_buf_buflen(struct sljit_compiler *compiler)
    316 {
    317 	int status;
    318 
    319 	status = sljit_emit_op1(compiler,
    320 	    SLJIT_MOV_P,
    321 	    BJ_BUF, 0,
    322 	    SLJIT_MEM1(BJ_ARGS),
    323 	    offsetof(struct bpf_args, pkt));
    324 	if (status != SLJIT_SUCCESS)
    325 		return status;
    326 
    327 	status = sljit_emit_op1(compiler,
    328 	    SLJIT_MOV, /* size_t source */
    329 	    BJ_BUFLEN, 0,
    330 	    SLJIT_MEM1(BJ_ARGS),
    331 	    offsetof(struct bpf_args, buflen));
    332 
    333 	return status;
    334 }
    335 
/*
 * Double the capacity of a dynamically sized array of sljit jumps.
 * On success, *jumps and *size are updated to the new array and
 * capacity.  On overflow or allocation failure, both are left
 * untouched and false is returned.
 */
static bool
grow_jumps(struct sljit_jump ***jumps, size_t *size)
{
	struct sljit_jump **grown;
	const size_t elemsz = sizeof(struct sljit_jump *);
	const size_t oldcnt = *size;
	const size_t newcnt = 2 * oldcnt;

	/* Reject overflow of either the element count or the byte size. */
	if (newcnt < oldcnt || newcnt > SIZE_MAX / elemsz)
		return false;

	grown = BJ_ALLOC(newcnt * elemsz);
	if (grown == NULL)
		return false;

	memcpy(grown, *jumps, oldcnt * elemsz);
	BJ_FREE(*jumps, oldcnt * elemsz);

	*jumps = grown;
	*size = newcnt;
	return true;
}
    358 
/*
 * Append a jump to a growable array of jumps, doubling the array's
 * capacity first when it is full.  Returns false when the array
 * could not be grown.
 */
static bool
append_jump(struct sljit_jump *jump, struct sljit_jump ***jumps,
    size_t *size, size_t *max_size)
{

	if (*size == *max_size) {
		if (!grow_jumps(jumps, max_size))
			return false;
	}

	(*jumps)[*size] = jump;
	(*size)++;
	return true;
}
    369 
    370 /*
    371  * Emit code for BPF_LD+BPF_B+BPF_ABS    A <- P[k:1].
    372  */
    373 static int
    374 emit_read8(struct sljit_compiler *compiler, sljit_si src, uint32_t k)
    375 {
    376 
    377 	return sljit_emit_op1(compiler,
    378 	    SLJIT_MOV_UB,
    379 	    BJ_AREG, 0,
    380 	    SLJIT_MEM1(src), k);
    381 }
    382 
    383 /*
    384  * Emit code for BPF_LD+BPF_H+BPF_ABS    A <- P[k:2].
    385  */
    386 static int
    387 emit_read16(struct sljit_compiler *compiler, sljit_si src, uint32_t k)
    388 {
    389 	int status;
    390 
    391 	BJ_ASSERT(k <= UINT32_MAX - 1);
    392 
    393 	/* A = buf[k]; */
    394 	status = sljit_emit_op1(compiler,
    395 	    SLJIT_MOV_UB,
    396 	    BJ_AREG, 0,
    397 	    SLJIT_MEM1(src), k);
    398 	if (status != SLJIT_SUCCESS)
    399 		return status;
    400 
    401 	/* tmp1 = buf[k+1]; */
    402 	status = sljit_emit_op1(compiler,
    403 	    SLJIT_MOV_UB,
    404 	    BJ_TMP1REG, 0,
    405 	    SLJIT_MEM1(src), k+1);
    406 	if (status != SLJIT_SUCCESS)
    407 		return status;
    408 
    409 	/* A = A << 8; */
    410 	status = sljit_emit_op2(compiler,
    411 	    SLJIT_SHL,
    412 	    BJ_AREG, 0,
    413 	    BJ_AREG, 0,
    414 	    SLJIT_IMM, 8);
    415 	if (status != SLJIT_SUCCESS)
    416 		return status;
    417 
    418 	/* A = A + tmp1; */
    419 	status = sljit_emit_op2(compiler,
    420 	    SLJIT_ADD,
    421 	    BJ_AREG, 0,
    422 	    BJ_AREG, 0,
    423 	    BJ_TMP1REG, 0);
    424 	return status;
    425 }
    426 
    427 /*
    428  * Emit code for BPF_LD+BPF_W+BPF_ABS    A <- P[k:4].
    429  */
    430 static int
    431 emit_read32(struct sljit_compiler *compiler, sljit_si src, uint32_t k)
    432 {
    433 	int status;
    434 
    435 	BJ_ASSERT(k <= UINT32_MAX - 3);
    436 
    437 	/* A = buf[k]; */
    438 	status = sljit_emit_op1(compiler,
    439 	    SLJIT_MOV_UB,
    440 	    BJ_AREG, 0,
    441 	    SLJIT_MEM1(src), k);
    442 	if (status != SLJIT_SUCCESS)
    443 		return status;
    444 
    445 	/* tmp1 = buf[k+1]; */
    446 	status = sljit_emit_op1(compiler,
    447 	    SLJIT_MOV_UB,
    448 	    BJ_TMP1REG, 0,
    449 	    SLJIT_MEM1(src), k+1);
    450 	if (status != SLJIT_SUCCESS)
    451 		return status;
    452 
    453 	/* A = A << 8; */
    454 	status = sljit_emit_op2(compiler,
    455 	    SLJIT_SHL,
    456 	    BJ_AREG, 0,
    457 	    BJ_AREG, 0,
    458 	    SLJIT_IMM, 8);
    459 	if (status != SLJIT_SUCCESS)
    460 		return status;
    461 
    462 	/* A = A + tmp1; */
    463 	status = sljit_emit_op2(compiler,
    464 	    SLJIT_ADD,
    465 	    BJ_AREG, 0,
    466 	    BJ_AREG, 0,
    467 	    BJ_TMP1REG, 0);
    468 	if (status != SLJIT_SUCCESS)
    469 		return status;
    470 
    471 	/* tmp1 = buf[k+2]; */
    472 	status = sljit_emit_op1(compiler,
    473 	    SLJIT_MOV_UB,
    474 	    BJ_TMP1REG, 0,
    475 	    SLJIT_MEM1(src), k+2);
    476 	if (status != SLJIT_SUCCESS)
    477 		return status;
    478 
    479 	/* A = A << 8; */
    480 	status = sljit_emit_op2(compiler,
    481 	    SLJIT_SHL,
    482 	    BJ_AREG, 0,
    483 	    BJ_AREG, 0,
    484 	    SLJIT_IMM, 8);
    485 	if (status != SLJIT_SUCCESS)
    486 		return status;
    487 
    488 	/* A = A + tmp1; */
    489 	status = sljit_emit_op2(compiler,
    490 	    SLJIT_ADD,
    491 	    BJ_AREG, 0,
    492 	    BJ_AREG, 0,
    493 	    BJ_TMP1REG, 0);
    494 	if (status != SLJIT_SUCCESS)
    495 		return status;
    496 
    497 	/* tmp1 = buf[k+3]; */
    498 	status = sljit_emit_op1(compiler,
    499 	    SLJIT_MOV_UB,
    500 	    BJ_TMP1REG, 0,
    501 	    SLJIT_MEM1(src), k+3);
    502 	if (status != SLJIT_SUCCESS)
    503 		return status;
    504 
    505 	/* A = A << 8; */
    506 	status = sljit_emit_op2(compiler,
    507 	    SLJIT_SHL,
    508 	    BJ_AREG, 0,
    509 	    BJ_AREG, 0,
    510 	    SLJIT_IMM, 8);
    511 	if (status != SLJIT_SUCCESS)
    512 		return status;
    513 
    514 	/* A = A + tmp1; */
    515 	status = sljit_emit_op2(compiler,
    516 	    SLJIT_ADD,
    517 	    BJ_AREG, 0,
    518 	    BJ_AREG, 0,
    519 	    BJ_TMP1REG, 0);
    520 	return status;
    521 }
    522 
    523 #ifdef _KERNEL
    524 /*
    525  * Emit code for m_xword/m_xhalf/m_xbyte call.
    526  *
    527  * @pc BPF_LD+BPF_W+BPF_ABS    A <- P[k:4]
    528  *     BPF_LD+BPF_H+BPF_ABS    A <- P[k:2]
    529  *     BPF_LD+BPF_B+BPF_ABS    A <- P[k:1]
    530  *     BPF_LD+BPF_W+BPF_IND    A <- P[X+k:4]
    531  *     BPF_LD+BPF_H+BPF_IND    A <- P[X+k:2]
    532  *     BPF_LD+BPF_B+BPF_IND    A <- P[X+k:1]
    533  *     BPF_LDX+BPF_B+BPF_MSH   X <- 4*(P[k:1]&0xf)
    534  */
static int
emit_xcall(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const struct bpf_insn *pc, int dst, struct sljit_jump ***ret0,
    size_t *ret0_size, size_t *ret0_maxsize,
    uint32_t (*fn)(const struct mbuf *, uint32_t, int *))
{
#if BJ_XREG == SLJIT_RETURN_REG   || \
    BJ_XREG == SLJIT_SCRATCH_REG1 || \
    BJ_XREG == SLJIT_SCRATCH_REG2 || \
    BJ_XREG == SLJIT_SCRATCH_REG3
#error "Not supported assignment of registers."
#endif
	struct sljit_jump *jump;
	sljit_si save_reg;
	int status;

	/*
	 * BPF_LDX (the BPF_MSH case) writes its result to X, so A must
	 * be preserved around the call; all other callers write to A,
	 * so X is preserved instead (only needed if the program uses X
	 * at all, hence the BJ_HINT_XREG test below).
	 */
	save_reg = (BPF_CLASS(pc->code) == BPF_LDX) ? BJ_AREG : BJ_XREG;

	if (save_reg == BJ_AREG || (hints & BJ_HINT_XREG)) {
		/* save A or X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_UI, /* uint32_t destination */
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, reg),
		    save_reg, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * Prepare registers for fn(mbuf, k, &err) call.
	 */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_SCRATCH_REG1, 0,
	    BJ_BUF, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	if (BPF_CLASS(pc->code) == BPF_LD && BPF_MODE(pc->code) == BPF_IND) {
		if (pc->k == 0) {
			/* k = X; */
			status = sljit_emit_op1(compiler,
			    SLJIT_MOV,
			    SLJIT_SCRATCH_REG2, 0,
			    BJ_XREG, 0);
			if (status != SLJIT_SUCCESS)
				return status;
		} else {
			/*
			 * Guard X + pc->k against uint32_t overflow;
			 * on overflow jump to the common "return 0" exit
			 * collected in the ret0 array.
			 */
			/* if (X > UINT32_MAX - pc->k) return 0; */
			jump = sljit_emit_cmp(compiler,
			    SLJIT_C_GREATER,
			    BJ_XREG, 0,
			    SLJIT_IMM, UINT32_MAX - pc->k);
			if (jump == NULL)
				return SLJIT_ERR_ALLOC_FAILED;
			if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
				return SLJIT_ERR_ALLOC_FAILED;

			/* k = X + pc->k; */
			status = sljit_emit_op2(compiler,
			    SLJIT_ADD,
			    SLJIT_SCRATCH_REG2, 0,
			    BJ_XREG, 0,
			    SLJIT_IMM, (uint32_t)pc->k);
			if (status != SLJIT_SUCCESS)
				return status;
		}
	} else {
		/* k = pc->k */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    SLJIT_SCRATCH_REG2, 0,
		    SLJIT_IMM, (uint32_t)pc->k);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * The third argument of fn is an address on stack.
	 */
	status = sljit_get_local_base(compiler,
	    SLJIT_SCRATCH_REG3, 0,
	    offsetof(struct bpfjit_stack, err));
	if (status != SLJIT_SUCCESS)
		return status;

	/* fn(buf, k, &err); */
	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL3,
	    SLJIT_IMM, SLJIT_FUNC_OFFSET(fn));
	if (status != SLJIT_SUCCESS)
		return status;

	if (dst != SLJIT_RETURN_REG) {
		/* move return value to dst */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    dst, 0,
		    SLJIT_RETURN_REG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/* if (*err != 0) return 0; */
	jump = sljit_emit_cmp(compiler,
	    SLJIT_C_NOT_EQUAL|SLJIT_INT_OP,
	    SLJIT_MEM1(SLJIT_LOCALS_REG),
	    offsetof(struct bpfjit_stack, err),
	    SLJIT_IMM, 0);
	if (jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
		return SLJIT_ERR_ALLOC_FAILED;

	if (save_reg == BJ_AREG || (hints & BJ_HINT_XREG)) {
		/* restore A or X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_UI, /* uint32_t source */
		    save_reg, 0,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, reg));
		if (status != SLJIT_SUCCESS)
			return status;
	}

	return SLJIT_SUCCESS;
}
    664 #endif
    665 
    666 /*
    667  * Emit code for BPF_COP and BPF_COPX instructions.
    668  */
static int
emit_cop(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const bpf_ctx_t *bc, const struct bpf_insn *pc,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
#if BJ_XREG    == SLJIT_RETURN_REG   || \
    BJ_XREG    == SLJIT_SCRATCH_REG1 || \
    BJ_XREG    == SLJIT_SCRATCH_REG2 || \
    BJ_XREG    == SLJIT_SCRATCH_REG3 || \
    BJ_TMP3REG == SLJIT_SCRATCH_REG1 || \
    BJ_TMP3REG == SLJIT_SCRATCH_REG2 || \
    BJ_TMP3REG == SLJIT_SCRATCH_REG3
#error "Not supported assignment of registers."
#endif

	struct sljit_jump *jump;
	sljit_si call_reg;
	sljit_sw call_off;
	int status;

	BJ_ASSERT(bc != NULL && bc->copfuncs != NULL);

	/*
	 * X lives in a scratch register, so preserve it across the
	 * call when the program actually uses X (BJ_HINT_LDX).
	 */
	if (hints & BJ_HINT_LDX) {
		/* save X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_UI, /* uint32_t destination */
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, reg),
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * BPF_COP: the coprocessor index is a constant, so call the
	 * function directly.  BPF_COPX: the index is in X, so bounds-
	 * check it and load the function pointer into BJ_TMP3REG.
	 */
	if (BPF_MISCOP(pc->code) == BPF_COP) {
		call_reg = SLJIT_IMM;
		call_off = SLJIT_FUNC_OFFSET(bc->copfuncs[pc->k]);
	} else {
		/* if (X >= bc->nfuncs) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_GREATER_EQUAL,
		    BJ_XREG, 0,
		    SLJIT_IMM, bc->nfuncs);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;

		/* tmp1 = ctx; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, ctx));
		if (status != SLJIT_SUCCESS)
			return status;

		/* tmp1 = ctx->copfuncs; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(BJ_TMP1REG),
		    offsetof(struct bpf_ctx, copfuncs));
		if (status != SLJIT_SUCCESS)
			return status;

		/* tmp2 = X; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_TMP2REG, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;

		/* tmp3 = ctx->copfuncs[tmp2]; */
		call_reg = BJ_TMP3REG;
		call_off = 0;
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    call_reg, call_off,
		    SLJIT_MEM2(BJ_TMP1REG, BJ_TMP2REG),
		    SLJIT_WORD_SHIFT);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * Copy bpf_copfunc_t arguments to registers.
	 * The call is copfunc(ctx, args, A):
	 * REG1 = ctx, REG2 = args, REG3 = A.
	 */
#if BJ_AREG != SLJIT_SCRATCH_REG3
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UI,
	    SLJIT_SCRATCH_REG3, 0,
	    BJ_AREG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    SLJIT_SCRATCH_REG1, 0,
	    SLJIT_MEM1(SLJIT_LOCALS_REG),
	    offsetof(struct bpfjit_stack, ctx));
	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    SLJIT_SCRATCH_REG2, 0,
	    BJ_ARGS, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL3, call_reg, call_off);
	if (status != SLJIT_SUCCESS)
		return status;

	/* The copfunc's return value becomes the new A. */
#if BJ_AREG != SLJIT_RETURN_REG
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_RETURN_REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	if (hints & BJ_HINT_LDX) {
		/* restore X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_UI, /* uint32_t source */
		    BJ_XREG, 0,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, reg));
		if (status != SLJIT_SUCCESS)
			return status;
	}

	return SLJIT_SUCCESS;
}
    808 
    809 /*
    810  * Generate code for
    811  * BPF_LD+BPF_W+BPF_ABS    A <- P[k:4]
    812  * BPF_LD+BPF_H+BPF_ABS    A <- P[k:2]
    813  * BPF_LD+BPF_B+BPF_ABS    A <- P[k:1]
    814  * BPF_LD+BPF_W+BPF_IND    A <- P[X+k:4]
    815  * BPF_LD+BPF_H+BPF_IND    A <- P[X+k:2]
    816  * BPF_LD+BPF_B+BPF_IND    A <- P[X+k:1]
    817  */
static int
emit_pkt_read(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status = SLJIT_ERR_ALLOC_FAILED;
	uint32_t width;
	sljit_si ld_reg;
	struct sljit_jump *jump;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *over_mchain_jump;
	/* Caller already emitted a buflen check iff it passed a jump. */
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	/*
	 * In the kernel, buflen == 0 signals an mbuf chain; route that
	 * case to the slow path below (emit the check here if the
	 * caller has not supplied one).
	 */
	if (to_mchain_jump == NULL) {
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_C_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	ld_reg = BJ_BUF;
	width = read_width(pc);

	if (BPF_MODE(pc->code) == BPF_IND) {
		/* tmp1 = buflen - (pc->k + width); */
		status = sljit_emit_op2(compiler,
		    SLJIT_SUB,
		    BJ_TMP1REG, 0,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, k + width);
		if (status != SLJIT_SUCCESS)
			return status;

		/* ld_reg = buf + X; */
		ld_reg = BJ_TMP2REG;
		status = sljit_emit_op2(compiler,
		    SLJIT_ADD,
		    ld_reg, 0,
		    BJ_BUF, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;

		/* if (tmp1 < X) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_LESS,
		    BJ_TMP1REG, 0,
		    BJ_XREG, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	/* Fast path: direct load from the contiguous buffer. */
	switch (width) {
	case 4:
		status = emit_read32(compiler, ld_reg, k);
		break;
	case 2:
		status = emit_read16(compiler, ld_reg, k);
		break;
	case 1:
		status = emit_read8(compiler, ld_reg, k);
		break;
	}

	if (status != SLJIT_SUCCESS)
		return status;

#ifdef _KERNEL
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	/* Slow path: fetch the bytes out of the mbuf chain via xcall. */
	switch (width) {
	case 4:
		status = emit_xcall(compiler, hints, pc, BJ_AREG,
		    ret0, ret0_size, ret0_maxsize, &m_xword);
		break;
	case 2:
		status = emit_xcall(compiler, hints, pc, BJ_AREG,
		    ret0, ret0_size, ret0_maxsize, &m_xhalf);
		break;
	case 1:
		status = emit_xcall(compiler, hints, pc, BJ_AREG,
		    ret0, ret0_size, ret0_maxsize, &m_xbyte);
		break;
	}

	if (status != SLJIT_SUCCESS)
		return status;

	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	return SLJIT_SUCCESS;
}
    943 
    944 static int
    945 emit_memload(struct sljit_compiler *compiler,
    946     sljit_si dst, uint32_t k, size_t extwords)
    947 {
    948 	int status;
    949 	sljit_si src;
    950 	sljit_sw srcw;
    951 
    952 	srcw = k * sizeof(uint32_t);
    953 
    954 	if (extwords == 0) {
    955 		src = SLJIT_MEM1(SLJIT_LOCALS_REG);
    956 		srcw += offsetof(struct bpfjit_stack, mem);
    957 	} else {
    958 		/* copy extmem pointer to the tmp1 register */
    959 		status = sljit_emit_op1(compiler,
    960 		    SLJIT_MOV_P,
    961 		    BJ_TMP1REG, 0,
    962 		    SLJIT_MEM1(SLJIT_LOCALS_REG),
    963 		    offsetof(struct bpfjit_stack, extmem));
    964 		if (status != SLJIT_SUCCESS)
    965 			return status;
    966 		src = SLJIT_MEM1(BJ_TMP1REG);
    967 	}
    968 
    969 	return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, 0, src, srcw);
    970 }
    971 
    972 static int
    973 emit_memstore(struct sljit_compiler *compiler,
    974     sljit_si src, uint32_t k, size_t extwords)
    975 {
    976 	int status;
    977 	sljit_si dst;
    978 	sljit_sw dstw;
    979 
    980 	dstw = k * sizeof(uint32_t);
    981 
    982 	if (extwords == 0) {
    983 		dst = SLJIT_MEM1(SLJIT_LOCALS_REG);
    984 		dstw += offsetof(struct bpfjit_stack, mem);
    985 	} else {
    986 		/* copy extmem pointer to the tmp1 register */
    987 		status = sljit_emit_op1(compiler,
    988 		    SLJIT_MOV_P,
    989 		    BJ_TMP1REG, 0,
    990 		    SLJIT_MEM1(SLJIT_LOCALS_REG),
    991 		    offsetof(struct bpfjit_stack, extmem));
    992 		if (status != SLJIT_SUCCESS)
    993 			return status;
    994 		dst = SLJIT_MEM1(BJ_TMP1REG);
    995 	}
    996 
    997 	return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, dstw, src, 0);
    998 }
    999 
/*
 * Emit code for BPF_LDX+BPF_B+BPF_MSH    X <- 4*(P[k:1]&0xf).
 *
 * @to_mchain_jump  previously emitted jump to the mbuf-chain slow
 *                  path, or NULL if none has been emitted yet.
 * @ret0            growable array of jumps to the "return 0" epilogue;
 *                  jumps created here are appended via append_jump().
 *
 * Returns SLJIT_SUCCESS or an sljit error code.
 */
static int
emit_msh(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *jump, *over_mchain_jump;
	/* Caller already emitted a buflen test iff to_mchain_jump != NULL. */
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	if (to_mchain_jump == NULL) {
		/* No prior length check: if (buflen == 0) go to mchain. */
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_C_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	/* tmp1 = buf[k] */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

#ifdef _KERNEL
	/* Fast path done; skip over the mchain handler. */
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	/* tmp1 = P[k:1] fetched through the external m_xbyte helper. */
	status = emit_xcall(compiler, hints, pc, BJ_TMP1REG,
	    ret0, ret0_size, ret0_maxsize, &m_xbyte);
	if (status != SLJIT_SUCCESS)
		return status;

	/* Join point for the fast path. */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	/* tmp1 &= 0xf */
	status = sljit_emit_op2(compiler,
	    SLJIT_AND,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 0xf);
	if (status != SLJIT_SUCCESS)
		return status;

	/* X = tmp1 << 2 */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_XREG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 2);
	if (status != SLJIT_SUCCESS)
		return status;

	return SLJIT_SUCCESS;
}
   1089 
   1090 /*
   1091  * Emit code for A = A / k or A = A % k when k is a power of 2.
   1092  * @pc BPF_DIV or BPF_MOD instruction.
   1093  */
   1094 static int
   1095 emit_pow2_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
   1096 {
   1097 	uint32_t k = pc->k;
   1098 	int status = SLJIT_SUCCESS;
   1099 
   1100 	BJ_ASSERT(k != 0 && (k & (k - 1)) == 0);
   1101 
   1102 	if (BPF_OP(pc->code) == BPF_MOD) {
   1103 		status = sljit_emit_op2(compiler,
   1104 		    SLJIT_AND,
   1105 		    BJ_AREG, 0,
   1106 		    BJ_AREG, 0,
   1107 		    SLJIT_IMM, k - 1);
   1108 	} else {
   1109 		int shift = 0;
   1110 
   1111 		/*
   1112 		 * Do shift = __builtin_ctz(k).
   1113 		 * The loop is slower, but that's ok.
   1114 		 */
   1115 		while (k > 1) {
   1116 			k >>= 1;
   1117 			shift++;
   1118 		}
   1119 
   1120 		if (shift != 0) {
   1121 			status = sljit_emit_op2(compiler,
   1122 			    SLJIT_LSHR|SLJIT_INT_OP,
   1123 			    BJ_AREG, 0,
   1124 			    BJ_AREG, 0,
   1125 			    SLJIT_IMM, shift);
   1126 		}
   1127 	}
   1128 
   1129 	return status;
   1130 }
   1131 
   1132 #if !defined(BPFJIT_USE_UDIV)
   1133 static sljit_uw
   1134 divide(sljit_uw x, sljit_uw y)
   1135 {
   1136 
   1137 	return (uint32_t)x / (uint32_t)y;
   1138 }
   1139 
   1140 static sljit_uw
   1141 modulus(sljit_uw x, sljit_uw y)
   1142 {
   1143 
   1144 	return (uint32_t)x % (uint32_t)y;
   1145 }
   1146 #endif
   1147 
   1148 /*
   1149  * Emit code for A = A / div or A = A % div.
   1150  * @pc BPF_DIV or BPF_MOD instruction.
   1151  */
   1152 static int
   1153 emit_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
   1154 {
   1155 	int status;
   1156 	const bool div = BPF_OP(pc->code) == BPF_DIV;
   1157 	const bool xreg = BPF_SRC(pc->code) == BPF_X;
   1158 
   1159 #if BJ_XREG == SLJIT_RETURN_REG   || \
   1160     BJ_XREG == SLJIT_SCRATCH_REG1 || \
   1161     BJ_XREG == SLJIT_SCRATCH_REG2 || \
   1162     BJ_AREG == SLJIT_SCRATCH_REG2
   1163 #error "Not supported assignment of registers."
   1164 #endif
   1165 
   1166 #if BJ_AREG != SLJIT_SCRATCH_REG1
   1167 	status = sljit_emit_op1(compiler,
   1168 	    SLJIT_MOV,
   1169 	    SLJIT_SCRATCH_REG1, 0,
   1170 	    BJ_AREG, 0);
   1171 	if (status != SLJIT_SUCCESS)
   1172 		return status;
   1173 #endif
   1174 
   1175 	status = sljit_emit_op1(compiler,
   1176 	    SLJIT_MOV,
   1177 	    SLJIT_SCRATCH_REG2, 0,
   1178 	    xreg ? BJ_XREG : SLJIT_IMM,
   1179 	    xreg ? 0 : (uint32_t)pc->k);
   1180 	if (status != SLJIT_SUCCESS)
   1181 		return status;
   1182 
   1183 #if defined(BPFJIT_USE_UDIV)
   1184 	status = sljit_emit_op0(compiler, SLJIT_UDIV|SLJIT_INT_OP);
   1185 
   1186 #if BJ_AREG != SLJIT_SCRATCH_REG1
   1187 	status = sljit_emit_op1(compiler,
   1188 	    SLJIT_MOV,
   1189 	    BJ_AREG, 0,
   1190 	    SLJIT_SCRATCH_REG1, 0);
   1191 	if (status != SLJIT_SUCCESS)
   1192 		return status;
   1193 #endif
   1194 #else
   1195 	status = sljit_emit_ijump(compiler,
   1196 	    SLJIT_CALL2,
   1197 	    SLJIT_IMM, div ? SLJIT_FUNC_OFFSET(divide) :
   1198 		SLJIT_FUNC_OFFSET(modulus));
   1199 
   1200 #if BJ_AREG != SLJIT_RETURN_REG
   1201 	status = sljit_emit_op1(compiler,
   1202 	    SLJIT_MOV,
   1203 	    BJ_AREG, 0,
   1204 	    SLJIT_RETURN_REG, 0);
   1205 	if (status != SLJIT_SUCCESS)
   1206 		return status;
   1207 #endif
   1208 #endif
   1209 
   1210 	return status;
   1211 }
   1212 
/*
 * Return true if pc is a "read from packet" instruction.
 * If length is not NULL and return value is true, *length will
 * be set to a safe length required to read a packet.
 */
static bool
read_pkt_insn(const struct bpf_insn *pc, bpfjit_abc_length_t *length)
{
	bool rv;
	bpfjit_abc_length_t width;

	switch (BPF_CLASS(pc->code)) {
	default:
		/* Other classes never touch packet bytes. */
		rv = false;
		break;

	case BPF_LD:
		/* Only absolute and indirect loads read the packet. */
		rv = BPF_MODE(pc->code) == BPF_ABS ||
		     BPF_MODE(pc->code) == BPF_IND;
		/*
		 * NOTE(review): width is only assigned when rv is true;
		 * assumes read_width() yields a usable width for every
		 * ABS/IND load — confirm against read_width().
		 */
		if (rv)
			width = read_width(pc);
		break;

	case BPF_LDX:
		/* BPF_LDX+BPF_B+BPF_MSH reads a single packet byte. */
		rv = pc->code == (BPF_LDX|BPF_B|BPF_MSH);
		width = 1;
		break;
	}

	if (rv && length != NULL) {
		/*
		 * Values greater than UINT32_MAX will generate
		 * unconditional "return 0".
		 */
		*length = (uint32_t)pc->k + width;
	}

	return rv;
}
   1252 
   1253 static void
   1254 optimize_init(struct bpfjit_insn_data *insn_dat, size_t insn_count)
   1255 {
   1256 	size_t i;
   1257 
   1258 	for (i = 0; i < insn_count; i++) {
   1259 		SLIST_INIT(&insn_dat[i].bjumps);
   1260 		insn_dat[i].invalid = BJ_INIT_NOBITS;
   1261 	}
   1262 }
   1263 
/*
 * The function divides instructions into blocks. Destination of a jump
 * instruction starts a new block. BPF_RET and BPF_JMP instructions
 * terminate a block. Blocks are linear, that is, there are no jumps out
 * from the middle of a block and there are no jumps in to the middle of
 * a block.
 *
 * The function also sets bits in *initmask for memwords that
 * need to be initialized to zero. Note that this set should be empty
 * for any valid kernel filter program.
 *
 * As a side effect it collects *hints about which features (X register,
 * COP calls, MSH, etc.) the generated code will need, marks unreachable
 * instructions, and registers every jump with its destination's bjumps
 * list. Returns false if a jump leaves the program.
 */
static bool
optimize_pass1(const bpf_ctx_t *bc, const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count,
    bpf_memword_init_t *initmask, bpfjit_hint_t *hints)
{
	struct bpfjit_jump *jtf;
	size_t i;
	uint32_t jt, jf;
	bpfjit_abc_length_t length;
	bpf_memword_init_t invalid; /* borrowed from bpf_filter() */
	bool unreachable;

	const size_t memwords = GET_MEMWORDS(bc);

	*hints = 0;
	*initmask = BJ_INIT_NOBITS;

	unreachable = false;
	/* Initially every register and memword is uninitialized. */
	invalid = ~BJ_INIT_NOBITS;

	for (i = 0; i < insn_count; i++) {
		/* A jump destination restarts reachable code. */
		if (!SLIST_EMPTY(&insn_dat[i].bjumps))
			unreachable = false;
		insn_dat[i].unreachable = unreachable;

		if (unreachable)
			continue;

		/* Merge the invalid-set carried in by incoming jumps. */
		invalid |= insn_dat[i].invalid;

		/* Reads beyond UINT32_MAX always fail; code after is dead. */
		if (read_pkt_insn(&insns[i], &length) && length > UINT32_MAX)
			unreachable = true;

		switch (BPF_CLASS(insns[i].code)) {
		case BPF_RET:
			/* Returning an uninitialized A must zero-init it. */
			if (BPF_RVAL(insns[i].code) == BPF_A)
				*initmask |= invalid & BJ_INIT_ABIT;

			unreachable = true;
			continue;

		case BPF_LD:
			if (BPF_MODE(insns[i].code) == BPF_ABS)
				*hints |= BJ_HINT_ABS;

			if (BPF_MODE(insns[i].code) == BPF_IND) {
				*hints |= BJ_HINT_IND | BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_XBIT;
			}

			/* Loading an uninitialized memword needs init. */
			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < memwords) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			/* A is defined from here on. */
			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_LDX:
			*hints |= BJ_HINT_XREG | BJ_HINT_LDX;

			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < memwords) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			if (BPF_MODE(insns[i].code) == BPF_MSH &&
			    BPF_SIZE(insns[i].code) == BPF_B) {
				*hints |= BJ_HINT_MSH;
			}

			/* X is defined from here on. */
			invalid &= ~BJ_INIT_XBIT;
			continue;

		case BPF_ST:
			/* Storing A reads it; M[k] becomes defined. */
			*initmask |= invalid & BJ_INIT_ABIT;

			if ((uint32_t)insns[i].k < memwords)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_STX:
			/* Storing X reads it; M[k] becomes defined. */
			*hints |= BJ_HINT_XREG;
			*initmask |= invalid & BJ_INIT_XBIT;

			if ((uint32_t)insns[i].k < memwords)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_ALU:
			/* ALU ops read A (and X for BPF_X sources). */
			*initmask |= invalid & BJ_INIT_ABIT;

			if (insns[i].code != (BPF_ALU|BPF_NEG) &&
			    BPF_SRC(insns[i].code) == BPF_X) {
				*hints |= BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_XBIT;
			}

			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_MISC:
			switch (BPF_MISCOP(insns[i].code)) {
			case BPF_TAX: // X <- A
				*hints |= BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_ABIT;
				invalid &= ~BJ_INIT_XBIT;
				continue;

			case BPF_TXA: // A <- X
				*hints |= BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_XBIT;
				invalid &= ~BJ_INIT_ABIT;
				continue;

			case BPF_COPX:
				*hints |= BJ_HINT_XREG | BJ_HINT_COPX;
				/* FALLTHROUGH */

			case BPF_COP:
				*hints |= BJ_HINT_COP;
				*initmask |= invalid & BJ_INIT_ABIT;
				invalid &= ~BJ_INIT_ABIT;
				continue;
			}

			continue;

		case BPF_JMP:
			/* Initialize abc_length for ABC pass. */
			insn_dat[i].u.jdata.abc_length = MAX_ABC_LENGTH;

			if (BPF_OP(insns[i].code) == BPF_JA) {
				jt = jf = insns[i].k;
			} else {
				jt = insns[i].jt;
				jf = insns[i].jf;
			}

			/* Reject jumps past the end of the program. */
			if (jt >= insn_count - (i + 1) ||
			    jf >= insn_count - (i + 1)) {
				return false;
			}

			/* Both branches skip ahead: fall-through is dead. */
			if (jt > 0 && jf > 0)
				unreachable = true;

			jt += i + 1;
			jf += i + 1;

			/* Register this jump with both destinations. */
			jtf = insn_dat[i].u.jdata.jtf;

			jtf[0].jdata = &insn_dat[i].u.jdata;
			SLIST_INSERT_HEAD(&insn_dat[jt].bjumps,
			    &jtf[0], entries);

			if (jf != jt) {
				jtf[1].jdata = &insn_dat[i].u.jdata;
				SLIST_INSERT_HEAD(&insn_dat[jf].bjumps,
				    &jtf[1], entries);
			}

			/* Hand the invalid-set to both destinations. */
			insn_dat[jf].invalid |= invalid;
			insn_dat[jt].invalid |= invalid;
			invalid = 0;

			continue;
		}
	}

	return true;
}
   1449 
/*
 * Array Bounds Check Elimination (ABC) pass.
 *
 * Walks the program backwards and computes, per read instruction,
 * abc_length: the largest packet length some later read on every
 * path is known to require. A single up-front length check can then
 * cover several subsequent reads.
 */
static void
optimize_pass2(const bpf_ctx_t *bc, const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	struct bpfjit_jump *jmp;
	const struct bpf_insn *pc;
	struct bpfjit_insn_data *pd;
	size_t i;
	bpfjit_abc_length_t length, abc_length = 0;

	const size_t extwords = GET_EXTWORDS(bc);

	/* Backward walk so abc_length flows from later reads to earlier. */
	for (i = insn_count; i != 0; i--) {
		pc = &insns[i-1];
		pd = &insn_dat[i-1];

		if (pd->unreachable)
			continue;

		switch (BPF_CLASS(pc->code)) {
		case BPF_RET:
			/*
			 * It's quite common for bpf programs to
			 * check packet bytes in increasing order
			 * and return zero if bytes don't match
			 * specified critetion. Such programs disable
			 * ABC optimization completely because for
			 * every jump there is a branch with no read
			 * instruction.
			 * With no side effects, BPF_STMT(BPF_RET+BPF_K, 0)
			 * is indistinguishable from out-of-bound load.
			 * Therefore, abc_length can be set to
			 * MAX_ABC_LENGTH and enable ABC for many
			 * bpf programs.
			 * If this optimization encounters any
			 * instruction with a side effect, it will
			 * reset abc_length.
			 */
			if (BPF_RVAL(pc->code) == BPF_K && pc->k == 0)
				abc_length = MAX_ABC_LENGTH;
			else
				abc_length = 0;
			break;

		case BPF_MISC:
			if (BPF_MISCOP(pc->code) == BPF_COP ||
			    BPF_MISCOP(pc->code) == BPF_COPX) {
				/* COP instructions can have side effects. */
				abc_length = 0;
			}
			break;

		case BPF_ST:
		case BPF_STX:
			if (extwords != 0) {
				/* Write to memory is visible after a call. */
				abc_length = 0;
			}
			break;

		case BPF_JMP:
			/* Restore the value merged in from both targets. */
			abc_length = pd->u.jdata.abc_length;
			break;

		default:
			if (read_pkt_insn(pc, &length)) {
				/* This read extends the requirement. */
				if (abc_length < length)
					abc_length = length;
				pd->u.rdata.abc_length = abc_length;
			}
			break;
		}

		/*
		 * Merge into every jump landing here: a jump may only
		 * assume the minimum over all its possible successors.
		 */
		SLIST_FOREACH(jmp, &pd->bjumps, entries) {
			if (jmp->jdata->abc_length > abc_length)
				jmp->jdata->abc_length = abc_length;
		}
	}
}
   1532 
/*
 * Forward companion of the ABC pass: track checked_length, the number
 * of packet bytes already guaranteed by previously placed checks, and
 * record per read instruction the check_length it must still emit
 * (0 when an earlier check already covers it).
 */
static void
optimize_pass3(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	struct bpfjit_jump *jmp;
	size_t i;
	bpfjit_abc_length_t checked_length = 0;

	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable)
			continue;

		/*
		 * At a join point only the minimum guaranteed by all
		 * incoming jumps can be relied upon.
		 */
		SLIST_FOREACH(jmp, &insn_dat[i].bjumps, entries) {
			if (jmp->jdata->checked_length < checked_length)
				checked_length = jmp->jdata->checked_length;
		}

		if (BPF_CLASS(insns[i].code) == BPF_JMP) {
			/* Remember the guarantee for both branch targets. */
			insn_dat[i].u.jdata.checked_length = checked_length;
		} else if (read_pkt_insn(&insns[i], NULL)) {
			struct bpfjit_read_pkt_data *rdata =
			    &insn_dat[i].u.rdata;
			rdata->check_length = 0;
			/* Emit a check only when the guarantee must grow. */
			if (checked_length < rdata->abc_length) {
				checked_length = rdata->abc_length;
				rdata->check_length = checked_length;
			}
		}
	}
}
   1563 
   1564 static bool
   1565 optimize(const bpf_ctx_t *bc, const struct bpf_insn *insns,
   1566     struct bpfjit_insn_data *insn_dat, size_t insn_count,
   1567     bpf_memword_init_t *initmask, bpfjit_hint_t *hints)
   1568 {
   1569 
   1570 	optimize_init(insn_dat, insn_count);
   1571 
   1572 	if (!optimize_pass1(bc, insns, insn_dat, insn_count, initmask, hints))
   1573 		return false;
   1574 
   1575 	optimize_pass2(bc, insns, insn_dat, insn_count);
   1576 	optimize_pass3(insns, insn_dat, insn_count);
   1577 
   1578 	return true;
   1579 }
   1580 
   1581 /*
   1582  * Convert BPF_ALU operations except BPF_NEG and BPF_DIV to sljit operation.
   1583  */
   1584 static int
   1585 bpf_alu_to_sljit_op(const struct bpf_insn *pc)
   1586 {
   1587 
   1588 	/*
   1589 	 * Note: all supported 64bit arches have 32bit multiply
   1590 	 * instruction so SLJIT_INT_OP doesn't have any overhead.
   1591 	 */
   1592 	switch (BPF_OP(pc->code)) {
   1593 	case BPF_ADD: return SLJIT_ADD;
   1594 	case BPF_SUB: return SLJIT_SUB;
   1595 	case BPF_MUL: return SLJIT_MUL|SLJIT_INT_OP;
   1596 	case BPF_OR:  return SLJIT_OR;
   1597 	case BPF_XOR: return SLJIT_XOR;
   1598 	case BPF_AND: return SLJIT_AND;
   1599 	case BPF_LSH: return SLJIT_SHL;
   1600 	case BPF_RSH: return SLJIT_LSHR|SLJIT_INT_OP;
   1601 	default:
   1602 		BJ_ASSERT(false);
   1603 		return 0;
   1604 	}
   1605 }
   1606 
   1607 /*
   1608  * Convert BPF_JMP operations except BPF_JA to sljit condition.
   1609  */
   1610 static int
   1611 bpf_jmp_to_sljit_cond(const struct bpf_insn *pc, bool negate)
   1612 {
   1613 	/*
   1614 	 * Note: all supported 64bit arches have 32bit comparison
   1615 	 * instructions so SLJIT_INT_OP doesn't have any overhead.
   1616 	 */
   1617 	int rv = SLJIT_INT_OP;
   1618 
   1619 	switch (BPF_OP(pc->code)) {
   1620 	case BPF_JGT:
   1621 		rv |= negate ? SLJIT_C_LESS_EQUAL : SLJIT_C_GREATER;
   1622 		break;
   1623 	case BPF_JGE:
   1624 		rv |= negate ? SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL;
   1625 		break;
   1626 	case BPF_JEQ:
   1627 		rv |= negate ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL;
   1628 		break;
   1629 	case BPF_JSET:
   1630 		rv |= negate ? SLJIT_C_EQUAL : SLJIT_C_NOT_EQUAL;
   1631 		break;
   1632 	default:
   1633 		BJ_ASSERT(false);
   1634 	}
   1635 
   1636 	return rv;
   1637 }
   1638 
   1639 /*
   1640  * Convert BPF_K and BPF_X to sljit register.
   1641  */
   1642 static int
   1643 kx_to_reg(const struct bpf_insn *pc)
   1644 {
   1645 
   1646 	switch (BPF_SRC(pc->code)) {
   1647 	case BPF_K: return SLJIT_IMM;
   1648 	case BPF_X: return BJ_XREG;
   1649 	default:
   1650 		BJ_ASSERT(false);
   1651 		return 0;
   1652 	}
   1653 }
   1654 
   1655 static sljit_sw
   1656 kx_to_reg_arg(const struct bpf_insn *pc)
   1657 {
   1658 
   1659 	switch (BPF_SRC(pc->code)) {
   1660 	case BPF_K: return (uint32_t)pc->k; /* SLJIT_IMM, pc->k, */
   1661 	case BPF_X: return 0;               /* BJ_XREG, 0,      */
   1662 	default:
   1663 		BJ_ASSERT(false);
   1664 		return 0;
   1665 	}
   1666 }
   1667 
/*
 * Translate the whole (already optimized) BPF program into sljit
 * calls on the given compiler. Returns true on success. Jumps that
 * must yield "return 0" (failed length checks, division by zero,
 * failed external calls) are collected in the ret0 array and patched
 * to a shared epilogue after the main loop.
 */
static bool
generate_insn_code(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const bpf_ctx_t *bc, const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	/* a list of jumps to out-of-bound return from a generated function */
	struct sljit_jump **ret0;
	size_t ret0_size, ret0_maxsize;

	struct sljit_jump *jump;
	struct sljit_label *label;
	const struct bpf_insn *pc;
	struct bpfjit_jump *bjump, *jtf;
	struct sljit_jump *to_mchain_jump;

	size_t i;
	int status;
	int branching, negate;
	unsigned int rval, mode, src, op;
	uint32_t jt, jf;

	bool unconditional_ret;
	bool rv;

	const size_t extwords = GET_EXTWORDS(bc);
	const size_t memwords = GET_MEMWORDS(bc);

	ret0 = NULL;
	rv = false;

	ret0_size = 0;
	ret0_maxsize = 64;
	ret0 = BJ_ALLOC(ret0_maxsize * sizeof(ret0[0]));
	if (ret0 == NULL)
		goto fail;

	/* reset sjump members of jdata */
	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable ||
		    BPF_CLASS(insns[i].code) != BPF_JMP) {
			continue;
		}

		jtf = insn_dat[i].u.jdata.jtf;
		jtf[0].sjump = jtf[1].sjump = NULL;
	}

	/* main loop */
	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable)
			continue;

		/*
		 * Resolve jumps to the current insn.
		 */
		label = NULL;
		SLIST_FOREACH(bjump, &insn_dat[i].bjumps, entries) {
			if (bjump->sjump != NULL) {
				if (label == NULL)
					label = sljit_emit_label(compiler);
				if (label == NULL)
					goto fail;
				sljit_set_label(bjump->sjump, label);
			}
		}

		to_mchain_jump = NULL;
		unconditional_ret = false;

		/* Emit the length check computed by the ABC passes. */
		if (read_pkt_insn(&insns[i], NULL)) {
			if (insn_dat[i].u.rdata.check_length > UINT32_MAX) {
				/* Jump to "return 0" unconditionally. */
				unconditional_ret = true;
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			} else if (insn_dat[i].u.rdata.check_length > 0) {
				/* if (buflen < check_length) return 0; */
				jump = sljit_emit_cmp(compiler,
				    SLJIT_C_LESS,
				    BJ_BUFLEN, 0,
				    SLJIT_IMM,
				    insn_dat[i].u.rdata.check_length);
				if (jump == NULL)
					goto fail;
#ifdef _KERNEL
				/* In-kernel: short buffer may be an mbuf
				 * chain; route to the mchain handler. */
				to_mchain_jump = jump;
#else
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
#endif
			}
		}

		pc = &insns[i];
		switch (BPF_CLASS(pc->code)) {

		default:
			goto fail;

		case BPF_LD:
			/* BPF_LD+BPF_IMM          A <- k */
			if (pc->code == (BPF_LD|BPF_IMM)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LD+BPF_MEM          A <- M[k] */
			if (pc->code == (BPF_LD|BPF_MEM)) {
				if ((uint32_t)pc->k >= memwords)
					goto fail;
				status = emit_memload(compiler,
				    BJ_AREG, pc->k, extwords);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LD+BPF_W+BPF_LEN    A <- len */
			if (pc->code == (BPF_LD|BPF_W|BPF_LEN)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV, /* size_t source */
				    BJ_AREG, 0,
				    SLJIT_MEM1(BJ_ARGS),
				    offsetof(struct bpf_args, wirelen));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			mode = BPF_MODE(pc->code);
			if (mode != BPF_ABS && mode != BPF_IND)
				goto fail;

			/* Skip dead reads past an unconditional return 0. */
			if (unconditional_ret)
				continue;

			status = emit_pkt_read(compiler, hints, pc,
			    to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_LDX:
			mode = BPF_MODE(pc->code);

			/* BPF_LDX+BPF_W+BPF_IMM    X <- k */
			if (mode == BPF_IMM) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_XREG, 0,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_W+BPF_LEN    X <- len */
			if (mode == BPF_LEN) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV, /* size_t source */
				    BJ_XREG, 0,
				    SLJIT_MEM1(BJ_ARGS),
				    offsetof(struct bpf_args, wirelen));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_W+BPF_MEM    X <- M[k] */
			if (mode == BPF_MEM) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				if ((uint32_t)pc->k >= memwords)
					goto fail;
				status = emit_memload(compiler,
				    BJ_XREG, pc->k, extwords);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_B+BPF_MSH    X <- 4*(P[k:1]&0xf) */
			if (mode != BPF_MSH || BPF_SIZE(pc->code) != BPF_B)
				goto fail;

			/* Skip dead reads past an unconditional return 0. */
			if (unconditional_ret)
				continue;

			status = emit_msh(compiler, hints, pc,
			    to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_ST:
			if (pc->code != BPF_ST ||
			    (uint32_t)pc->k >= memwords) {
				goto fail;
			}

			status = emit_memstore(compiler,
			    BJ_AREG, pc->k, extwords);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_STX:
			if (pc->code != BPF_STX ||
			    (uint32_t)pc->k >= memwords) {
				goto fail;
			}

			status = emit_memstore(compiler,
			    BJ_XREG, pc->k, extwords);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_ALU:
			if (pc->code == (BPF_ALU|BPF_NEG)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_NEG,
				    BJ_AREG, 0,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			op = BPF_OP(pc->code);
			if (op != BPF_DIV && op != BPF_MOD) {
				status = sljit_emit_op2(compiler,
				    bpf_alu_to_sljit_op(pc),
				    BJ_AREG, 0,
				    BJ_AREG, 0,
				    kx_to_reg(pc), kx_to_reg_arg(pc));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_DIV/BPF_MOD */

			src = BPF_SRC(pc->code);
			if (src != BPF_X && src != BPF_K)
				goto fail;

			/* division by zero? */
			if (src == BPF_X) {
				/* Runtime test: if (X == 0) return 0; */
				jump = sljit_emit_cmp(compiler,
				    SLJIT_C_EQUAL|SLJIT_INT_OP,
				    BJ_XREG, 0,
				    SLJIT_IMM, 0);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			} else if (pc->k == 0) {
				/* Constant zero divisor: return 0 always. */
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			}

			if (src == BPF_X) {
				status = emit_moddiv(compiler, pc);
				if (status != SLJIT_SUCCESS)
					goto fail;
			} else if (pc->k != 0) {
				/* Power-of-two constants use shift/mask. */
				if (pc->k & (pc->k - 1)) {
					status = emit_moddiv(compiler, pc);
				} else {
					status = emit_pow2_moddiv(compiler, pc);
				}
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			continue;

		case BPF_JMP:
			op = BPF_OP(pc->code);
			if (op == BPF_JA) {
				jt = jf = pc->k;
			} else {
				jt = pc->jt;
				jf = pc->jf;
			}

			/*
			 * negate: emit the inverted condition when the
			 * true branch falls through (jt == 0).
			 * branching: no conditional jump needed at all
			 * when both targets are the same.
			 */
			negate = (jt == 0) ? 1 : 0;
			branching = (jt == jf) ? 0 : 1;
			jtf = insn_dat[i].u.jdata.jtf;

			if (branching) {
				if (op != BPF_JSET) {
					jump = sljit_emit_cmp(compiler,
					    bpf_jmp_to_sljit_cond(pc, negate),
					    BJ_AREG, 0,
					    kx_to_reg(pc), kx_to_reg_arg(pc));
				} else {
					/* BPF_JSET: test bits, then compare
					 * the masked value with zero. */
					status = sljit_emit_op2(compiler,
					    SLJIT_AND,
					    BJ_TMP1REG, 0,
					    BJ_AREG, 0,
					    kx_to_reg(pc), kx_to_reg_arg(pc));
					if (status != SLJIT_SUCCESS)
						goto fail;

					jump = sljit_emit_cmp(compiler,
					    bpf_jmp_to_sljit_cond(pc, negate),
					    BJ_TMP1REG, 0,
					    SLJIT_IMM, 0);
				}

				if (jump == NULL)
					goto fail;

				BJ_ASSERT(jtf[negate].sjump == NULL);
				jtf[negate].sjump = jump;
			}

			/* Unconditional jump when neither target falls
			 * through (or the insn was a plain BPF_JA). */
			if (!branching || (jt != 0 && jf != 0)) {
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;

				BJ_ASSERT(jtf[branching].sjump == NULL);
				jtf[branching].sjump = jump;
			}

			continue;

		case BPF_RET:
			rval = BPF_RVAL(pc->code);
			if (rval == BPF_X)
				goto fail;

			/* BPF_RET+BPF_K    accept k bytes */
			if (rval == BPF_K) {
				status = sljit_emit_return(compiler,
				    SLJIT_MOV_UI,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			/* BPF_RET+BPF_A    accept A bytes */
			if (rval == BPF_A) {
				status = sljit_emit_return(compiler,
				    SLJIT_MOV_UI,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			continue;

		case BPF_MISC:
			switch (BPF_MISCOP(pc->code)) {
			case BPF_TAX:
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV_UI,
				    BJ_XREG, 0,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;

			case BPF_TXA:
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    BJ_XREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;

			case BPF_COP:
			case BPF_COPX:
				/* Coprocessor calls need a context with
				 * registered functions; BPF_COP also has a
				 * compile-time index to validate. */
				if (bc == NULL || bc->copfuncs == NULL)
					goto fail;
				if (BPF_MISCOP(pc->code) == BPF_COP &&
				    (uint32_t)pc->k >= bc->nfuncs) {
					goto fail;
				}

				status = emit_cop(compiler, hints, bc, pc,
				    &ret0, &ret0_size, &ret0_maxsize);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			goto fail;
		} /* switch */
	} /* main loop */

	BJ_ASSERT(ret0_size <= ret0_maxsize);

	/* Patch all collected "return 0" jumps to a shared epilogue. */
	if (ret0_size > 0) {
		label = sljit_emit_label(compiler);
		if (label == NULL)
			goto fail;
		for (i = 0; i < ret0_size; i++)
			sljit_set_label(ret0[i], label);
	}

	status = sljit_emit_return(compiler,
	    SLJIT_MOV_UI,
	    SLJIT_IMM, 0);
	if (status != SLJIT_SUCCESS)
		goto fail;

	rv = true;

fail:
	if (ret0 != NULL)
		BJ_FREE(ret0, ret0_maxsize * sizeof(ret0[0]));

	return rv;
}
   2121 
/*
 * Compile the BPF program insns[0..insn_count-1] into native code with
 * the sljit backend.
 *
 * bc may be NULL; when non-NULL it supplies external memory words
 * (extwords/preinited) and coprocessor context consulted via the
 * GET_EXTWORDS()/GET_MEMWORDS() helpers.
 *
 * Returns a callable function pointer on success, or NULL on any
 * failure (invalid program, allocation failure, or an sljit error).
 * The returned code is released with bpfjit_free_code().
 */
bpfjit_func_t
bpfjit_generate_code(const bpf_ctx_t *bc,
    const struct bpf_insn *insns, size_t insn_count)
{
	void *rv;
	struct sljit_compiler *compiler;

	size_t i;
	int status;

	/* optimization related */
	bpf_memword_init_t initmask;
	bpfjit_hint_t hints;

	/* memory store location for initial zero initialization */
	sljit_si mem_reg;
	sljit_sw mem_off;

	/* per-instruction bookkeeping filled in by optimize() */
	struct bpfjit_insn_data *insn_dat;

	const size_t extwords = GET_EXTWORDS(bc);
	const size_t memwords = GET_MEMWORDS(bc);
	const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;

	/*
	 * Everything starts out NULL so the fail path below can
	 * unconditionally test what needs freeing.
	 */
	rv = NULL;
	compiler = NULL;
	insn_dat = NULL;

	if (memwords > MAX_MEMWORDS)
		goto fail;

	/* Guard the multiplication in the BJ_ALLOC() size below. */
	if (insn_count == 0 || insn_count > SIZE_MAX / sizeof(insn_dat[0]))
		goto fail;

	insn_dat = BJ_ALLOC(insn_count * sizeof(insn_dat[0]));
	if (insn_dat == NULL)
		goto fail;

	/* Analyse the program; also validates it and derives initmask/hints. */
	if (!optimize(bc, insns, insn_dat, insn_count, &initmask, &hints))
		goto fail;

	compiler = sljit_create_compiler();
	if (compiler == NULL)
		goto fail;

#if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
	/* Userland debug builds dump the generated instructions. */
	sljit_compiler_verbose(compiler, stderr);
#endif

	/*
	 * Function prologue: two arguments, with scratch/saved register
	 * counts chosen from the hints and stack space for bpfjit_stack.
	 */
	status = sljit_emit_enter(compiler,
	    2, nscratches(hints), nsaveds(hints), sizeof(struct bpfjit_stack));
	if (status != SLJIT_SUCCESS)
		goto fail;

	if (hints & BJ_HINT_COP) {
		/* save ctx argument */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, ctx),
		    BJ_CTX_ARG, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	/*
	 * Decide where the M[] memory words live: on the native stack
	 * when there are no external words, otherwise in the caller
	 * supplied buffer reached through bpf_args.
	 */
	if (extwords == 0) {
		mem_reg = SLJIT_MEM1(SLJIT_LOCALS_REG);
		mem_off = offsetof(struct bpfjit_stack, mem);
	} else {
		/* copy "mem" argument from bpf_args to bpfjit_stack */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(BJ_ARGS), offsetof(struct bpf_args, mem));
		if (status != SLJIT_SUCCESS)
			goto fail;

		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, extmem),
		    BJ_TMP1REG, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;

		mem_reg = SLJIT_MEM1(BJ_TMP1REG);
		mem_off = 0;
	}

	/*
	 * Exclude pre-initialised external memory words but keep
	 * initialization statuses of A and X registers in case
	 * bc->preinited wrongly sets those two bits.
	 */
	initmask &= ~preinited | BJ_INIT_ABIT | BJ_INIT_XBIT;

#if defined(_KERNEL)
	/* bpf_filter() checks initialization of memwords. */
	BJ_ASSERT((initmask & (BJ_INIT_MBIT(memwords) - 1)) == 0);
#endif
	/* Emit zero-initialization only for words the program may read. */
	for (i = 0; i < memwords; i++) {
		if (initmask & BJ_INIT_MBIT(i)) {
			/* M[i] = 0; */
			status = sljit_emit_op1(compiler,
			    SLJIT_MOV_UI,
			    mem_reg, mem_off + i * sizeof(uint32_t),
			    SLJIT_IMM, 0);
			if (status != SLJIT_SUCCESS)
				goto fail;
		}
	}

	if (initmask & BJ_INIT_ABIT) {
		/* A = 0; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_AREG, 0,
		    SLJIT_IMM, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	if (initmask & BJ_INIT_XBIT) {
		/* X = 0; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_XREG, 0,
		    SLJIT_IMM, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	/* Load packet buffer pointer and length into their registers. */
	status = load_buf_buflen(compiler);
	if (status != SLJIT_SUCCESS)
		goto fail;

	/* Translate the BPF instructions themselves. */
	if (!generate_insn_code(compiler, hints,
	    bc, insns, insn_dat, insn_count)) {
		goto fail;
	}

	/* Assemble everything emitted above into executable memory. */
	rv = sljit_generate_code(compiler);

fail:
	/* rv is still NULL on any failure path; free intermediates only. */
	if (compiler != NULL)
		sljit_free_compiler(compiler);

	if (insn_dat != NULL)
		BJ_FREE(insn_dat, insn_count * sizeof(insn_dat[0]));

	return (bpfjit_func_t)rv;
}
   2274 
   2275 void
   2276 bpfjit_free_code(bpfjit_func_t code)
   2277 {
   2278 
   2279 	sljit_free_code((void *)code);
   2280 }
   2281