/*	$NetBSD: bpfjit.c,v 1.48 2020/02/01 02:54:02 riastradh Exp $	*/

/*-
 * Copyright (c) 2011-2015 Alexander Nasonov.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef _KERNEL
__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.48 2020/02/01 02:54:02 riastradh Exp $");
#else
__RCSID("$NetBSD: bpfjit.c,v 1.48 2020/02/01 02:54:02 riastradh Exp $");
#endif

#include <sys/types.h>
#include <sys/queue.h>

#ifndef _KERNEL
#include <assert.h>
#define BJ_ASSERT(c) assert(c)
#else
#define BJ_ASSERT(c) KASSERT(c)
#endif

#ifndef _KERNEL
#include <stdlib.h>
#define BJ_ALLOC(sz) malloc(sz)
#define BJ_FREE(p, sz) free(p)
#else
#include <sys/kmem.h>
#define BJ_ALLOC(sz) kmem_alloc(sz, KM_SLEEP)
#define BJ_FREE(p, sz) kmem_free(p, sz)
#endif

#ifndef _KERNEL
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#else
#include <sys/atomic.h>
#include <sys/module.h>
#endif

#define	__BPF_PRIVATE
#include <net/bpf.h>
#include <net/bpfjit.h>
#include <sljitLir.h>

#if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
#include <stdio.h> /* for stderr */
#endif

/*
 * Number of saved registers to pass to sljit_emit_enter() function.
 */
#define NSAVEDS 3

/*
 * Arguments of generated bpfjit_func_t.
 * The first argument is reassigned upon entry
 * to a more frequently used buf argument.
 */
#define BJ_CTX_ARG	SLJIT_S0
#define BJ_ARGS		SLJIT_S1

/*
 * Permanent register assignments.
 */
#define BJ_BUF		SLJIT_S0
//#define BJ_ARGS	SLJIT_S1
#define BJ_BUFLEN	SLJIT_S2
#define BJ_AREG		SLJIT_R0
#define BJ_TMP1REG	SLJIT_R1
#define BJ_TMP2REG	SLJIT_R2
#define BJ_XREG		SLJIT_R3
#define BJ_TMP3REG	SLJIT_R4

#ifdef _KERNEL
#define MAX_MEMWORDS BPF_MAX_MEMWORDS
#else
#define MAX_MEMWORDS BPF_MEMWORDS
#endif

#define BJ_INIT_NOBITS	((bpf_memword_init_t)0)
#define BJ_INIT_MBIT(k)	BPF_MEMWORD_INIT(k)
#define BJ_INIT_ABIT	BJ_INIT_MBIT(MAX_MEMWORDS)
#define BJ_INIT_XBIT	BJ_INIT_MBIT(MAX_MEMWORDS + 1)

/*
 * Get the number of memwords and external memwords from a bpf_ctx object.
 */
#define GET_EXTWORDS(bc) ((bc) ? (bc)->extwords : 0)
#define GET_MEMWORDS(bc) (GET_EXTWORDS(bc) ? GET_EXTWORDS(bc) : BPF_MEMWORDS)

/*
 * Optimization hints.
 */
typedef unsigned int bpfjit_hint_t;
#define BJ_HINT_ABS	0x01 /* packet read at absolute offset */
#define BJ_HINT_IND	0x02 /* packet read at variable offset */
#define BJ_HINT_MSH	0x04 /* BPF_MSH instruction */
#define BJ_HINT_COP	0x08 /* BPF_COP or BPF_COPX instruction */
#define BJ_HINT_COPX	0x10 /* BPF_COPX instruction */
#define BJ_HINT_XREG	0x20 /* BJ_XREG is needed */
#define BJ_HINT_LDX	0x40 /* BPF_LDX instruction */
#define BJ_HINT_PKT	(BJ_HINT_ABS|BJ_HINT_IND|BJ_HINT_MSH)

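/*
 * Example (illustrative, not used by the code): a filter containing
 * BPF_LD+BPF_W+BPF_IND sets BJ_HINT_IND and BJ_HINT_XREG during the
 * first optimization pass, which makes nscratches() below return at
 * least 4 so that BJ_XREG (SLJIT_R3) is allocated for the generated
 * code.
 */
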
/*
 * Datatype for Array Bounds Check Elimination (ABC) pass.
 */
typedef uint64_t bpfjit_abc_length_t;
#define MAX_ABC_LENGTH (UINT32_MAX + UINT64_C(4)) /* max. width is 4 */

struct bpfjit_stack
{
	bpf_ctx_t *ctx;
	uint32_t *extmem; /* pointer to external memory store */
	uint32_t reg; /* saved A or X register */
#ifdef _KERNEL
	int err; /* 3rd argument for m_xword/m_xhalf/m_xbyte function call */
#endif
	uint32_t mem[BPF_MEMWORDS]; /* internal memory store */
};

/*
 * Data for BPF_JMP instruction.
 * Forward declaration for struct bpfjit_jump.
 */
struct bpfjit_jump_data;

/*
 * Node of bjumps list.
 */
struct bpfjit_jump {
	struct sljit_jump *sjump;
	SLIST_ENTRY(bpfjit_jump) entries;
	struct bpfjit_jump_data *jdata;
};

/*
 * Data for BPF_JMP instruction.
 */
struct bpfjit_jump_data {
	/*
	 * These entries make up bjumps list:
	 * jtf[0] - when coming from jt path,
	 * jtf[1] - when coming from jf path.
	 */
	struct bpfjit_jump jtf[2];
	/*
	 * Length calculated by Array Bounds Check Elimination (ABC) pass.
	 */
	bpfjit_abc_length_t abc_length;
	/*
	 * Length checked by the last out-of-bounds check.
	 */
	bpfjit_abc_length_t checked_length;
};

/*
 * Data for "read from packet" instructions.
 * See also read_pkt_insn() function below.
 */
struct bpfjit_read_pkt_data {
	/*
	 * Length calculated by Array Bounds Check Elimination (ABC) pass.
	 */
	bpfjit_abc_length_t abc_length;
	/*
	 * If positive, emit "if (buflen < check_length) return 0"
	 * out-of-bounds check.
	 * Values greater than UINT32_MAX generate unconditional "return 0".
	 */
	bpfjit_abc_length_t check_length;
};

/*
 * Additional (optimization-related) data for bpf_insn.
 */
struct bpfjit_insn_data {
	/* List of jumps to this insn. */
	SLIST_HEAD(, bpfjit_jump) bjumps;

	union {
		struct bpfjit_jump_data jdata;
		struct bpfjit_read_pkt_data rdata;
	} u;

	bpf_memword_init_t invalid;
	bool unreachable;
};

#ifdef _KERNEL

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

MODULE(MODULE_CLASS_MISC, bpfjit, "sljit")

static int
bpfjit_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		bpfjit_module_ops.bj_free_code = &bpfjit_free_code;
		atomic_store_release(&bpfjit_module_ops.bj_generate_code,
		    &bpfjit_generate_code);
		return 0;

	case MODULE_CMD_FINI:
		return EOPNOTSUPP;

	default:
		return ENOTTY;
	}
}
#endif

/*
 * Return the number of scratch registers to pass
 * to sljit_emit_enter() function.
 */
static sljit_s32
nscratches(bpfjit_hint_t hints)
{
	sljit_s32 rv = 2;

#ifdef _KERNEL
	if (hints & BJ_HINT_PKT)
		rv = 3; /* xcall with three arguments */
#endif

	if (hints & BJ_HINT_IND)
		rv = 3; /* uses BJ_TMP2REG */

	if (hints & BJ_HINT_COP)
		rv = 3; /* calls copfunc with three arguments */

	if (hints & BJ_HINT_XREG)
		rv = 4; /* uses BJ_XREG */

#ifdef _KERNEL
	if (hints & BJ_HINT_LDX)
		rv = 5; /* uses BJ_TMP3REG */
#endif

	if (hints & BJ_HINT_COPX)
		rv = 5; /* uses BJ_TMP3REG */

	return rv;
}

static uint32_t
read_width(const struct bpf_insn *pc)
{

	switch (BPF_SIZE(pc->code)) {
	case BPF_W: return 4;
	case BPF_H: return 2;
	case BPF_B: return 1;
	default:    return 0;
	}
}

/*
 * Copy buf and buflen members of bpf_args from BJ_ARGS
 * pointer to BJ_BUF and BJ_BUFLEN registers.
 */
static int
load_buf_buflen(struct sljit_compiler *compiler)
{
	int status;

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    BJ_BUF, 0,
	    SLJIT_MEM1(BJ_ARGS),
	    offsetof(struct bpf_args, pkt));
	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV, /* size_t source */
	    BJ_BUFLEN, 0,
	    SLJIT_MEM1(BJ_ARGS),
	    offsetof(struct bpf_args, buflen));

	return status;
}

static bool
grow_jumps(struct sljit_jump ***jumps, size_t *size)
{
	struct sljit_jump **newptr;
	const size_t elemsz = sizeof(struct sljit_jump *);
	size_t old_size = *size;
	size_t new_size = 2 * old_size;

	if (new_size < old_size || new_size > SIZE_MAX / elemsz)
		return false;

	newptr = BJ_ALLOC(new_size * elemsz);
	if (newptr == NULL)
		return false;

	memcpy(newptr, *jumps, old_size * elemsz);
	BJ_FREE(*jumps, old_size * elemsz);

	*jumps = newptr;
	*size = new_size;
	return true;
}

static bool
append_jump(struct sljit_jump *jump, struct sljit_jump ***jumps,
    size_t *size, size_t *max_size)
{
	if (*size == *max_size && !grow_jumps(jumps, max_size))
		return false;

	(*jumps)[(*size)++] = jump;
	return true;
}

/*
 * Emit code for BPF_LD+BPF_B+BPF_ABS A <- P[k:1].
 */
static int
emit_read8(struct sljit_compiler *compiler, sljit_s32 src, uint32_t k)
{

	return sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_AREG, 0,
	    SLJIT_MEM1(src), k);
}

/*
 * Emit code for BPF_LD+BPF_H+BPF_ABS A <- P[k:2].
 */
static int
emit_read16(struct sljit_compiler *compiler, sljit_s32 src, uint32_t k)
{
	int status;

	BJ_ASSERT(k <= UINT32_MAX - 1);

	/* A = buf[k]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_AREG, 0,
	    SLJIT_MEM1(src), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = buf[k+1]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(src), k+1);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	return status;
}

/*
 * Emit code for BPF_LD+BPF_W+BPF_ABS A <- P[k:4].
 */
static int
emit_read32(struct sljit_compiler *compiler, sljit_s32 src, uint32_t k)
{
	int status;

	BJ_ASSERT(k <= UINT32_MAX - 3);

	/* A = buf[k]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_AREG, 0,
	    SLJIT_MEM1(src), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = buf[k+1]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(src), k+1);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = buf[k+2]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(src), k+2);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = buf[k+3]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(src), k+3);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	return status;
}

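/*
 * Note (summary of the helpers above): emit_read16() and emit_read32()
 * assemble A from individual bytes, i.e. A = P[k]<<8 | P[k+1] and
 * A = P[k]<<24 | P[k+1]<<16 | P[k+2]<<8 | P[k+3], which yields the
 * big-endian (network byte order) value without unaligned loads.
 */
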
#ifdef _KERNEL
/*
 * Emit code for m_xword/m_xhalf/m_xbyte call.
 *
 * @pc BPF_LD+BPF_W+BPF_ABS   A <- P[k:4]
 *     BPF_LD+BPF_H+BPF_ABS   A <- P[k:2]
 *     BPF_LD+BPF_B+BPF_ABS   A <- P[k:1]
 *     BPF_LD+BPF_W+BPF_IND   A <- P[X+k:4]
 *     BPF_LD+BPF_H+BPF_IND   A <- P[X+k:2]
 *     BPF_LD+BPF_B+BPF_IND   A <- P[X+k:1]
 *     BPF_LDX+BPF_B+BPF_MSH  X <- 4*(P[k:1]&0xf)
 */
static int
emit_xcall(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const struct bpf_insn *pc, int dst, struct sljit_jump ***ret0,
    size_t *ret0_size, size_t *ret0_maxsize,
    uint32_t (*fn)(const struct mbuf *, uint32_t, int *))
{
#if BJ_XREG == SLJIT_RETURN_REG || \
    BJ_XREG == SLJIT_R0 || \
    BJ_XREG == SLJIT_R1 || \
    BJ_XREG == SLJIT_R2
#error "Not supported assignment of registers."
#endif
	struct sljit_jump *jump;
	sljit_s32 save_reg;
	int status;

	save_reg = (BPF_CLASS(pc->code) == BPF_LDX) ? BJ_AREG : BJ_XREG;

	if (save_reg == BJ_AREG || (hints & BJ_HINT_XREG)) {
		/* save A or X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_U32,
		    SLJIT_MEM1(SLJIT_SP),
		    offsetof(struct bpfjit_stack, reg),
		    save_reg, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * Prepare registers for fn(mbuf, k, &err) call.
	 */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_R0, 0,
	    BJ_BUF, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	if (BPF_CLASS(pc->code) == BPF_LD && BPF_MODE(pc->code) == BPF_IND) {
		if (pc->k == 0) {
			/* k = X; */
			status = sljit_emit_op1(compiler,
			    SLJIT_MOV,
			    SLJIT_R1, 0,
			    BJ_XREG, 0);
			if (status != SLJIT_SUCCESS)
				return status;
		} else {
			/* if (X > UINT32_MAX - pc->k) return 0; */
			jump = sljit_emit_cmp(compiler,
			    SLJIT_GREATER,
			    BJ_XREG, 0,
			    SLJIT_IMM, UINT32_MAX - pc->k);
			if (jump == NULL)
				return SLJIT_ERR_ALLOC_FAILED;
			if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
				return SLJIT_ERR_ALLOC_FAILED;

			/* k = X + pc->k; */
			status = sljit_emit_op2(compiler,
			    SLJIT_ADD,
			    SLJIT_R1, 0,
			    BJ_XREG, 0,
			    SLJIT_IMM, (uint32_t)pc->k);
			if (status != SLJIT_SUCCESS)
				return status;
		}
	} else {
		/* k = pc->k */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    SLJIT_R1, 0,
		    SLJIT_IMM, (uint32_t)pc->k);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * The third argument of fn is an address on stack.
	 */
	status = sljit_get_local_base(compiler,
	    SLJIT_R2, 0,
	    offsetof(struct bpfjit_stack, err));
	if (status != SLJIT_SUCCESS)
		return status;

	/* fn(buf, k, &err); */
	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL3,
	    SLJIT_IMM, SLJIT_FUNC_OFFSET(fn));
	if (status != SLJIT_SUCCESS)
		return status;

	if (dst != SLJIT_RETURN_REG) {
		/* move return value to dst */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    dst, 0,
		    SLJIT_RETURN_REG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/* if (*err != 0) return 0; */
	jump = sljit_emit_cmp(compiler,
	    SLJIT_NOT_EQUAL|SLJIT_I32_OP,
	    SLJIT_MEM1(SLJIT_SP),
	    offsetof(struct bpfjit_stack, err),
	    SLJIT_IMM, 0);
	if (jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
		return SLJIT_ERR_ALLOC_FAILED;

	if (save_reg == BJ_AREG || (hints & BJ_HINT_XREG)) {
		/* restore A or X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_U32,
		    save_reg, 0,
		    SLJIT_MEM1(SLJIT_SP),
		    offsetof(struct bpfjit_stack, reg));
		if (status != SLJIT_SUCCESS)
			return status;
	}

	return SLJIT_SUCCESS;
}
#endif

/*
 * Emit code for BPF_COP and BPF_COPX instructions.
 */
static int
emit_cop(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const bpf_ctx_t *bc, const struct bpf_insn *pc,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
#if BJ_XREG == SLJIT_RETURN_REG || \
    BJ_XREG == SLJIT_R0 || \
    BJ_XREG == SLJIT_R1 || \
    BJ_XREG == SLJIT_R2 || \
    BJ_TMP3REG == SLJIT_R0 || \
    BJ_TMP3REG == SLJIT_R1 || \
    BJ_TMP3REG == SLJIT_R2
#error "Not supported assignment of registers."
#endif

	struct sljit_jump *jump;
	sljit_s32 call_reg;
	sljit_sw call_off;
	int status;

	BJ_ASSERT(bc != NULL && bc->copfuncs != NULL);

	if (hints & BJ_HINT_LDX) {
		/* save X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_U32,
		    SLJIT_MEM1(SLJIT_SP),
		    offsetof(struct bpfjit_stack, reg),
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	if (BPF_MISCOP(pc->code) == BPF_COP) {
		call_reg = SLJIT_IMM;
		call_off = SLJIT_FUNC_OFFSET(bc->copfuncs[pc->k]);
	} else {
		/* if (X >= bc->nfuncs) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_GREATER_EQUAL,
		    BJ_XREG, 0,
		    SLJIT_IMM, bc->nfuncs);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;

		/* tmp1 = ctx; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(SLJIT_SP),
		    offsetof(struct bpfjit_stack, ctx));
		if (status != SLJIT_SUCCESS)
			return status;

		/* tmp1 = ctx->copfuncs; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(BJ_TMP1REG),
		    offsetof(struct bpf_ctx, copfuncs));
		if (status != SLJIT_SUCCESS)
			return status;

		/* tmp2 = X; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_TMP2REG, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;

		/* tmp3 = ctx->copfuncs[tmp2]; */
		call_reg = BJ_TMP3REG;
		call_off = 0;
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    call_reg, call_off,
		    SLJIT_MEM2(BJ_TMP1REG, BJ_TMP2REG),
		    SLJIT_WORD_SHIFT);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * Copy bpf_copfunc_t arguments to registers.
	 */
#if BJ_AREG != SLJIT_R2
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U32,
	    SLJIT_R2, 0,
	    BJ_AREG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    SLJIT_R0, 0,
	    SLJIT_MEM1(SLJIT_SP),
	    offsetof(struct bpfjit_stack, ctx));
	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    SLJIT_R1, 0,
	    BJ_ARGS, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL3, call_reg, call_off);
	if (status != SLJIT_SUCCESS)
		return status;

#if BJ_AREG != SLJIT_RETURN_REG
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_RETURN_REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	if (hints & BJ_HINT_LDX) {
		/* restore X */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_U32,
		    BJ_XREG, 0,
		    SLJIT_MEM1(SLJIT_SP),
		    offsetof(struct bpfjit_stack, reg));
		if (status != SLJIT_SUCCESS)
			return status;
	}

	return SLJIT_SUCCESS;
}

/*
 * Generate code for
 * BPF_LD+BPF_W+BPF_ABS	A <- P[k:4]
 * BPF_LD+BPF_H+BPF_ABS	A <- P[k:2]
 * BPF_LD+BPF_B+BPF_ABS	A <- P[k:1]
 * BPF_LD+BPF_W+BPF_IND	A <- P[X+k:4]
 * BPF_LD+BPF_H+BPF_IND	A <- P[X+k:2]
 * BPF_LD+BPF_B+BPF_IND	A <- P[X+k:1]
 */
static int
emit_pkt_read(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status = SLJIT_ERR_ALLOC_FAILED;
	uint32_t width;
	sljit_s32 ld_reg;
	struct sljit_jump *jump;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *over_mchain_jump;
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	if (to_mchain_jump == NULL) {
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	ld_reg = BJ_BUF;
	width = read_width(pc);
	if (width == 0)
		return SLJIT_ERR_ALLOC_FAILED;

	if (BPF_MODE(pc->code) == BPF_IND) {
		/* tmp1 = buflen - (pc->k + width); */
		status = sljit_emit_op2(compiler,
		    SLJIT_SUB,
		    BJ_TMP1REG, 0,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, k + width);
		if (status != SLJIT_SUCCESS)
			return status;

		/* ld_reg = buf + X; */
		ld_reg = BJ_TMP2REG;
		status = sljit_emit_op2(compiler,
		    SLJIT_ADD,
		    ld_reg, 0,
		    BJ_BUF, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;

		/* if (tmp1 < X) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_LESS,
		    BJ_TMP1REG, 0,
		    BJ_XREG, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	/*
	 * Don't emit wrapped-around reads. They're dead code but
	 * dead code elimination logic isn't smart enough to figure
	 * it out.
	 */
	if (k <= UINT32_MAX - width + 1) {
		switch (width) {
		case 4:
			status = emit_read32(compiler, ld_reg, k);
			break;
		case 2:
			status = emit_read16(compiler, ld_reg, k);
			break;
		case 1:
			status = emit_read8(compiler, ld_reg, k);
			break;
		}

		if (status != SLJIT_SUCCESS)
			return status;
	}

#ifdef _KERNEL
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	switch (width) {
	case 4:
		status = emit_xcall(compiler, hints, pc, BJ_AREG,
		    ret0, ret0_size, ret0_maxsize, &m_xword);
		break;
	case 2:
		status = emit_xcall(compiler, hints, pc, BJ_AREG,
		    ret0, ret0_size, ret0_maxsize, &m_xhalf);
		break;
	case 1:
		status = emit_xcall(compiler, hints, pc, BJ_AREG,
		    ret0, ret0_size, ret0_maxsize, &m_xbyte);
		break;
	}

	if (status != SLJIT_SUCCESS)
		return status;

	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	return SLJIT_SUCCESS;
}

static int
emit_memload(struct sljit_compiler *compiler,
    sljit_s32 dst, uint32_t k, size_t extwords)
{
	int status;
	sljit_s32 src;
	sljit_sw srcw;

	srcw = k * sizeof(uint32_t);

	if (extwords == 0) {
		src = SLJIT_MEM1(SLJIT_SP);
		srcw += offsetof(struct bpfjit_stack, mem);
	} else {
		/* copy extmem pointer to the tmp1 register */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(SLJIT_SP),
		    offsetof(struct bpfjit_stack, extmem));
		if (status != SLJIT_SUCCESS)
			return status;
		src = SLJIT_MEM1(BJ_TMP1REG);
	}

	return sljit_emit_op1(compiler, SLJIT_MOV_U32, dst, 0, src, srcw);
}

static int
emit_memstore(struct sljit_compiler *compiler,
    sljit_s32 src, uint32_t k, size_t extwords)
{
	int status;
	sljit_s32 dst;
	sljit_sw dstw;

	dstw = k * sizeof(uint32_t);

	if (extwords == 0) {
		dst = SLJIT_MEM1(SLJIT_SP);
		dstw += offsetof(struct bpfjit_stack, mem);
	} else {
		/* copy extmem pointer to the tmp1 register */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(SLJIT_SP),
		    offsetof(struct bpfjit_stack, extmem));
		if (status != SLJIT_SUCCESS)
			return status;
		dst = SLJIT_MEM1(BJ_TMP1REG);
	}

	return sljit_emit_op1(compiler, SLJIT_MOV_U32, dst, dstw, src, 0);
}

/*
 * Emit code for BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf).
 */
static int
emit_msh(struct sljit_compiler *compiler, bpfjit_hint_t hints,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *jump, *over_mchain_jump;
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	if (to_mchain_jump == NULL) {
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	/* tmp1 = buf[k] */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_U8,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

#ifdef _KERNEL
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	status = emit_xcall(compiler, hints, pc, BJ_TMP1REG,
	    ret0, ret0_size, ret0_maxsize, &m_xbyte);
	if (status != SLJIT_SUCCESS)
		return status;

	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	/* tmp1 &= 0xf */
	status = sljit_emit_op2(compiler,
	    SLJIT_AND,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 0xf);
	if (status != SLJIT_SUCCESS)
		return status;

	/* X = tmp1 << 2 */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_XREG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 2);
	if (status != SLJIT_SUCCESS)
		return status;

	return SLJIT_SUCCESS;
}

/*
 * Emit code for A = A / k or A = A % k when k is a power of 2.
 * @pc BPF_DIV or BPF_MOD instruction.
 */
static int
emit_pow2_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
{
	uint32_t k = pc->k;
	int status = SLJIT_SUCCESS;

	BJ_ASSERT(k != 0 && (k & (k - 1)) == 0);

	if (BPF_OP(pc->code) == BPF_MOD) {
		status = sljit_emit_op2(compiler,
		    SLJIT_AND,
		    BJ_AREG, 0,
		    BJ_AREG, 0,
		    SLJIT_IMM, k - 1);
	} else {
		int shift = 0;

		/*
		 * Do shift = __builtin_ctz(k).
		 * The loop is slower, but that's ok.
		 */
		while (k > 1) {
			k >>= 1;
			shift++;
		}

		if (shift != 0) {
			status = sljit_emit_op2(compiler,
			    SLJIT_LSHR|SLJIT_I32_OP,
			    BJ_AREG, 0,
			    BJ_AREG, 0,
			    SLJIT_IMM, shift);
		}
	}

	return status;
}

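/*
 * Example (illustrative): for k == 8, emit_pow2_moddiv() emits
 * "A = A & 7" for BPF_MOD and "A = A >> 3" for BPF_DIV, avoiding
 * the generic divide/modulus path below.
 */
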
#if !defined(BPFJIT_USE_UDIV)
static sljit_uw
divide(sljit_uw x, sljit_uw y)
{

	return (uint32_t)x / (uint32_t)y;
}

static sljit_uw
modulus(sljit_uw x, sljit_uw y)
{

	return (uint32_t)x % (uint32_t)y;
}
#endif

/*
 * Emit code for A = A / div or A = A % div.
 * @pc BPF_DIV or BPF_MOD instruction.
 */
static int
emit_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
{
	int status;
	const bool xdiv = BPF_OP(pc->code) == BPF_DIV;
	const bool xreg = BPF_SRC(pc->code) == BPF_X;

#if BJ_XREG == SLJIT_RETURN_REG || \
    BJ_XREG == SLJIT_R0 || \
    BJ_XREG == SLJIT_R1 || \
    BJ_AREG == SLJIT_R1
#error "Not supported assignment of registers."
#endif

#if BJ_AREG != SLJIT_R0
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_R0, 0,
	    BJ_AREG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_R1, 0,
	    xreg ? BJ_XREG : SLJIT_IMM,
	    xreg ? 0 : (uint32_t)pc->k);
	if (status != SLJIT_SUCCESS)
		return status;

#if defined(BPFJIT_USE_UDIV)
	status = sljit_emit_op0(compiler, SLJIT_UDIV|SLJIT_I32_OP);

	if (BPF_OP(pc->code) == BPF_DIV) {
#if BJ_AREG != SLJIT_R0
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_AREG, 0,
		    SLJIT_R0, 0);
#endif
	} else {
#if BJ_AREG != SLJIT_R1
		/* Remainder is in SLJIT_R1. */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_AREG, 0,
		    SLJIT_R1, 0);
#endif
	}

	if (status != SLJIT_SUCCESS)
		return status;
#else
	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL2,
	    SLJIT_IMM, xdiv ? SLJIT_FUNC_OFFSET(divide) :
		SLJIT_FUNC_OFFSET(modulus));

#if BJ_AREG != SLJIT_RETURN_REG
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_RETURN_REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif
#endif

	return status;
}

/*
 * Return true if pc is a "read from packet" instruction.
 * If length is not NULL and return value is true, *length will
 * be set to a safe length required to read a packet.
 */
static bool
read_pkt_insn(const struct bpf_insn *pc, bpfjit_abc_length_t *length)
{
	bool rv;
	bpfjit_abc_length_t width = 0; /* XXXuninit */

	switch (BPF_CLASS(pc->code)) {
	default:
		rv = false;
		break;

	case BPF_LD:
		rv = BPF_MODE(pc->code) == BPF_ABS ||
		     BPF_MODE(pc->code) == BPF_IND;
		if (rv) {
			width = read_width(pc);
			rv = (width != 0);
		}
		break;

	case BPF_LDX:
		rv = BPF_MODE(pc->code) == BPF_MSH &&
		     BPF_SIZE(pc->code) == BPF_B;
		width = 1;
		break;
	}

	if (rv && length != NULL) {
		/*
		 * Values greater than UINT32_MAX will generate
		 * unconditional "return 0".
		 */
		*length = (uint32_t)pc->k + width;
	}

	return rv;
}

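/*
 * Example (illustrative): for BPF_LD+BPF_H+BPF_ABS with k == 14,
 * read_pkt_insn() sets *length to 16.  The ABC passes below propagate
 * such lengths backwards so that a single "buflen < 16" check emitted
 * at an earlier read can also cover this one.
 */
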
static void
optimize_init(struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	size_t i;

	for (i = 0; i < insn_count; i++) {
		SLIST_INIT(&insn_dat[i].bjumps);
		insn_dat[i].invalid = BJ_INIT_NOBITS;
	}
}

/*
 * The function divides instructions into blocks. Destination of a jump
 * instruction starts a new block. BPF_RET and BPF_JMP instructions
 * terminate a block. Blocks are linear, that is, there are no jumps out
 * from the middle of a block and there are no jumps into the middle of
 * a block.
 *
 * The function also sets bits in *initmask for memwords that
 * need to be initialized to zero. Note that this set should be empty
 * for any valid kernel filter program.
 */
static bool
optimize_pass1(const bpf_ctx_t *bc, const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count,
    bpf_memword_init_t *initmask, bpfjit_hint_t *hints)
{
	struct bpfjit_jump *jtf;
	size_t i;
	uint32_t jt, jf;
	bpfjit_abc_length_t length;
	bpf_memword_init_t invalid; /* borrowed from bpf_filter() */
	bool unreachable;

	const size_t memwords = GET_MEMWORDS(bc);

	*hints = 0;
	*initmask = BJ_INIT_NOBITS;

	unreachable = false;
	invalid = ~BJ_INIT_NOBITS;

	for (i = 0; i < insn_count; i++) {
		if (!SLIST_EMPTY(&insn_dat[i].bjumps))
			unreachable = false;
		insn_dat[i].unreachable = unreachable;

		if (unreachable)
			continue;

		invalid |= insn_dat[i].invalid;

		if (read_pkt_insn(&insns[i], &length) && length > UINT32_MAX)
			unreachable = true;

		switch (BPF_CLASS(insns[i].code)) {
		case BPF_RET:
			if (BPF_RVAL(insns[i].code) == BPF_A)
				*initmask |= invalid & BJ_INIT_ABIT;

			unreachable = true;
			continue;

		case BPF_LD:
			if (BPF_MODE(insns[i].code) == BPF_ABS)
				*hints |= BJ_HINT_ABS;

			if (BPF_MODE(insns[i].code) == BPF_IND) {
				*hints |= BJ_HINT_IND | BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_XBIT;
			}

			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < memwords) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_LDX:
			*hints |= BJ_HINT_XREG | BJ_HINT_LDX;

			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < memwords) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			if (BPF_MODE(insns[i].code) == BPF_MSH &&
			    BPF_SIZE(insns[i].code) == BPF_B) {
				*hints |= BJ_HINT_MSH;
			}

			invalid &= ~BJ_INIT_XBIT;
			continue;

		case BPF_ST:
			*initmask |= invalid & BJ_INIT_ABIT;

			if ((uint32_t)insns[i].k < memwords)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_STX:
			*hints |= BJ_HINT_XREG;
			*initmask |= invalid & BJ_INIT_XBIT;

			if ((uint32_t)insns[i].k < memwords)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_ALU:
			*initmask |= invalid & BJ_INIT_ABIT;

			if (insns[i].code != (BPF_ALU|BPF_NEG) &&
			    BPF_SRC(insns[i].code) == BPF_X) {
				*hints |= BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_XBIT;
			}

			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_MISC:
			switch (BPF_MISCOP(insns[i].code)) {
			case BPF_TAX: // X <- A
				*hints |= BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_ABIT;
				invalid &= ~BJ_INIT_XBIT;
				continue;

			case BPF_TXA: // A <- X
				*hints |= BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_XBIT;
				invalid &= ~BJ_INIT_ABIT;
				continue;

			case BPF_COPX:
				*hints |= BJ_HINT_XREG | BJ_HINT_COPX;
				/* FALLTHROUGH */

			case BPF_COP:
				*hints |= BJ_HINT_COP;
				*initmask |= invalid & BJ_INIT_ABIT;
				invalid &= ~BJ_INIT_ABIT;
				continue;
			}

			continue;

		case BPF_JMP:
			/* Initialize abc_length for ABC pass. */
			insn_dat[i].u.jdata.abc_length = MAX_ABC_LENGTH;

			*initmask |= invalid & BJ_INIT_ABIT;

			if (BPF_SRC(insns[i].code) == BPF_X) {
				*hints |= BJ_HINT_XREG;
				*initmask |= invalid & BJ_INIT_XBIT;
			}

			if (BPF_OP(insns[i].code) == BPF_JA) {
				jt = jf = insns[i].k;
			} else {
				jt = insns[i].jt;
				jf = insns[i].jf;
			}

			if (jt >= insn_count - (i + 1) ||
			    jf >= insn_count - (i + 1)) {
				return false;
			}

			if (jt > 0 && jf > 0)
				unreachable = true;

			jt += i + 1;
			jf += i + 1;

			jtf = insn_dat[i].u.jdata.jtf;

			jtf[0].jdata = &insn_dat[i].u.jdata;
			SLIST_INSERT_HEAD(&insn_dat[jt].bjumps,
			    &jtf[0], entries);

			if (jf != jt) {
				jtf[1].jdata = &insn_dat[i].u.jdata;
				SLIST_INSERT_HEAD(&insn_dat[jf].bjumps,
				    &jtf[1], entries);
			}

			insn_dat[jf].invalid |= invalid;
			insn_dat[jt].invalid |= invalid;
			invalid = 0;

			continue;
		}
	}

	return true;
}

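/*
 * Example (illustrative): a program whose first instruction is
 * BPF_RET+BPF_A reads the A register before anything initializes it,
 * so pass1 above sets BJ_INIT_ABIT in *initmask; for BPF_MEM loads the
 * corresponding BJ_INIT_MBIT(k) bits are set in the same way.
 */
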
/*
 * Array Bounds Check Elimination (ABC) pass.
 */
static void
optimize_pass2(const bpf_ctx_t *bc, const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	struct bpfjit_jump *jmp;
	const struct bpf_insn *pc;
	struct bpfjit_insn_data *pd;
	size_t i;
	bpfjit_abc_length_t length, abc_length = 0;

	const size_t extwords = GET_EXTWORDS(bc);

	for (i = insn_count; i != 0; i--) {
		pc = &insns[i-1];
		pd = &insn_dat[i-1];

		if (pd->unreachable)
			continue;

		switch (BPF_CLASS(pc->code)) {
		case BPF_RET:
			/*
			 * It's quite common for bpf programs to
			 * check packet bytes in increasing order
			 * and return zero if bytes don't match
			 * a specified criterion. Such programs disable
			 * ABC optimization completely because for
			 * every jump there is a branch with no read
			 * instruction.
			 * With no side effects, BPF_STMT(BPF_RET+BPF_K, 0)
			 * is indistinguishable from an out-of-bounds load.
			 * Therefore, abc_length can be set to
			 * MAX_ABC_LENGTH, enabling ABC for many
			 * bpf programs.
			 * If this optimization encounters any
			 * instruction with a side effect, it will
			 * reset abc_length.
			 */
			if (BPF_RVAL(pc->code) == BPF_K && pc->k == 0)
				abc_length = MAX_ABC_LENGTH;
			else
				abc_length = 0;
			break;

		case BPF_MISC:
			if (BPF_MISCOP(pc->code) == BPF_COP ||
			    BPF_MISCOP(pc->code) == BPF_COPX) {
				/* COP instructions can have side effects. */
				abc_length = 0;
			}
			break;

		case BPF_ST:
		case BPF_STX:
			if (extwords != 0) {
				/* Write to memory is visible after a call. */
				abc_length = 0;
			}
			break;

		case BPF_JMP:
			abc_length = pd->u.jdata.abc_length;
			break;

		default:
			if (read_pkt_insn(pc, &length)) {
				if (abc_length < length)
					abc_length = length;
				pd->u.rdata.abc_length = abc_length;
			}
			break;
		}

		SLIST_FOREACH(jmp, &pd->bjumps, entries) {
			if (jmp->jdata->abc_length > abc_length)
				jmp->jdata->abc_length = abc_length;
		}
	}
}

static void
optimize_pass3(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	struct bpfjit_jump *jmp;
	size_t i;
	bpfjit_abc_length_t checked_length = 0;

	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable)
			continue;

		SLIST_FOREACH(jmp, &insn_dat[i].bjumps, entries) {
			if (jmp->jdata->checked_length < checked_length)
				checked_length = jmp->jdata->checked_length;
		}

		if (BPF_CLASS(insns[i].code) == BPF_JMP) {
			insn_dat[i].u.jdata.checked_length = checked_length;
		} else if (read_pkt_insn(&insns[i], NULL)) {
			struct bpfjit_read_pkt_data *rdata =
			    &insn_dat[i].u.rdata;
			rdata->check_length = 0;
			if (checked_length < rdata->abc_length) {
				checked_length = rdata->abc_length;
				rdata->check_length = checked_length;
			}
		}
	}
}

static bool
optimize(const bpf_ctx_t *bc, const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count,
    bpf_memword_init_t *initmask, bpfjit_hint_t *hints)
{

	optimize_init(insn_dat, insn_count);

	if (!optimize_pass1(bc, insns, insn_dat, insn_count, initmask, hints))
		return false;

	optimize_pass2(bc, insns, insn_dat, insn_count);
	optimize_pass3(insns, insn_dat, insn_count);

	return true;
}

/*
 * Convert BPF_ALU operations except BPF_NEG and BPF_DIV to sljit operation.
 */
static bool
alu_to_op(const struct bpf_insn *pc, int *res)
{
	const uint32_t k = pc->k;

	/*
	 * Note: all supported 64bit arches have 32bit multiply
	 * instruction so SLJIT_I32_OP doesn't have any overhead.
1605 1.1 alnsn */ 1606 1.1 alnsn switch (BPF_OP(pc->code)) { 1607 1.46 alnsn case BPF_ADD: 1608 1.46 alnsn *res = SLJIT_ADD; 1609 1.46 alnsn return true; 1610 1.46 alnsn case BPF_SUB: 1611 1.46 alnsn *res = SLJIT_SUB; 1612 1.46 alnsn return true; 1613 1.46 alnsn case BPF_MUL: 1614 1.46 alnsn *res = SLJIT_MUL|SLJIT_I32_OP; 1615 1.46 alnsn return true; 1616 1.46 alnsn case BPF_OR: 1617 1.46 alnsn *res = SLJIT_OR; 1618 1.46 alnsn return true; 1619 1.46 alnsn case BPF_XOR: 1620 1.46 alnsn *res = SLJIT_XOR; 1621 1.46 alnsn return true; 1622 1.46 alnsn case BPF_AND: 1623 1.46 alnsn *res = SLJIT_AND; 1624 1.46 alnsn return true; 1625 1.46 alnsn case BPF_LSH: 1626 1.46 alnsn *res = SLJIT_SHL; 1627 1.46 alnsn return k < 32; 1628 1.46 alnsn case BPF_RSH: 1629 1.46 alnsn *res = SLJIT_LSHR|SLJIT_I32_OP; 1630 1.46 alnsn return k < 32; 1631 1.1 alnsn default: 1632 1.46 alnsn return false; 1633 1.1 alnsn } 1634 1.1 alnsn } 1635 1.1 alnsn 1636 1.1 alnsn /* 1637 1.1 alnsn * Convert BPF_JMP operations except BPF_JA to sljit condition. 1638 1.1 alnsn */ 1639 1.46 alnsn static bool 1640 1.46 alnsn jmp_to_cond(const struct bpf_insn *pc, bool negate, int *res) 1641 1.1 alnsn { 1642 1.46 alnsn 1643 1.1 alnsn /* 1644 1.1 alnsn * Note: all supported 64bit arches have 32bit comparison 1645 1.45 alnsn * instructions so SLJIT_I32_OP doesn't have any overhead. 1646 1.1 alnsn */ 1647 1.46 alnsn *res = SLJIT_I32_OP; 1648 1.1 alnsn 1649 1.1 alnsn switch (BPF_OP(pc->code)) { 1650 1.1 alnsn case BPF_JGT: 1651 1.46 alnsn *res |= negate ? SLJIT_LESS_EQUAL : SLJIT_GREATER; 1652 1.46 alnsn return true; 1653 1.1 alnsn case BPF_JGE: 1654 1.46 alnsn *res |= negate ? SLJIT_LESS : SLJIT_GREATER_EQUAL; 1655 1.46 alnsn return true; 1656 1.1 alnsn case BPF_JEQ: 1657 1.46 alnsn *res |= negate ? SLJIT_NOT_EQUAL : SLJIT_EQUAL; 1658 1.46 alnsn return true; 1659 1.1 alnsn case BPF_JSET: 1660 1.46 alnsn *res |= negate ? SLJIT_EQUAL : SLJIT_NOT_EQUAL; 1661 1.46 alnsn return true; 1662 1.1 alnsn default: 1663 1.46 alnsn return false; 1664 1.1 alnsn } 1665 1.1 alnsn } 1666 1.1 alnsn 1667 1.1 alnsn /* 1668 1.1 alnsn * Convert BPF_K and BPF_X to sljit register. 
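 *
 * kx_to_reg() picks the sljit operand register and kx_to_reg_arg()
 * the matching second operand word, so a caller can emit (an
 * illustrative sketch of the pattern used below):
 *
 *	sljit_emit_op2(compiler, op, BJ_AREG, 0, BJ_AREG, 0,
 *	    kx_to_reg(pc), kx_to_reg_arg(pc));
 *
 * which resolves to either "SLJIT_IMM, (uint32_t)pc->k" or
 * "BJ_XREG, 0" depending on BPF_SRC(pc->code).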
1669 1.1 alnsn */ 1670 1.1 alnsn static int 1671 1.7 alnsn kx_to_reg(const struct bpf_insn *pc) 1672 1.1 alnsn { 1673 1.1 alnsn 1674 1.1 alnsn switch (BPF_SRC(pc->code)) { 1675 1.1 alnsn case BPF_K: return SLJIT_IMM; 1676 1.7 alnsn case BPF_X: return BJ_XREG; 1677 1.1 alnsn default: 1678 1.7 alnsn BJ_ASSERT(false); 1679 1.1 alnsn return 0; 1680 1.1 alnsn } 1681 1.1 alnsn } 1682 1.1 alnsn 1683 1.12 alnsn static sljit_sw 1684 1.7 alnsn kx_to_reg_arg(const struct bpf_insn *pc) 1685 1.1 alnsn { 1686 1.1 alnsn 1687 1.1 alnsn switch (BPF_SRC(pc->code)) { 1688 1.1 alnsn case BPF_K: return (uint32_t)pc->k; /* SLJIT_IMM, pc->k, */ 1689 1.7 alnsn case BPF_X: return 0; /* BJ_XREG, 0, */ 1690 1.1 alnsn default: 1691 1.7 alnsn BJ_ASSERT(false); 1692 1.1 alnsn return 0; 1693 1.1 alnsn } 1694 1.1 alnsn } 1695 1.1 alnsn 1696 1.19 alnsn static bool 1697 1.32 alnsn generate_insn_code(struct sljit_compiler *compiler, bpfjit_hint_t hints, 1698 1.32 alnsn const bpf_ctx_t *bc, const struct bpf_insn *insns, 1699 1.32 alnsn struct bpfjit_insn_data *insn_dat, size_t insn_count) 1700 1.1 alnsn { 1701 1.1 alnsn /* a list of jumps to out-of-bound return from a generated function */ 1702 1.1 alnsn struct sljit_jump **ret0; 1703 1.7 alnsn size_t ret0_size, ret0_maxsize; 1704 1.1 alnsn 1705 1.19 alnsn struct sljit_jump *jump; 1706 1.19 alnsn struct sljit_label *label; 1707 1.7 alnsn const struct bpf_insn *pc; 1708 1.1 alnsn struct bpfjit_jump *bjump, *jtf; 1709 1.1 alnsn struct sljit_jump *to_mchain_jump; 1710 1.1 alnsn 1711 1.19 alnsn size_t i; 1712 1.46 alnsn unsigned int rval, mode, src, op; 1713 1.19 alnsn int branching, negate; 1714 1.46 alnsn int status, cond, op2; 1715 1.1 alnsn uint32_t jt, jf; 1716 1.1 alnsn 1717 1.19 alnsn bool unconditional_ret; 1718 1.19 alnsn bool rv; 1719 1.19 alnsn 1720 1.19 alnsn const size_t extwords = GET_EXTWORDS(bc); 1721 1.19 alnsn const size_t memwords = GET_MEMWORDS(bc); 1722 1.13 alnsn 1723 1.13 alnsn ret0 = NULL; 1724 1.19 alnsn rv = false; 1725 1.7 alnsn 1726 1.1 alnsn ret0_size = 0; 1727 1.7 alnsn ret0_maxsize = 64; 1728 1.7 alnsn ret0 = BJ_ALLOC(ret0_maxsize * sizeof(ret0[0])); 1729 1.7 alnsn if (ret0 == NULL) 1730 1.1 alnsn goto fail; 1731 1.1 alnsn 1732 1.24 alnsn /* reset sjump members of jdata */ 1733 1.24 alnsn for (i = 0; i < insn_count; i++) { 1734 1.24 alnsn if (insn_dat[i].unreachable || 1735 1.24 alnsn BPF_CLASS(insns[i].code) != BPF_JMP) { 1736 1.24 alnsn continue; 1737 1.24 alnsn } 1738 1.24 alnsn 1739 1.24 alnsn jtf = insn_dat[i].u.jdata.jtf; 1740 1.24 alnsn jtf[0].sjump = jtf[1].sjump = NULL; 1741 1.24 alnsn } 1742 1.24 alnsn 1743 1.24 alnsn /* main loop */ 1744 1.1 alnsn for (i = 0; i < insn_count; i++) { 1745 1.7 alnsn if (insn_dat[i].unreachable) 1746 1.1 alnsn continue; 1747 1.1 alnsn 1748 1.1 alnsn /* 1749 1.1 alnsn * Resolve jumps to the current insn. 1750 1.1 alnsn */ 1751 1.1 alnsn label = NULL; 1752 1.7 alnsn SLIST_FOREACH(bjump, &insn_dat[i].bjumps, entries) { 1753 1.7 alnsn if (bjump->sjump != NULL) { 1754 1.1 alnsn if (label == NULL) 1755 1.1 alnsn label = sljit_emit_label(compiler); 1756 1.1 alnsn if (label == NULL) 1757 1.1 alnsn goto fail; 1758 1.7 alnsn sljit_set_label(bjump->sjump, label); 1759 1.1 alnsn } 1760 1.1 alnsn } 1761 1.1 alnsn 1762 1.9 alnsn to_mchain_jump = NULL; 1763 1.9 alnsn unconditional_ret = false; 1764 1.9 alnsn 1765 1.9 alnsn if (read_pkt_insn(&insns[i], NULL)) { 1766 1.9 alnsn if (insn_dat[i].u.rdata.check_length > UINT32_MAX) { 1767 1.9 alnsn /* Jump to "return 0" unconditionally. 
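 * check_length only exceeds UINT32_MAX when the ABC pass computed a
 * required length no packet can satisfy (MAX_ABC_LENGTH is
 * UINT32_MAX + 4), e.g. for the illustrative instruction
 * BPF_STMT(BPF_LD+BPF_W+BPF_ABS, UINT32_MAX).  In that case the read
 * is skipped entirely and control joins the ret0 chain.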
*/ 1768 1.9 alnsn unconditional_ret = true; 1769 1.9 alnsn jump = sljit_emit_jump(compiler, SLJIT_JUMP); 1770 1.9 alnsn if (jump == NULL) 1771 1.9 alnsn goto fail; 1772 1.9 alnsn if (!append_jump(jump, &ret0, 1773 1.9 alnsn &ret0_size, &ret0_maxsize)) 1774 1.9 alnsn goto fail; 1775 1.9 alnsn } else if (insn_dat[i].u.rdata.check_length > 0) { 1776 1.9 alnsn /* if (buflen < check_length) return 0; */ 1777 1.9 alnsn jump = sljit_emit_cmp(compiler, 1778 1.45 alnsn SLJIT_LESS, 1779 1.9 alnsn BJ_BUFLEN, 0, 1780 1.9 alnsn SLJIT_IMM, 1781 1.9 alnsn insn_dat[i].u.rdata.check_length); 1782 1.9 alnsn if (jump == NULL) 1783 1.9 alnsn goto fail; 1784 1.1 alnsn #ifdef _KERNEL 1785 1.9 alnsn to_mchain_jump = jump; 1786 1.1 alnsn #else 1787 1.9 alnsn if (!append_jump(jump, &ret0, 1788 1.9 alnsn &ret0_size, &ret0_maxsize)) 1789 1.9 alnsn goto fail; 1790 1.1 alnsn #endif 1791 1.9 alnsn } 1792 1.1 alnsn } 1793 1.1 alnsn 1794 1.1 alnsn pc = &insns[i]; 1795 1.1 alnsn switch (BPF_CLASS(pc->code)) { 1796 1.1 alnsn 1797 1.1 alnsn default: 1798 1.1 alnsn goto fail; 1799 1.1 alnsn 1800 1.1 alnsn case BPF_LD: 1801 1.1 alnsn /* BPF_LD+BPF_IMM A <- k */ 1802 1.1 alnsn if (pc->code == (BPF_LD|BPF_IMM)) { 1803 1.1 alnsn status = sljit_emit_op1(compiler, 1804 1.1 alnsn SLJIT_MOV, 1805 1.7 alnsn BJ_AREG, 0, 1806 1.1 alnsn SLJIT_IMM, (uint32_t)pc->k); 1807 1.1 alnsn if (status != SLJIT_SUCCESS) 1808 1.1 alnsn goto fail; 1809 1.1 alnsn 1810 1.1 alnsn continue; 1811 1.1 alnsn } 1812 1.1 alnsn 1813 1.1 alnsn /* BPF_LD+BPF_MEM A <- M[k] */ 1814 1.1 alnsn if (pc->code == (BPF_LD|BPF_MEM)) { 1815 1.13 alnsn if ((uint32_t)pc->k >= memwords) 1816 1.1 alnsn goto fail; 1817 1.13 alnsn status = emit_memload(compiler, 1818 1.13 alnsn BJ_AREG, pc->k, extwords); 1819 1.1 alnsn if (status != SLJIT_SUCCESS) 1820 1.1 alnsn goto fail; 1821 1.1 alnsn 1822 1.1 alnsn continue; 1823 1.1 alnsn } 1824 1.1 alnsn 1825 1.1 alnsn /* BPF_LD+BPF_W+BPF_LEN A <- len */ 1826 1.1 alnsn if (pc->code == (BPF_LD|BPF_W|BPF_LEN)) { 1827 1.1 alnsn status = sljit_emit_op1(compiler, 1828 1.21 alnsn SLJIT_MOV, /* size_t source */ 1829 1.7 alnsn BJ_AREG, 0, 1830 1.13 alnsn SLJIT_MEM1(BJ_ARGS), 1831 1.13 alnsn offsetof(struct bpf_args, wirelen)); 1832 1.1 alnsn if (status != SLJIT_SUCCESS) 1833 1.1 alnsn goto fail; 1834 1.1 alnsn 1835 1.1 alnsn continue; 1836 1.1 alnsn } 1837 1.1 alnsn 1838 1.1 alnsn mode = BPF_MODE(pc->code); 1839 1.1 alnsn if (mode != BPF_ABS && mode != BPF_IND) 1840 1.1 alnsn goto fail; 1841 1.1 alnsn 1842 1.9 alnsn if (unconditional_ret) 1843 1.9 alnsn continue; 1844 1.9 alnsn 1845 1.32 alnsn status = emit_pkt_read(compiler, hints, pc, 1846 1.7 alnsn to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize); 1847 1.1 alnsn if (status != SLJIT_SUCCESS) 1848 1.1 alnsn goto fail; 1849 1.1 alnsn 1850 1.1 alnsn continue; 1851 1.1 alnsn 1852 1.1 alnsn case BPF_LDX: 1853 1.1 alnsn mode = BPF_MODE(pc->code); 1854 1.1 alnsn 1855 1.1 alnsn /* BPF_LDX+BPF_W+BPF_IMM X <- k */ 1856 1.1 alnsn if (mode == BPF_IMM) { 1857 1.1 alnsn if (BPF_SIZE(pc->code) != BPF_W) 1858 1.1 alnsn goto fail; 1859 1.1 alnsn status = sljit_emit_op1(compiler, 1860 1.1 alnsn SLJIT_MOV, 1861 1.7 alnsn BJ_XREG, 0, 1862 1.1 alnsn SLJIT_IMM, (uint32_t)pc->k); 1863 1.1 alnsn if (status != SLJIT_SUCCESS) 1864 1.1 alnsn goto fail; 1865 1.1 alnsn 1866 1.1 alnsn continue; 1867 1.1 alnsn } 1868 1.1 alnsn 1869 1.1 alnsn /* BPF_LDX+BPF_W+BPF_LEN X <- len */ 1870 1.1 alnsn if (mode == BPF_LEN) { 1871 1.1 alnsn if (BPF_SIZE(pc->code) != BPF_W) 1872 1.1 alnsn goto fail; 1873 1.1 alnsn status = 
sljit_emit_op1(compiler, 1874 1.21 alnsn SLJIT_MOV, /* size_t source */ 1875 1.7 alnsn BJ_XREG, 0, 1876 1.13 alnsn SLJIT_MEM1(BJ_ARGS), 1877 1.13 alnsn offsetof(struct bpf_args, wirelen)); 1878 1.1 alnsn if (status != SLJIT_SUCCESS) 1879 1.1 alnsn goto fail; 1880 1.1 alnsn 1881 1.1 alnsn continue; 1882 1.1 alnsn } 1883 1.1 alnsn 1884 1.1 alnsn /* BPF_LDX+BPF_W+BPF_MEM X <- M[k] */ 1885 1.1 alnsn if (mode == BPF_MEM) { 1886 1.1 alnsn if (BPF_SIZE(pc->code) != BPF_W) 1887 1.1 alnsn goto fail; 1888 1.13 alnsn if ((uint32_t)pc->k >= memwords) 1889 1.1 alnsn goto fail; 1890 1.13 alnsn status = emit_memload(compiler, 1891 1.13 alnsn BJ_XREG, pc->k, extwords); 1892 1.1 alnsn if (status != SLJIT_SUCCESS) 1893 1.1 alnsn goto fail; 1894 1.1 alnsn 1895 1.1 alnsn continue; 1896 1.1 alnsn } 1897 1.1 alnsn 1898 1.1 alnsn /* BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf) */ 1899 1.1 alnsn if (mode != BPF_MSH || BPF_SIZE(pc->code) != BPF_B) 1900 1.1 alnsn goto fail; 1901 1.1 alnsn 1902 1.9 alnsn if (unconditional_ret) 1903 1.9 alnsn continue; 1904 1.9 alnsn 1905 1.32 alnsn status = emit_msh(compiler, hints, pc, 1906 1.7 alnsn to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize); 1907 1.1 alnsn if (status != SLJIT_SUCCESS) 1908 1.1 alnsn goto fail; 1909 1.1 alnsn 1910 1.1 alnsn continue; 1911 1.1 alnsn 1912 1.1 alnsn case BPF_ST: 1913 1.8 alnsn if (pc->code != BPF_ST || 1914 1.13 alnsn (uint32_t)pc->k >= memwords) { 1915 1.1 alnsn goto fail; 1916 1.8 alnsn } 1917 1.1 alnsn 1918 1.13 alnsn status = emit_memstore(compiler, 1919 1.13 alnsn BJ_AREG, pc->k, extwords); 1920 1.1 alnsn if (status != SLJIT_SUCCESS) 1921 1.1 alnsn goto fail; 1922 1.1 alnsn 1923 1.1 alnsn continue; 1924 1.1 alnsn 1925 1.1 alnsn case BPF_STX: 1926 1.8 alnsn if (pc->code != BPF_STX || 1927 1.13 alnsn (uint32_t)pc->k >= memwords) { 1928 1.1 alnsn goto fail; 1929 1.8 alnsn } 1930 1.1 alnsn 1931 1.13 alnsn status = emit_memstore(compiler, 1932 1.13 alnsn BJ_XREG, pc->k, extwords); 1933 1.1 alnsn if (status != SLJIT_SUCCESS) 1934 1.1 alnsn goto fail; 1935 1.1 alnsn 1936 1.1 alnsn continue; 1937 1.1 alnsn 1938 1.1 alnsn case BPF_ALU: 1939 1.1 alnsn if (pc->code == (BPF_ALU|BPF_NEG)) { 1940 1.1 alnsn status = sljit_emit_op1(compiler, 1941 1.1 alnsn SLJIT_NEG, 1942 1.7 alnsn BJ_AREG, 0, 1943 1.7 alnsn BJ_AREG, 0); 1944 1.1 alnsn if (status != SLJIT_SUCCESS) 1945 1.1 alnsn goto fail; 1946 1.1 alnsn 1947 1.1 alnsn continue; 1948 1.1 alnsn } 1949 1.1 alnsn 1950 1.33 christos op = BPF_OP(pc->code); 1951 1.33 christos if (op != BPF_DIV && op != BPF_MOD) { 1952 1.46 alnsn if (!alu_to_op(pc, &op2)) 1953 1.46 alnsn goto fail; 1954 1.39 alnsn 1955 1.1 alnsn status = sljit_emit_op2(compiler, 1956 1.39 alnsn op2, BJ_AREG, 0, BJ_AREG, 0, 1957 1.1 alnsn kx_to_reg(pc), kx_to_reg_arg(pc)); 1958 1.1 alnsn if (status != SLJIT_SUCCESS) 1959 1.1 alnsn goto fail; 1960 1.1 alnsn 1961 1.1 alnsn continue; 1962 1.1 alnsn } 1963 1.1 alnsn 1964 1.33 christos /* BPF_DIV/BPF_MOD */ 1965 1.1 alnsn 1966 1.1 alnsn src = BPF_SRC(pc->code); 1967 1.1 alnsn if (src != BPF_X && src != BPF_K) 1968 1.1 alnsn goto fail; 1969 1.1 alnsn 1970 1.1 alnsn /* division by zero? 
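 * BPF defines division and modulo by zero to return 0 from the whole
 * filter.  A BPF_X divisor is therefore tested at run time and a zero
 * value jumps to the ret0 chain, while a literal k == 0 becomes an
 * unconditional jump.  Non-zero constants that are powers of two,
 * detected with (k & (k - 1)) == 0, are handled by emit_pow2_moddiv()
 * (a shift for division, a mask for modulo); everything else goes
 * through emit_moddiv().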
*/ 1971 1.1 alnsn if (src == BPF_X) { 1972 1.1 alnsn jump = sljit_emit_cmp(compiler, 1973 1.45 alnsn SLJIT_EQUAL|SLJIT_I32_OP, 1974 1.8 alnsn BJ_XREG, 0, 1975 1.1 alnsn SLJIT_IMM, 0); 1976 1.1 alnsn if (jump == NULL) 1977 1.1 alnsn goto fail; 1978 1.7 alnsn if (!append_jump(jump, &ret0, 1979 1.7 alnsn &ret0_size, &ret0_maxsize)) 1980 1.7 alnsn goto fail; 1981 1.1 alnsn } else if (pc->k == 0) { 1982 1.1 alnsn jump = sljit_emit_jump(compiler, SLJIT_JUMP); 1983 1.1 alnsn if (jump == NULL) 1984 1.1 alnsn goto fail; 1985 1.7 alnsn if (!append_jump(jump, &ret0, 1986 1.7 alnsn &ret0_size, &ret0_maxsize)) 1987 1.7 alnsn goto fail; 1988 1.1 alnsn } 1989 1.1 alnsn 1990 1.1 alnsn if (src == BPF_X) { 1991 1.35 alnsn status = emit_moddiv(compiler, pc); 1992 1.1 alnsn if (status != SLJIT_SUCCESS) 1993 1.1 alnsn goto fail; 1994 1.1 alnsn } else if (pc->k != 0) { 1995 1.35 alnsn if (pc->k & (pc->k - 1)) { 1996 1.35 alnsn status = emit_moddiv(compiler, pc); 1997 1.1 alnsn } else { 1998 1.35 alnsn status = emit_pow2_moddiv(compiler, pc); 1999 1.1 alnsn } 2000 1.1 alnsn if (status != SLJIT_SUCCESS) 2001 1.1 alnsn goto fail; 2002 1.1 alnsn } 2003 1.1 alnsn 2004 1.1 alnsn continue; 2005 1.1 alnsn 2006 1.1 alnsn case BPF_JMP: 2007 1.33 christos op = BPF_OP(pc->code); 2008 1.33 christos if (op == BPF_JA) { 2009 1.1 alnsn jt = jf = pc->k; 2010 1.1 alnsn } else { 2011 1.1 alnsn jt = pc->jt; 2012 1.1 alnsn jf = pc->jf; 2013 1.1 alnsn } 2014 1.1 alnsn 2015 1.1 alnsn negate = (jt == 0) ? 1 : 0; 2016 1.1 alnsn branching = (jt == jf) ? 0 : 1; 2017 1.7 alnsn jtf = insn_dat[i].u.jdata.jtf; 2018 1.1 alnsn 2019 1.1 alnsn if (branching) { 2020 1.33 christos if (op != BPF_JSET) { 2021 1.46 alnsn if (!jmp_to_cond(pc, negate, &cond)) 2022 1.46 alnsn goto fail; 2023 1.1 alnsn jump = sljit_emit_cmp(compiler, 2024 1.46 alnsn cond, BJ_AREG, 0, 2025 1.1 alnsn kx_to_reg(pc), kx_to_reg_arg(pc)); 2026 1.1 alnsn } else { 2027 1.1 alnsn status = sljit_emit_op2(compiler, 2028 1.1 alnsn SLJIT_AND, 2029 1.7 alnsn BJ_TMP1REG, 0, 2030 1.7 alnsn BJ_AREG, 0, 2031 1.1 alnsn kx_to_reg(pc), kx_to_reg_arg(pc)); 2032 1.1 alnsn if (status != SLJIT_SUCCESS) 2033 1.1 alnsn goto fail; 2034 1.1 alnsn 2035 1.46 alnsn if (!jmp_to_cond(pc, negate, &cond)) 2036 1.46 alnsn goto fail; 2037 1.1 alnsn jump = sljit_emit_cmp(compiler, 2038 1.46 alnsn cond, BJ_TMP1REG, 0, SLJIT_IMM, 0); 2039 1.1 alnsn } 2040 1.1 alnsn 2041 1.1 alnsn if (jump == NULL) 2042 1.1 alnsn goto fail; 2043 1.1 alnsn 2044 1.7 alnsn BJ_ASSERT(jtf[negate].sjump == NULL); 2045 1.7 alnsn jtf[negate].sjump = jump; 2046 1.1 alnsn } 2047 1.1 alnsn 2048 1.1 alnsn if (!branching || (jt != 0 && jf != 0)) { 2049 1.1 alnsn jump = sljit_emit_jump(compiler, SLJIT_JUMP); 2050 1.1 alnsn if (jump == NULL) 2051 1.1 alnsn goto fail; 2052 1.1 alnsn 2053 1.7 alnsn BJ_ASSERT(jtf[branching].sjump == NULL); 2054 1.7 alnsn jtf[branching].sjump = jump; 2055 1.1 alnsn } 2056 1.1 alnsn 2057 1.1 alnsn continue; 2058 1.1 alnsn 2059 1.1 alnsn case BPF_RET: 2060 1.1 alnsn rval = BPF_RVAL(pc->code); 2061 1.1 alnsn if (rval == BPF_X) 2062 1.1 alnsn goto fail; 2063 1.1 alnsn 2064 1.1 alnsn /* BPF_RET+BPF_K accept k bytes */ 2065 1.1 alnsn if (rval == BPF_K) { 2066 1.7 alnsn status = sljit_emit_return(compiler, 2067 1.45 alnsn SLJIT_MOV_U32, 2068 1.1 alnsn SLJIT_IMM, (uint32_t)pc->k); 2069 1.1 alnsn if (status != SLJIT_SUCCESS) 2070 1.1 alnsn goto fail; 2071 1.1 alnsn } 2072 1.1 alnsn 2073 1.1 alnsn /* BPF_RET+BPF_A accept A bytes */ 2074 1.1 alnsn if (rval == BPF_A) { 2075 1.7 alnsn status = sljit_emit_return(compiler, 2076 
1.45 alnsn SLJIT_MOV_U32, 2077 1.7 alnsn BJ_AREG, 0); 2078 1.1 alnsn if (status != SLJIT_SUCCESS) 2079 1.1 alnsn goto fail; 2080 1.1 alnsn } 2081 1.1 alnsn 2082 1.1 alnsn continue; 2083 1.1 alnsn 2084 1.1 alnsn case BPF_MISC: 2085 1.7 alnsn switch (BPF_MISCOP(pc->code)) { 2086 1.7 alnsn case BPF_TAX: 2087 1.1 alnsn status = sljit_emit_op1(compiler, 2088 1.45 alnsn SLJIT_MOV_U32, 2089 1.7 alnsn BJ_XREG, 0, 2090 1.7 alnsn BJ_AREG, 0); 2091 1.1 alnsn if (status != SLJIT_SUCCESS) 2092 1.1 alnsn goto fail; 2093 1.1 alnsn 2094 1.1 alnsn continue; 2095 1.1 alnsn 2096 1.7 alnsn case BPF_TXA: 2097 1.1 alnsn status = sljit_emit_op1(compiler, 2098 1.1 alnsn SLJIT_MOV, 2099 1.7 alnsn BJ_AREG, 0, 2100 1.7 alnsn BJ_XREG, 0); 2101 1.1 alnsn if (status != SLJIT_SUCCESS) 2102 1.1 alnsn goto fail; 2103 1.1 alnsn 2104 1.1 alnsn continue; 2105 1.13 alnsn 2106 1.13 alnsn case BPF_COP: 2107 1.13 alnsn case BPF_COPX: 2108 1.13 alnsn if (bc == NULL || bc->copfuncs == NULL) 2109 1.13 alnsn goto fail; 2110 1.13 alnsn if (BPF_MISCOP(pc->code) == BPF_COP && 2111 1.13 alnsn (uint32_t)pc->k >= bc->nfuncs) { 2112 1.13 alnsn goto fail; 2113 1.13 alnsn } 2114 1.13 alnsn 2115 1.32 alnsn status = emit_cop(compiler, hints, bc, pc, 2116 1.28 alnsn &ret0, &ret0_size, &ret0_maxsize); 2117 1.13 alnsn if (status != SLJIT_SUCCESS) 2118 1.13 alnsn goto fail; 2119 1.13 alnsn 2120 1.13 alnsn continue; 2121 1.1 alnsn } 2122 1.1 alnsn 2123 1.1 alnsn goto fail; 2124 1.1 alnsn } /* switch */ 2125 1.1 alnsn } /* main loop */ 2126 1.1 alnsn 2127 1.7 alnsn BJ_ASSERT(ret0_size <= ret0_maxsize); 2128 1.1 alnsn 2129 1.7 alnsn if (ret0_size > 0) { 2130 1.1 alnsn label = sljit_emit_label(compiler); 2131 1.1 alnsn if (label == NULL) 2132 1.1 alnsn goto fail; 2133 1.7 alnsn for (i = 0; i < ret0_size; i++) 2134 1.7 alnsn sljit_set_label(ret0[i], label); 2135 1.1 alnsn } 2136 1.1 alnsn 2137 1.23 alnsn status = sljit_emit_return(compiler, 2138 1.45 alnsn SLJIT_MOV_U32, 2139 1.23 alnsn SLJIT_IMM, 0); 2140 1.23 alnsn if (status != SLJIT_SUCCESS) 2141 1.23 alnsn goto fail; 2142 1.23 alnsn 2143 1.19 alnsn rv = true; 2144 1.19 alnsn 2145 1.19 alnsn fail: 2146 1.19 alnsn if (ret0 != NULL) 2147 1.19 alnsn BJ_FREE(ret0, ret0_maxsize * sizeof(ret0[0])); 2148 1.19 alnsn 2149 1.19 alnsn return rv; 2150 1.19 alnsn } 2151 1.19 alnsn 2152 1.19 alnsn bpfjit_func_t 2153 1.19 alnsn bpfjit_generate_code(const bpf_ctx_t *bc, 2154 1.19 alnsn const struct bpf_insn *insns, size_t insn_count) 2155 1.19 alnsn { 2156 1.19 alnsn void *rv; 2157 1.19 alnsn struct sljit_compiler *compiler; 2158 1.19 alnsn 2159 1.19 alnsn size_t i; 2160 1.19 alnsn int status; 2161 1.19 alnsn 2162 1.19 alnsn /* optimization related */ 2163 1.19 alnsn bpf_memword_init_t initmask; 2164 1.20 alnsn bpfjit_hint_t hints; 2165 1.19 alnsn 2166 1.19 alnsn /* memory store location for initial zero initialization */ 2167 1.45 alnsn sljit_s32 mem_reg; 2168 1.19 alnsn sljit_sw mem_off; 2169 1.19 alnsn 2170 1.19 alnsn struct bpfjit_insn_data *insn_dat; 2171 1.19 alnsn 2172 1.19 alnsn const size_t extwords = GET_EXTWORDS(bc); 2173 1.19 alnsn const size_t memwords = GET_MEMWORDS(bc); 2174 1.19 alnsn const bpf_memword_init_t preinited = extwords ? 
bc->preinited : 0; 2175 1.19 alnsn 2176 1.19 alnsn rv = NULL; 2177 1.19 alnsn compiler = NULL; 2178 1.19 alnsn insn_dat = NULL; 2179 1.19 alnsn 2180 1.19 alnsn if (memwords > MAX_MEMWORDS) 2181 1.19 alnsn goto fail; 2182 1.19 alnsn 2183 1.19 alnsn if (insn_count == 0 || insn_count > SIZE_MAX / sizeof(insn_dat[0])) 2184 1.19 alnsn goto fail; 2185 1.19 alnsn 2186 1.19 alnsn insn_dat = BJ_ALLOC(insn_count * sizeof(insn_dat[0])); 2187 1.19 alnsn if (insn_dat == NULL) 2188 1.19 alnsn goto fail; 2189 1.19 alnsn 2190 1.20 alnsn if (!optimize(bc, insns, insn_dat, insn_count, &initmask, &hints)) 2191 1.19 alnsn goto fail; 2192 1.19 alnsn 2193 1.45 alnsn compiler = sljit_create_compiler(NULL); 2194 1.19 alnsn if (compiler == NULL) 2195 1.19 alnsn goto fail; 2196 1.19 alnsn 2197 1.19 alnsn #if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE 2198 1.19 alnsn sljit_compiler_verbose(compiler, stderr); 2199 1.19 alnsn #endif 2200 1.19 alnsn 2201 1.45 alnsn status = sljit_emit_enter(compiler, 0, 2, nscratches(hints), 2202 1.45 alnsn NSAVEDS, 0, 0, sizeof(struct bpfjit_stack)); 2203 1.19 alnsn if (status != SLJIT_SUCCESS) 2204 1.19 alnsn goto fail; 2205 1.19 alnsn 2206 1.20 alnsn if (hints & BJ_HINT_COP) { 2207 1.19 alnsn /* save ctx argument */ 2208 1.19 alnsn status = sljit_emit_op1(compiler, 2209 1.19 alnsn SLJIT_MOV_P, 2210 1.45 alnsn SLJIT_MEM1(SLJIT_SP), 2211 1.19 alnsn offsetof(struct bpfjit_stack, ctx), 2212 1.19 alnsn BJ_CTX_ARG, 0); 2213 1.19 alnsn if (status != SLJIT_SUCCESS) 2214 1.19 alnsn goto fail; 2215 1.19 alnsn } 2216 1.19 alnsn 2217 1.19 alnsn if (extwords == 0) { 2218 1.45 alnsn mem_reg = SLJIT_MEM1(SLJIT_SP); 2219 1.19 alnsn mem_off = offsetof(struct bpfjit_stack, mem); 2220 1.19 alnsn } else { 2221 1.19 alnsn /* copy "mem" argument from bpf_args to bpfjit_stack */ 2222 1.19 alnsn status = sljit_emit_op1(compiler, 2223 1.19 alnsn SLJIT_MOV_P, 2224 1.19 alnsn BJ_TMP1REG, 0, 2225 1.19 alnsn SLJIT_MEM1(BJ_ARGS), offsetof(struct bpf_args, mem)); 2226 1.19 alnsn if (status != SLJIT_SUCCESS) 2227 1.19 alnsn goto fail; 2228 1.19 alnsn 2229 1.19 alnsn status = sljit_emit_op1(compiler, 2230 1.19 alnsn SLJIT_MOV_P, 2231 1.45 alnsn SLJIT_MEM1(SLJIT_SP), 2232 1.19 alnsn offsetof(struct bpfjit_stack, extmem), 2233 1.19 alnsn BJ_TMP1REG, 0); 2234 1.19 alnsn if (status != SLJIT_SUCCESS) 2235 1.19 alnsn goto fail; 2236 1.19 alnsn 2237 1.19 alnsn mem_reg = SLJIT_MEM1(BJ_TMP1REG); 2238 1.19 alnsn mem_off = 0; 2239 1.19 alnsn } 2240 1.19 alnsn 2241 1.19 alnsn /* 2242 1.19 alnsn * Exclude pre-initialised external memory words but keep 2243 1.19 alnsn * initialization statuses of A and X registers in case 2244 1.19 alnsn * bc->preinited wrongly sets those two bits. 2245 1.19 alnsn */ 2246 1.19 alnsn initmask &= ~preinited | BJ_INIT_ABIT | BJ_INIT_XBIT; 2247 1.19 alnsn 2248 1.19 alnsn #if defined(_KERNEL) 2249 1.19 alnsn /* bpf_filter() checks initialization of memwords. 
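 * In other words, a program accepted by the kernel never reads M[k]
 * before writing it, so for a valid program initmask can only carry
 * the A and X bits at this point; the assertion below documents that
 * invariant.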
*/ 2250 1.19 alnsn BJ_ASSERT((initmask & (BJ_INIT_MBIT(memwords) - 1)) == 0); 2251 1.19 alnsn #endif 2252 1.19 alnsn for (i = 0; i < memwords; i++) { 2253 1.19 alnsn if (initmask & BJ_INIT_MBIT(i)) { 2254 1.19 alnsn /* M[i] = 0; */ 2255 1.19 alnsn status = sljit_emit_op1(compiler, 2256 1.45 alnsn SLJIT_MOV_U32, 2257 1.19 alnsn mem_reg, mem_off + i * sizeof(uint32_t), 2258 1.19 alnsn SLJIT_IMM, 0); 2259 1.19 alnsn if (status != SLJIT_SUCCESS) 2260 1.19 alnsn goto fail; 2261 1.19 alnsn } 2262 1.19 alnsn } 2263 1.19 alnsn 2264 1.19 alnsn if (initmask & BJ_INIT_ABIT) { 2265 1.19 alnsn /* A = 0; */ 2266 1.19 alnsn status = sljit_emit_op1(compiler, 2267 1.19 alnsn SLJIT_MOV, 2268 1.19 alnsn BJ_AREG, 0, 2269 1.19 alnsn SLJIT_IMM, 0); 2270 1.19 alnsn if (status != SLJIT_SUCCESS) 2271 1.19 alnsn goto fail; 2272 1.19 alnsn } 2273 1.19 alnsn 2274 1.19 alnsn if (initmask & BJ_INIT_XBIT) { 2275 1.19 alnsn /* X = 0; */ 2276 1.19 alnsn status = sljit_emit_op1(compiler, 2277 1.19 alnsn SLJIT_MOV, 2278 1.19 alnsn BJ_XREG, 0, 2279 1.19 alnsn SLJIT_IMM, 0); 2280 1.19 alnsn if (status != SLJIT_SUCCESS) 2281 1.19 alnsn goto fail; 2282 1.19 alnsn } 2283 1.19 alnsn 2284 1.19 alnsn status = load_buf_buflen(compiler); 2285 1.19 alnsn if (status != SLJIT_SUCCESS) 2286 1.19 alnsn goto fail; 2287 1.19 alnsn 2288 1.32 alnsn if (!generate_insn_code(compiler, hints, 2289 1.32 alnsn bc, insns, insn_dat, insn_count)) { 2290 1.19 alnsn goto fail; 2291 1.32 alnsn } 2292 1.19 alnsn 2293 1.1 alnsn rv = sljit_generate_code(compiler); 2294 1.1 alnsn 2295 1.1 alnsn fail: 2296 1.1 alnsn if (compiler != NULL) 2297 1.1 alnsn sljit_free_compiler(compiler); 2298 1.1 alnsn 2299 1.1 alnsn if (insn_dat != NULL) 2300 1.7 alnsn BJ_FREE(insn_dat, insn_count * sizeof(insn_dat[0])); 2301 1.1 alnsn 2302 1.4 rmind return (bpfjit_func_t)rv; 2303 1.1 alnsn } 2304 1.1 alnsn 2305 1.1 alnsn void 2306 1.4 rmind bpfjit_free_code(bpfjit_func_t code) 2307 1.1 alnsn { 2308 1.7 alnsn 2309 1.1 alnsn sljit_free_code((void *)code); 2310 1.1 alnsn } 2311
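/*
 * Example of driving the public interface from userland (an
 * illustrative sketch, not part of this file; bpf_args_t field names
 * follow <net/bpf.h>, headers such as <err.h> and <stdio.h> are
 * assumed, and error handling is minimal).  It compiles a filter that
 * accepts packets whose first byte is 0x45 and runs it over one
 * packet:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 0),
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0x45, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, UINT32_MAX),
 *		BPF_STMT(BPF_RET+BPF_K, 0),
 *	};
 *	uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x28 };
 *	bpf_args_t args = {
 *		.pkt = pkt,
 *		.wirelen = sizeof(pkt),
 *		.buflen = sizeof(pkt),
 *	};
 *	bpfjit_func_t fn;
 *
 *	fn = bpfjit_generate_code(NULL, insns, __arraycount(insns));
 *	if (fn == NULL)
 *		errx(EXIT_FAILURE, "bpfjit_generate_code failed");
 *	if (fn(NULL, &args) != 0)
 *		printf("accepted\n");
 *	bpfjit_free_code(fn);
 */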