/*	$NetBSD: bpfjit.c,v 1.2.4.1 2014/05/18 17:46:12 rmind Exp $	*/

/*-
 * Copyright (c) 2011-2014 Alexander Nasonov.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef _KERNEL
__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.2.4.1 2014/05/18 17:46:12 rmind Exp $");
#else
__RCSID("$NetBSD: bpfjit.c,v 1.2.4.1 2014/05/18 17:46:12 rmind Exp $");
#endif

#include <sys/types.h>
#include <sys/queue.h>

#ifndef _KERNEL
#include <assert.h>
#define	BJ_ASSERT(c) assert(c)
#else
#define	BJ_ASSERT(c) KASSERT(c)
#endif

#ifndef _KERNEL
#include <stdlib.h>
#define	BJ_ALLOC(sz) malloc(sz)
#define	BJ_FREE(p, sz) free(p)
#else
#include <sys/kmem.h>
#define	BJ_ALLOC(sz) kmem_alloc(sz, KM_SLEEP)
#define	BJ_FREE(p, sz) kmem_free(p, sz)
#endif

#ifndef _KERNEL
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#else
#include <sys/atomic.h>
#include <sys/module.h>
#endif

#define	__BPF_PRIVATE
#include <net/bpf.h>
#include <net/bpfjit.h>
#include <sljitLir.h>

#if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
#include <stdio.h> /* for stderr */
#endif

/*
 * Permanent register assignments.
 */
#define	BJ_BUF		SLJIT_SAVED_REG1
#define	BJ_WIRELEN	SLJIT_SAVED_REG2
#define	BJ_BUFLEN	SLJIT_SAVED_REG3
#define	BJ_AREG		SLJIT_TEMPORARY_REG1
#define	BJ_TMP1REG	SLJIT_TEMPORARY_REG2
#define	BJ_TMP2REG	SLJIT_TEMPORARY_REG3
#define	BJ_XREG		SLJIT_TEMPORARY_EREG1
#define	BJ_TMP3REG	SLJIT_TEMPORARY_EREG2

typedef unsigned int bpfjit_init_mask_t;
#define	BJ_INIT_NOBITS	0u
#define	BJ_INIT_MBIT(k)	(1u << (k))
#define	BJ_INIT_MMASK	(BJ_INIT_MBIT(BPF_MEMWORDS) - 1u)
#define	BJ_INIT_ABIT	BJ_INIT_MBIT(BPF_MEMWORDS)
#define	BJ_INIT_XBIT	BJ_INIT_MBIT(BPF_MEMWORDS + 1)
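
/*
 * Layout of the mask: bits 0..BPF_MEMWORDS-1 stand for the scratch
 * memory words M[0]..M[BPF_MEMWORDS-1], and the next two bits stand
 * for the A and X registers.  In the *initmask computed by
 * optimize_pass1() below, a set bit means that the location may be
 * read before it is written and is therefore zeroed in the prologue.
 */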

struct bpfjit_stack
{
	uint32_t mem[BPF_MEMWORDS];
#ifdef _KERNEL
	void *tmp;
#endif
};
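
/*
 * The structure above describes the sljit local area of the generated
 * function: mem[] backs the BPF scratch memory M[k] and, in the
 * kernel, tmp is the on-stack word whose address is passed as the
 * error pointer to m_xword/m_xhalf/m_xbyte (see emit_xcall() below).
 */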

/*
 * Data for BPF_JMP instruction.
 * Forward declaration for struct bpfjit_jump.
 */
struct bpfjit_jump_data;

/*
 * Node of bjumps list.
 */
struct bpfjit_jump {
	struct sljit_jump *sjump;
	SLIST_ENTRY(bpfjit_jump) entries;
	struct bpfjit_jump_data *jdata;
};

/*
 * Data for BPF_JMP instruction.
 */
struct bpfjit_jump_data {
	/*
	 * These entries make up bjumps list:
	 * jtf[0] - when coming from jt path,
	 * jtf[1] - when coming from jf path.
	 */
	struct bpfjit_jump jtf[2];
	/*
	 * Length calculated by Array Bounds Check Elimination (ABC) pass.
	 */
	uint32_t abc_length;
	/*
	 * Length checked by the last out-of-bounds check.
	 */
	uint32_t checked_length;
};

/*
 * Data for "read from packet" instructions.
 * See also read_pkt_insn() function below.
 */
struct bpfjit_read_pkt_data {
	/*
	 * Length calculated by Array Bounds Check Elimination (ABC) pass.
	 */
	uint32_t abc_length;
	/*
	 * If positive, emit "if (buflen < check_length) return 0"
	 * out-of-bounds check.
	 * We assume that buflen is never equal to UINT32_MAX (otherwise,
	 * we'd need a special bool variable to emit unconditional "return 0").
	 */
	uint32_t check_length;
};

/*
 * Additional (optimization-related) data for bpf_insn.
 */
struct bpfjit_insn_data {
	/* List of jumps to this insn. */
	SLIST_HEAD(, bpfjit_jump) bjumps;

	union {
		struct bpfjit_jump_data jdata;
		struct bpfjit_read_pkt_data rdata;
	} u;

	bpfjit_init_mask_t invalid;
	bool unreachable;
};
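
/*
 * The invalid mask records which of A, X and M[k] may still be
 * uninitialized when control reaches this instruction, and
 * unreachable marks dead instructions that code generation skips
 * entirely.  Both fields are filled in by optimize_pass1() below.
 */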

#ifdef _KERNEL

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

MODULE(MODULE_CLASS_MISC, bpfjit, "sljit")

static int
bpfjit_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		bpfjit_module_ops.bj_free_code = &bpfjit_free_code;
		membar_producer();
		bpfjit_module_ops.bj_generate_code = &bpfjit_generate_code;
		membar_producer();
		return 0;

	case MODULE_CMD_FINI:
		return EOPNOTSUPP;

	default:
		return ENOTTY;
	}
}
#endif

static uint32_t
read_width(const struct bpf_insn *pc)
{

	switch (BPF_SIZE(pc->code)) {
	case BPF_W:
		return 4;
	case BPF_H:
		return 2;
	case BPF_B:
		return 1;
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

static bool
grow_jumps(struct sljit_jump ***jumps, size_t *size)
{
	struct sljit_jump **newptr;
	const size_t elemsz = sizeof(struct sljit_jump *);
	size_t old_size = *size;
	size_t new_size = 2 * old_size;

	if (new_size < old_size || new_size > SIZE_MAX / elemsz)
		return false;

	newptr = BJ_ALLOC(new_size * elemsz);
	if (newptr == NULL)
		return false;

	memcpy(newptr, *jumps, old_size * elemsz);
	BJ_FREE(*jumps, old_size * elemsz);

	*jumps = newptr;
	*size = new_size;
	return true;
}

static bool
append_jump(struct sljit_jump *jump, struct sljit_jump ***jumps,
    size_t *size, size_t *max_size)
{
	if (*size == *max_size && !grow_jumps(jumps, max_size))
		return false;

	(*jumps)[(*size)++] = jump;
	return true;
}

/*
 * Generate code for BPF_LD+BPF_B+BPF_ABS    A <- P[k:1].
 */
static int
emit_read8(struct sljit_compiler* compiler, uint32_t k)
{

	return sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_AREG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
}

/*
 * Generate code for BPF_LD+BPF_H+BPF_ABS    A <- P[k:2].
 */
static int
emit_read16(struct sljit_compiler* compiler, uint32_t k)
{
	int status;

	/* tmp1 = buf[k]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = buf[k+1]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_AREG, 0,
	    SLJIT_MEM1(BJ_BUF), k+1);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = tmp1 << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	return status;
}
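
/*
 * Note: emit_read16() and emit_read32() assemble the value one byte
 * at a time; this yields the big-endian (network order) result that
 * BPF loads require and avoids unaligned word accesses on
 * strict-alignment machines.
 */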

/*
 * Generate code for BPF_LD+BPF_W+BPF_ABS    A <- P[k:4].
 */
static int
emit_read32(struct sljit_compiler* compiler, uint32_t k)
{
	int status;

	/* tmp1 = buf[k]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp2 = buf[k+1]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP2REG, 0,
	    SLJIT_MEM1(BJ_BUF), k+1);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = buf[k+3]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_AREG, 0,
	    SLJIT_MEM1(BJ_BUF), k+3);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = tmp1 << 24; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 24);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = buf[k+2]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k+2);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp2 = tmp2 << 16; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP2REG, 0,
	    BJ_TMP2REG, 0,
	    SLJIT_IMM, 16);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp2; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP2REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = tmp1 << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	return status;
}

#ifdef _KERNEL
/*
 * Generate m_xword/m_xhalf/m_xbyte call.
 *
 * pc is one of:
 * BPF_LD+BPF_W+BPF_ABS    A <- P[k:4]
 * BPF_LD+BPF_H+BPF_ABS    A <- P[k:2]
 * BPF_LD+BPF_B+BPF_ABS    A <- P[k:1]
 * BPF_LD+BPF_W+BPF_IND    A <- P[X+k:4]
 * BPF_LD+BPF_H+BPF_IND    A <- P[X+k:2]
 * BPF_LD+BPF_B+BPF_IND    A <- P[X+k:1]
 * BPF_LDX+BPF_B+BPF_MSH   X <- 4*(P[k:1]&0xf)
 *
 * The dst variable should be
 *  - BJ_AREG when emitting code for BPF_LD instructions,
 *  - BJ_XREG or any of BJ_TMP[1-3]REG registers when emitting
 *    code for BPF_MSH instruction.
 */
static int
emit_xcall(struct sljit_compiler* compiler, const struct bpf_insn *pc,
    int dst, sljit_w dstw, struct sljit_jump **ret0_jump,
    uint32_t (*fn)(const struct mbuf *, uint32_t, int *))
{
#if BJ_XREG == SLJIT_RETURN_REG   || \
    BJ_XREG == SLJIT_TEMPORARY_REG1 || \
    BJ_XREG == SLJIT_TEMPORARY_REG2 || \
    BJ_XREG == SLJIT_TEMPORARY_REG3
#error "Not supported assignment of registers."
#endif
	int status;

	/*
	 * The third argument of fn is an address on the stack.
	 */
	const int arg3_offset = offsetof(struct bpfjit_stack, tmp);

	if (BPF_CLASS(pc->code) == BPF_LDX) {
		/* save A */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_TMP3REG, 0,
		    BJ_AREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * Prepare registers for fn(buf, k, &err) call.
	 */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_TEMPORARY_REG1, 0,
	    BJ_BUF, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	if (BPF_CLASS(pc->code) == BPF_LD && BPF_MODE(pc->code) == BPF_IND) {
		status = sljit_emit_op2(compiler,
		    SLJIT_ADD,
		    SLJIT_TEMPORARY_REG2, 0,
		    BJ_XREG, 0,
		    SLJIT_IMM, (uint32_t)pc->k);
	} else {
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    SLJIT_TEMPORARY_REG2, 0,
		    SLJIT_IMM, (uint32_t)pc->k);
	}

	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_get_local_base(compiler,
	    SLJIT_TEMPORARY_REG3, 0, arg3_offset);
	if (status != SLJIT_SUCCESS)
		return status;

	/* fn(buf, k, &err); */
	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL3,
	    SLJIT_IMM, SLJIT_FUNC_OFFSET(fn));

	if (dst != SLJIT_RETURN_REG) {
		/* move return value to dst */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    dst, dstw,
		    SLJIT_RETURN_REG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	if (BPF_CLASS(pc->code) == BPF_LDX) {
		/* restore A */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_AREG, 0,
		    BJ_TMP3REG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/* tmp3 = *err; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UI,
	    SLJIT_TEMPORARY_REG3, 0,
	    SLJIT_MEM1(SLJIT_LOCALS_REG), arg3_offset);
	if (status != SLJIT_SUCCESS)
		return status;

	/* if (tmp3 != 0) return 0; */
	*ret0_jump = sljit_emit_cmp(compiler,
	    SLJIT_C_NOT_EQUAL,
	    SLJIT_TEMPORARY_REG3, 0,
	    SLJIT_IMM, 0);
	if (*ret0_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	return status;
}
#endif

/*
 * Generate code for
 * BPF_LD+BPF_W+BPF_ABS    A <- P[k:4]
 * BPF_LD+BPF_H+BPF_ABS    A <- P[k:2]
 * BPF_LD+BPF_B+BPF_ABS    A <- P[k:1]
 * BPF_LD+BPF_W+BPF_IND    A <- P[X+k:4]
 * BPF_LD+BPF_H+BPF_IND    A <- P[X+k:2]
 * BPF_LD+BPF_B+BPF_IND    A <- P[X+k:1]
 */
static int
emit_pkt_read(struct sljit_compiler* compiler,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status = 0;	/* XXX: silences a spurious uninitialized warning from gcc 4.1 */
	uint32_t width;
	struct sljit_jump *jump;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *over_mchain_jump;
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	if (to_mchain_jump == NULL) {
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_C_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	width = read_width(pc);

	if (BPF_MODE(pc->code) == BPF_IND) {
		/* tmp1 = buflen - (pc->k + width); */
		status = sljit_emit_op2(compiler,
		    SLJIT_SUB,
		    BJ_TMP1REG, 0,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, k + width);
		if (status != SLJIT_SUCCESS)
			return status;

		/* buf += X; */
		status = sljit_emit_op2(compiler,
		    SLJIT_ADD,
		    BJ_BUF, 0,
		    BJ_BUF, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;

		/* if (tmp1 < X) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_LESS,
		    BJ_TMP1REG, 0,
		    BJ_XREG, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	switch (width) {
	case 4:
		status = emit_read32(compiler, k);
		break;
	case 2:
		status = emit_read16(compiler, k);
		break;
	case 1:
		status = emit_read8(compiler, k);
		break;
	}

	if (status != SLJIT_SUCCESS)
		return status;

	if (BPF_MODE(pc->code) == BPF_IND) {
		/* buf -= X; */
		status = sljit_emit_op2(compiler,
		    SLJIT_SUB,
		    BJ_BUF, 0,
		    BJ_BUF, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

#ifdef _KERNEL
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	switch (width) {
	case 4:
		status = emit_xcall(compiler, pc, BJ_AREG, 0, &jump, &m_xword);
		break;
	case 2:
		status = emit_xcall(compiler, pc, BJ_AREG, 0, &jump, &m_xhalf);
		break;
	case 1:
		status = emit_xcall(compiler, pc, BJ_AREG, 0, &jump, &m_xbyte);
		break;
	}

	if (status != SLJIT_SUCCESS)
		return status;

	if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
		return SLJIT_ERR_ALLOC_FAILED;

	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	return status;
}

/*
 * Generate code for BPF_LDX+BPF_B+BPF_MSH    X <- 4*(P[k:1]&0xf),
 * i.e. load into X the length, in bytes, of the IP header whose
 * first byte is at offset k.
 */
static int
emit_msh(struct sljit_compiler* compiler,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *jump, *over_mchain_jump;
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	if (to_mchain_jump == NULL) {
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_C_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	/* tmp1 = buf[k] */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 &= 0xf */
	status = sljit_emit_op2(compiler,
	    SLJIT_AND,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 0xf);
	if (status != SLJIT_SUCCESS)
		return status;

	/* X = tmp1 << 2 */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_XREG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 2);
	if (status != SLJIT_SUCCESS)
		return status;

#ifdef _KERNEL
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	status = emit_xcall(compiler, pc, BJ_TMP1REG, 0, &jump, &m_xbyte);
	if (status != SLJIT_SUCCESS)
		return status;

	if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
		return SLJIT_ERR_ALLOC_FAILED;

	/* tmp1 &= 0xf */
	status = sljit_emit_op2(compiler,
	    SLJIT_AND,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 0xf);
	if (status != SLJIT_SUCCESS)
		return status;

	/* X = tmp1 << 2 */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_XREG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 2);
	if (status != SLJIT_SUCCESS)
		return status;

	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	return status;
}

static int
emit_pow2_division(struct sljit_compiler* compiler, uint32_t k)
{
	int shift = 0;
	int status = SLJIT_SUCCESS;

	while (k > 1) {
		k >>= 1;
		shift++;
	}

	BJ_ASSERT(k == 1 && shift < 32);

	if (shift != 0) {
		status = sljit_emit_op2(compiler,
		    SLJIT_LSHR|SLJIT_INT_OP,
		    BJ_AREG, 0,
		    BJ_AREG, 0,
		    SLJIT_IMM, shift);
	}

	return status;
}
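
/*
 * For example, a BPF_ALU+BPF_DIV+BPF_K instruction with k == 8 goes
 * through emit_pow2_division() above and is compiled to a single
 * "A = A >> 3" shift instead of a call to the generic divide helper.
 */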

#if !defined(BPFJIT_USE_UDIV)
static sljit_uw
divide(sljit_uw x, sljit_uw y)
{

	return (uint32_t)x / (uint32_t)y;
}
#endif

/*
 * Generate A = A / div.
 * divt,divw are either SLJIT_IMM,pc->k or BJ_XREG,0.
 */
static int
emit_division(struct sljit_compiler* compiler, int divt, sljit_w divw)
{
	int status;

#if BJ_XREG == SLJIT_RETURN_REG   || \
    BJ_XREG == SLJIT_TEMPORARY_REG1 || \
    BJ_XREG == SLJIT_TEMPORARY_REG2 || \
    BJ_AREG == SLJIT_TEMPORARY_REG2
#error "Not supported assignment of registers."
#endif

#if BJ_AREG != SLJIT_TEMPORARY_REG1
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_TEMPORARY_REG1, 0,
	    BJ_AREG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_TEMPORARY_REG2, 0,
	    divt, divw);
	if (status != SLJIT_SUCCESS)
		return status;

#if defined(BPFJIT_USE_UDIV)
	status = sljit_emit_op0(compiler, SLJIT_UDIV|SLJIT_INT_OP);

#if BJ_AREG != SLJIT_TEMPORARY_REG1
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_TEMPORARY_REG1, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif
#else
	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL2,
	    SLJIT_IMM, SLJIT_FUNC_OFFSET(divide));

#if BJ_AREG != SLJIT_RETURN_REG
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_RETURN_REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif
#endif

	return status;
}

/*
 * Return true if pc is a "read from packet" instruction.
 * If length is not NULL and the return value is true, *length is set
 * to the number of packet bytes that must be available for the
 * instruction to execute safely.
 */
static bool
read_pkt_insn(const struct bpf_insn *pc, uint32_t *length)
{
	bool rv;
	uint32_t width;

	switch (BPF_CLASS(pc->code)) {
	default:
		rv = false;
		break;

	case BPF_LD:
		rv = BPF_MODE(pc->code) == BPF_ABS ||
		     BPF_MODE(pc->code) == BPF_IND;
		if (rv)
			width = read_width(pc);
		break;

	case BPF_LDX:
		rv = pc->code == (BPF_LDX|BPF_B|BPF_MSH);
		width = 1;
		break;
	}

	if (rv && length != NULL) {
		*length = (pc->k > UINT32_MAX - width) ?
		    UINT32_MAX : pc->k + width;
	}

	return rv;
}

static void
optimize_init(struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	size_t i;

	for (i = 0; i < insn_count; i++) {
		SLIST_INIT(&insn_dat[i].bjumps);
		insn_dat[i].invalid = BJ_INIT_NOBITS;
	}
}

/*
 * The function divides instructions into blocks.  The destination of
 * a jump instruction starts a new block, and BPF_RET and BPF_JMP
 * instructions terminate a block.  Blocks are linear, that is, there
 * are no jumps out of the middle of a block and no jumps into the
 * middle of a block.
 *
 * The function also sets bits in *initmask for memwords that
 * need to be initialized to zero.  Note that this set should be empty
 * for any valid kernel filter program.
 */
static bool
optimize_pass1(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count,
    bpfjit_init_mask_t *initmask, int *nscratches)
{
	struct bpfjit_jump *jtf;
	size_t i;
	uint32_t jt, jf;
	bpfjit_init_mask_t invalid; /* borrowed from bpf_filter() */
	bool unreachable;

	*nscratches = 2;
	*initmask = BJ_INIT_NOBITS;

	unreachable = false;
	invalid = ~BJ_INIT_NOBITS;

	for (i = 0; i < insn_count; i++) {
		if (!SLIST_EMPTY(&insn_dat[i].bjumps))
			unreachable = false;
		insn_dat[i].unreachable = unreachable;

		if (unreachable)
			continue;

		invalid |= insn_dat[i].invalid;

		switch (BPF_CLASS(insns[i].code)) {
		case BPF_RET:
			if (BPF_RVAL(insns[i].code) == BPF_A)
				*initmask |= invalid & BJ_INIT_ABIT;

			unreachable = true;
			continue;

		case BPF_LD:
			if (BPF_MODE(insns[i].code) == BPF_IND ||
			    BPF_MODE(insns[i].code) == BPF_ABS) {
				if (BPF_MODE(insns[i].code) == BPF_IND &&
				    *nscratches < 4) {
					/* uses BJ_XREG */
					*nscratches = 4;
				}
				if (*nscratches < 3 &&
				    read_width(&insns[i]) == 4) {
					/* uses BJ_TMP2REG */
					*nscratches = 3;
				}
			}

			if (BPF_MODE(insns[i].code) == BPF_IND)
				*initmask |= invalid & BJ_INIT_XBIT;

			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < BPF_MEMWORDS) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_LDX:
#if defined(_KERNEL)
			/* uses BJ_TMP3REG */
			*nscratches = 5;
#endif
			/* uses BJ_XREG */
			if (*nscratches < 4)
				*nscratches = 4;

			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < BPF_MEMWORDS) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			invalid &= ~BJ_INIT_XBIT;
			continue;

		case BPF_ST:
			*initmask |= invalid & BJ_INIT_ABIT;

			if ((uint32_t)insns[i].k < BPF_MEMWORDS)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_STX:
			/* uses BJ_XREG */
			if (*nscratches < 4)
				*nscratches = 4;

			*initmask |= invalid & BJ_INIT_XBIT;

			if ((uint32_t)insns[i].k < BPF_MEMWORDS)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_ALU:
			*initmask |= invalid & BJ_INIT_ABIT;

			if (insns[i].code != (BPF_ALU|BPF_NEG) &&
			    BPF_SRC(insns[i].code) == BPF_X) {
				*initmask |= invalid & BJ_INIT_XBIT;
				/* uses BJ_XREG */
				if (*nscratches < 4)
					*nscratches = 4;
			}

			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_MISC:
			switch (BPF_MISCOP(insns[i].code)) {
			case BPF_TAX: /* X <- A */
				/* uses BJ_XREG */
				if (*nscratches < 4)
					*nscratches = 4;

				*initmask |= invalid & BJ_INIT_ABIT;
				invalid &= ~BJ_INIT_XBIT;
				continue;

			case BPF_TXA: /* A <- X */
				/* uses BJ_XREG */
				if (*nscratches < 4)
					*nscratches = 4;

				*initmask |= invalid & BJ_INIT_XBIT;
				invalid &= ~BJ_INIT_ABIT;
				continue;
			}

			continue;

		case BPF_JMP:
			/* Initialize abc_length for ABC pass. */
			insn_dat[i].u.jdata.abc_length = UINT32_MAX;

			if (BPF_OP(insns[i].code) == BPF_JA) {
				jt = jf = insns[i].k;
			} else {
				jt = insns[i].jt;
				jf = insns[i].jf;
			}

			if (jt >= insn_count - (i + 1) ||
			    jf >= insn_count - (i + 1)) {
				return false;
			}

			if (jt > 0 && jf > 0)
				unreachable = true;

			jt += i + 1;
			jf += i + 1;

			jtf = insn_dat[i].u.jdata.jtf;

			jtf[0].sjump = NULL;
			jtf[0].jdata = &insn_dat[i].u.jdata;
			SLIST_INSERT_HEAD(&insn_dat[jt].bjumps,
			    &jtf[0], entries);

			if (jf != jt) {
				jtf[1].sjump = NULL;
				jtf[1].jdata = &insn_dat[i].u.jdata;
				SLIST_INSERT_HEAD(&insn_dat[jf].bjumps,
				    &jtf[1], entries);
			}

			insn_dat[jf].invalid |= invalid;
			insn_dat[jt].invalid |= invalid;
			invalid = 0;

			continue;
		}
	}

	return true;
}

/*
 * Array Bounds Check Elimination (ABC) pass.
 *
 * Walk the program backwards and, for every "read from packet"
 * instruction, record the largest packet length required by it and by
 * any instruction that can execute after it.  A later pass uses this
 * value so that one bounds check can cover several reads.
 */
static void
optimize_pass2(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	struct bpfjit_jump *jmp;
	const struct bpf_insn *pc;
	struct bpfjit_insn_data *pd;
	size_t i;
	uint32_t length, abc_length = 0;

	for (i = insn_count; i != 0; i--) {
		pc = &insns[i-1];
		pd = &insn_dat[i-1];

		if (pd->unreachable)
			continue;

		switch (BPF_CLASS(pc->code)) {
		case BPF_RET:
			abc_length = 0;
			break;

		case BPF_JMP:
			abc_length = pd->u.jdata.abc_length;
			break;

		default:
			if (read_pkt_insn(pc, &length)) {
				if (abc_length < length)
					abc_length = length;
				pd->u.rdata.abc_length = abc_length;
			}
			break;
		}

		SLIST_FOREACH(jmp, &pd->bjumps, entries) {
			if (jmp->jdata->abc_length > abc_length)
				jmp->jdata->abc_length = abc_length;
		}
	}
}

static void
optimize_pass3(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	struct bpfjit_jump *jmp;
	size_t i;
	uint32_t length, checked_length = 0;

	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable)
			continue;

		SLIST_FOREACH(jmp, &insn_dat[i].bjumps, entries) {
			if (jmp->jdata->checked_length < checked_length)
				checked_length = jmp->jdata->checked_length;
		}

		if (BPF_CLASS(insns[i].code) == BPF_JMP) {
			insn_dat[i].u.jdata.checked_length = checked_length;
		} else if (read_pkt_insn(&insns[i], &length)) {
			struct bpfjit_read_pkt_data *rdata =
			    &insn_dat[i].u.rdata;
			rdata->check_length = 0;
			if (checked_length < rdata->abc_length) {
				checked_length = rdata->abc_length;
				rdata->check_length = checked_length;
			}
		}
	}
}

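/*
 * Run the optimization passes over the program: pass1 computes
 * reachability, the init mask and the number of scratch registers,
 * pass2 propagates the required packet lengths backwards (ABC), and
 * pass3 walks forward and decides where an explicit length check
 * still has to be emitted.
 */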
static bool
optimize(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count,
    bpfjit_init_mask_t *initmask, int *nscratches)
{

	optimize_init(insn_dat, insn_count);

	if (!optimize_pass1(insns, insn_dat, insn_count,
	    initmask, nscratches)) {
		return false;
	}

	optimize_pass2(insns, insn_dat, insn_count);
	optimize_pass3(insns, insn_dat, insn_count);

	return true;
}

/*
 * Convert BPF_ALU operations except BPF_NEG and BPF_DIV to sljit operation.
 */
static int
bpf_alu_to_sljit_op(const struct bpf_insn *pc)
{

	/*
	 * Note: all supported 64bit arches have a 32bit multiply
	 * instruction, so SLJIT_INT_OP doesn't add any overhead.
	 */
	switch (BPF_OP(pc->code)) {
	case BPF_ADD: return SLJIT_ADD;
	case BPF_SUB: return SLJIT_SUB;
	case BPF_MUL: return SLJIT_MUL|SLJIT_INT_OP;
	case BPF_OR:  return SLJIT_OR;
	case BPF_AND: return SLJIT_AND;
	case BPF_LSH: return SLJIT_SHL;
	case BPF_RSH: return SLJIT_LSHR|SLJIT_INT_OP;
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

/*
 * Convert BPF_JMP operations except BPF_JA to sljit condition.
 */
static int
bpf_jmp_to_sljit_cond(const struct bpf_insn *pc, bool negate)
{
	/*
	 * Note: all supported 64bit arches have 32bit comparison
	 * instructions so SLJIT_INT_OP doesn't have any overhead.
	 */
	int rv = SLJIT_INT_OP;

	switch (BPF_OP(pc->code)) {
	case BPF_JGT:
		rv |= negate ? SLJIT_C_LESS_EQUAL : SLJIT_C_GREATER;
		break;
	case BPF_JGE:
		rv |= negate ? SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL;
		break;
	case BPF_JEQ:
		rv |= negate ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL;
		break;
	case BPF_JSET:
		rv |= negate ? SLJIT_C_EQUAL : SLJIT_C_NOT_EQUAL;
		break;
	default:
		BJ_ASSERT(false);
	}

	return rv;
}

/*
 * Convert BPF_K and BPF_X to sljit register.
 */
static int
kx_to_reg(const struct bpf_insn *pc)
{

	switch (BPF_SRC(pc->code)) {
	case BPF_K: return SLJIT_IMM;
	case BPF_X: return BJ_XREG;
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

static sljit_w
kx_to_reg_arg(const struct bpf_insn *pc)
{

	switch (BPF_SRC(pc->code)) {
	case BPF_K: return (uint32_t)pc->k; /* SLJIT_IMM, pc->k, */
	case BPF_X: return 0; /* BJ_XREG, 0, */
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

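/*
 * Compile a BPF program into native code.
 *
 * The generated function receives three arguments (mapped to BJ_BUF,
 * BJ_WIRELEN and BJ_BUFLEN by sljit_emit_enter() below): the packet
 * pointer, the length of the original packet and the number of bytes
 * present in the buffer.  In the kernel, a buffer length of zero
 * means that the first argument points to an mbuf chain and bytes
 * are fetched with m_xword/m_xhalf/m_xbyte.
 */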
bpfjit_func_t
bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
{
	void *rv;
	struct sljit_compiler *compiler;

	size_t i;
	int status;
	int branching, negate;
	unsigned int rval, mode, src;

	/* optimization related */
	bpfjit_init_mask_t initmask;
	int nscratches;

	/* a list of jumps to the "return 0" exit at the end of the generated function */
	struct sljit_jump **ret0;
	size_t ret0_size, ret0_maxsize;

	const struct bpf_insn *pc;
	struct bpfjit_insn_data *insn_dat;

	/* for local use */
	struct sljit_label *label;
	struct sljit_jump *jump;
	struct bpfjit_jump *bjump, *jtf;

	struct sljit_jump *to_mchain_jump;

	uint32_t jt, jf;

	rv = NULL;
	compiler = NULL;
	insn_dat = NULL;
	ret0 = NULL;

	if (insn_count == 0 || insn_count > SIZE_MAX / sizeof(insn_dat[0]))
		goto fail;

	insn_dat = BJ_ALLOC(insn_count * sizeof(insn_dat[0]));
	if (insn_dat == NULL)
		goto fail;

	if (!optimize(insns, insn_dat, insn_count,
	    &initmask, &nscratches)) {
		goto fail;
	}

#if defined(_KERNEL)
	/* bpf_filter() checks initialization of memwords. */
	BJ_ASSERT((initmask & BJ_INIT_MMASK) == 0);
#endif

	ret0_size = 0;
	ret0_maxsize = 64;
	ret0 = BJ_ALLOC(ret0_maxsize * sizeof(ret0[0]));
	if (ret0 == NULL)
		goto fail;

	compiler = sljit_create_compiler();
	if (compiler == NULL)
		goto fail;

#if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
	sljit_compiler_verbose(compiler, stderr);
#endif

	status = sljit_emit_enter(compiler,
	    3, nscratches, 3, sizeof(struct bpfjit_stack));
	if (status != SLJIT_SUCCESS)
		goto fail;

	for (i = 0; i < BPF_MEMWORDS; i++) {
		if (initmask & BJ_INIT_MBIT(i)) {
			status = sljit_emit_op1(compiler,
			    SLJIT_MOV_UI,
			    SLJIT_MEM1(SLJIT_LOCALS_REG),
			    offsetof(struct bpfjit_stack, mem) +
				i * sizeof(uint32_t),
			    SLJIT_IMM, 0);
			if (status != SLJIT_SUCCESS)
				goto fail;
		}
	}

	if (initmask & BJ_INIT_ABIT) {
		/* A = 0; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_AREG, 0,
		    SLJIT_IMM, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	if (initmask & BJ_INIT_XBIT) {
		/* X = 0; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_XREG, 0,
		    SLJIT_IMM, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable)
			continue;

		to_mchain_jump = NULL;

		/*
		 * Resolve jumps to the current insn.
		 */
		label = NULL;
		SLIST_FOREACH(bjump, &insn_dat[i].bjumps, entries) {
			if (bjump->sjump != NULL) {
				if (label == NULL)
					label = sljit_emit_label(compiler);
				if (label == NULL)
					goto fail;
				sljit_set_label(bjump->sjump, label);
			}
		}

		if (read_pkt_insn(&insns[i], NULL) &&
		    insn_dat[i].u.rdata.check_length > 0) {
			/* if (buflen < check_length) return 0; */
			jump = sljit_emit_cmp(compiler,
			    SLJIT_C_LESS,
			    BJ_BUFLEN, 0,
			    SLJIT_IMM,
			    insn_dat[i].u.rdata.check_length);
			if (jump == NULL)
				goto fail;
#ifdef _KERNEL
			to_mchain_jump = jump;
#else
			if (!append_jump(jump, &ret0,
			    &ret0_size, &ret0_maxsize))
				goto fail;
#endif
		}

		pc = &insns[i];
		switch (BPF_CLASS(pc->code)) {

		default:
			goto fail;

		case BPF_LD:
			/* BPF_LD+BPF_IMM          A <- k */
			if (pc->code == (BPF_LD|BPF_IMM)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LD+BPF_MEM          A <- M[k] */
			if (pc->code == (BPF_LD|BPF_MEM)) {
				if (pc->k >= BPF_MEMWORDS)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV_UI,
				    BJ_AREG, 0,
				    SLJIT_MEM1(SLJIT_LOCALS_REG),
				    offsetof(struct bpfjit_stack, mem) +
					pc->k * sizeof(uint32_t));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LD+BPF_W+BPF_LEN    A <- len */
			if (pc->code == (BPF_LD|BPF_W|BPF_LEN)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    BJ_WIRELEN, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			mode = BPF_MODE(pc->code);
			if (mode != BPF_ABS && mode != BPF_IND)
				goto fail;

			status = emit_pkt_read(compiler, pc,
			    to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_LDX:
			mode = BPF_MODE(pc->code);

			/* BPF_LDX+BPF_W+BPF_IMM    X <- k */
			if (mode == BPF_IMM) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_XREG, 0,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_W+BPF_LEN    X <- len */
			if (mode == BPF_LEN) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_XREG, 0,
				    BJ_WIRELEN, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_W+BPF_MEM    X <- M[k] */
			if (mode == BPF_MEM) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				if (pc->k >= BPF_MEMWORDS)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV_UI,
				    BJ_XREG, 0,
				    SLJIT_MEM1(SLJIT_LOCALS_REG),
				    offsetof(struct bpfjit_stack, mem) +
					pc->k * sizeof(uint32_t));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_B+BPF_MSH    X <- 4*(P[k:1]&0xf) */
			if (mode != BPF_MSH || BPF_SIZE(pc->code) != BPF_B)
				goto fail;

			status = emit_msh(compiler, pc,
			    to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_ST:
			if (pc->code != BPF_ST || pc->k >= BPF_MEMWORDS)
				goto fail;

			status = sljit_emit_op1(compiler,
			    SLJIT_MOV_UI,
			    SLJIT_MEM1(SLJIT_LOCALS_REG),
			    offsetof(struct bpfjit_stack, mem) +
				pc->k * sizeof(uint32_t),
			    BJ_AREG, 0);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_STX:
			if (pc->code != BPF_STX || pc->k >= BPF_MEMWORDS)
				goto fail;

			status = sljit_emit_op1(compiler,
			    SLJIT_MOV_UI,
			    SLJIT_MEM1(SLJIT_LOCALS_REG),
			    offsetof(struct bpfjit_stack, mem) +
				pc->k * sizeof(uint32_t),
			    BJ_XREG, 0);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_ALU:
			if (pc->code == (BPF_ALU|BPF_NEG)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_NEG,
				    BJ_AREG, 0,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			if (BPF_OP(pc->code) != BPF_DIV) {
				status = sljit_emit_op2(compiler,
				    bpf_alu_to_sljit_op(pc),
				    BJ_AREG, 0,
				    BJ_AREG, 0,
				    kx_to_reg(pc), kx_to_reg_arg(pc));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_DIV */

			src = BPF_SRC(pc->code);
			if (src != BPF_X && src != BPF_K)
				goto fail;

			/* division by zero? */
			if (src == BPF_X) {
				jump = sljit_emit_cmp(compiler,
				    SLJIT_C_EQUAL|SLJIT_INT_OP,
				    BJ_XREG, 0,
				    SLJIT_IMM, 0);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			} else if (pc->k == 0) {
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			}

			if (src == BPF_X) {
				status = emit_division(compiler, BJ_XREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;
			} else if (pc->k != 0) {
				if (pc->k & (pc->k - 1)) {
					status = emit_division(compiler,
					    SLJIT_IMM, (uint32_t)pc->k);
				} else {
					status = emit_pow2_division(compiler,
					    (uint32_t)pc->k);
				}
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			continue;

		case BPF_JMP:
			if (BPF_OP(pc->code) == BPF_JA) {
				jt = jf = pc->k;
			} else {
				jt = pc->jt;
				jf = pc->jf;
			}

			negate = (jt == 0) ? 1 : 0;
			branching = (jt == jf) ? 0 : 1;
			jtf = insn_dat[i].u.jdata.jtf;

			if (branching) {
				if (BPF_OP(pc->code) != BPF_JSET) {
					jump = sljit_emit_cmp(compiler,
					    bpf_jmp_to_sljit_cond(pc, negate),
					    BJ_AREG, 0,
					    kx_to_reg(pc), kx_to_reg_arg(pc));
				} else {
					status = sljit_emit_op2(compiler,
					    SLJIT_AND,
					    BJ_TMP1REG, 0,
					    BJ_AREG, 0,
					    kx_to_reg(pc), kx_to_reg_arg(pc));
					if (status != SLJIT_SUCCESS)
						goto fail;

					jump = sljit_emit_cmp(compiler,
					    bpf_jmp_to_sljit_cond(pc, negate),
					    BJ_TMP1REG, 0,
					    SLJIT_IMM, 0);
				}

				if (jump == NULL)
					goto fail;

				BJ_ASSERT(jtf[negate].sjump == NULL);
				jtf[negate].sjump = jump;
			}

			if (!branching || (jt != 0 && jf != 0)) {
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;

				BJ_ASSERT(jtf[branching].sjump == NULL);
				jtf[branching].sjump = jump;
			}

			continue;

		case BPF_RET:
			rval = BPF_RVAL(pc->code);
			if (rval == BPF_X)
				goto fail;

			/* BPF_RET+BPF_K    accept k bytes */
			if (rval == BPF_K) {
				status = sljit_emit_return(compiler,
				    SLJIT_MOV_UI,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			/* BPF_RET+BPF_A    accept A bytes */
			if (rval == BPF_A) {
				status = sljit_emit_return(compiler,
				    SLJIT_MOV_UI,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			continue;

		case BPF_MISC:
			switch (BPF_MISCOP(pc->code)) {
			case BPF_TAX:
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV_UI,
				    BJ_XREG, 0,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;

			case BPF_TXA:
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    BJ_XREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			goto fail;
		} /* switch */
	} /* main loop */

	BJ_ASSERT(ret0_size <= ret0_maxsize);

	if (ret0_size > 0) {
		label = sljit_emit_label(compiler);
		if (label == NULL)
			goto fail;
		for (i = 0; i < ret0_size; i++)
			sljit_set_label(ret0[i], label);
	}

	status = sljit_emit_return(compiler,
	    SLJIT_MOV_UI,
	    SLJIT_IMM, 0);
	if (status != SLJIT_SUCCESS)
		goto fail;

	rv = sljit_generate_code(compiler);

fail:
	if (compiler != NULL)
		sljit_free_compiler(compiler);

	if (insn_dat != NULL)
		BJ_FREE(insn_dat, insn_count * sizeof(insn_dat[0]));

	if (ret0 != NULL)
		BJ_FREE(ret0, ret0_maxsize * sizeof(ret0[0]));

	return (bpfjit_func_t)rv;
}

void
bpfjit_free_code(bpfjit_func_t code)
{

	sljit_free_code((void *)code);
}
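
/*
 * A minimal userland usage sketch, not compiled into the file: the
 * BPFJIT_EXAMPLE guard is hypothetical, and it assumes that
 * bpfjit_func_t is the three-argument (packet, wirelen, buflen)
 * function type that sljit_emit_enter() above sets up, and that a
 * NULL bpf_ctx_t pointer is acceptable (the argument is unused here).
 */
#ifdef BPFJIT_EXAMPLE
static unsigned int
example_filter(const uint8_t *pkt, unsigned int pktlen)
{
	/* BPF_RET+BPF_K: accept (return) the first 64 bytes of any packet. */
	struct bpf_insn insns[] = {
		BPF_STMT(BPF_RET|BPF_K, 64),
	};
	bpfjit_func_t code;
	unsigned int ret;

	code = bpfjit_generate_code(NULL, insns, __arraycount(insns));
	if (code == NULL)
		return 0;	/* fall back to bpf_filter() */

	ret = code(pkt, pktlen, pktlen);
	bpfjit_free_code(code);
	return ret;
}
#endif /* BPFJIT_EXAMPLE */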