/*	$NetBSD: bpfjit.c,v 1.16 2014/06/25 11:13:28 alnsn Exp $	*/

/*-
 * Copyright (c) 2011-2014 Alexander Nasonov.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef _KERNEL
__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.16 2014/06/25 11:13:28 alnsn Exp $");
#else
__RCSID("$NetBSD: bpfjit.c,v 1.16 2014/06/25 11:13:28 alnsn Exp $");
#endif

#include <sys/types.h>
#include <sys/queue.h>

#ifndef _KERNEL
#include <assert.h>
#define BJ_ASSERT(c) assert(c)
#else
#define BJ_ASSERT(c) KASSERT(c)
#endif

#ifndef _KERNEL
#include <stdlib.h>
#define BJ_ALLOC(sz) malloc(sz)
#define BJ_FREE(p, sz) free(p)
#else
#include <sys/kmem.h>
#define BJ_ALLOC(sz) kmem_alloc(sz, KM_SLEEP)
#define BJ_FREE(p, sz) kmem_free(p, sz)
#endif

#ifndef _KERNEL
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#else
#include <sys/atomic.h>
#include <sys/module.h>
#endif

#define __BPF_PRIVATE
#include <net/bpf.h>
#include <net/bpfjit.h>
#include <sljitLir.h>

#if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
#include <stdio.h> /* for stderr */
#endif

/*
 * Arguments of the generated bpfjit_func_t.
 * Upon entry, the first argument register is reassigned to hold
 * the more frequently used buf pointer.
 */
#define BJ_CTX_ARG	SLJIT_SAVED_REG1
#define BJ_ARGS		SLJIT_SAVED_REG2

/*
 * Permanent register assignments.
 */
#define BJ_BUF		SLJIT_SAVED_REG1
//#define BJ_ARGS	SLJIT_SAVED_REG2
#define BJ_BUFLEN	SLJIT_SAVED_REG3
#define BJ_AREG		SLJIT_SCRATCH_REG1
#define BJ_TMP1REG	SLJIT_SCRATCH_REG2
#define BJ_TMP2REG	SLJIT_SCRATCH_REG3
#define BJ_XREG		SLJIT_TEMPORARY_EREG1
#define BJ_TMP3REG	SLJIT_TEMPORARY_EREG2

/*
 * EREG registers can't be used for indirect calls, so the BJ_BUF and
 * BJ_BUFLEN registers are reused instead; they can easily be restored
 * from BJ_ARGS.
 */
#define BJ_COPF_PTR	SLJIT_SAVED_REG1
#define BJ_COPF_IDX	SLJIT_SAVED_REG3

#ifdef _KERNEL
#define MAX_MEMWORDS BPF_MAX_MEMWORDS
#else
#define MAX_MEMWORDS BPF_MEMWORDS
#endif

#define BJ_INIT_NOBITS	((bpf_memword_init_t)0)
#define BJ_INIT_MBIT(k)	BPF_MEMWORD_INIT(k)
#define BJ_INIT_ABIT	BJ_INIT_MBIT(MAX_MEMWORDS)
#define BJ_INIT_XBIT	BJ_INIT_MBIT(MAX_MEMWORDS + 1)
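/*
 * A note on the layout (derived from the macros above):
 * BJ_INIT_MBIT(k) marks memword M[k], while the A and X registers
 * take the two bit positions just past the last memword, i.e.
 * MAX_MEMWORDS and MAX_MEMWORDS + 1.
 */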

/*
 * Datatype for Array Bounds Check Elimination (ABC) pass.
 */
typedef uint64_t bpfjit_abc_length_t;
#define MAX_ABC_LENGTH (UINT32_MAX + UINT64_C(4)) /* max. width is 4 */
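/*
 * Worked example of why a 64bit type is needed: the largest BPF_W
 * read is at k == UINT32_MAX and must see k + 4 bytes, and
 * 0xffffffff + 4 == 0x100000003 doesn't fit in 32 bits.
 */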

struct bpfjit_stack
{
	bpf_ctx_t *ctx;
	uint32_t *extmem; /* pointer to external memory store */
#ifdef _KERNEL
	void *tmp;
#endif
	uint32_t mem[BPF_MEMWORDS]; /* internal memory store */
};

/*
 * Data for BPF_JMP instruction.
 * Forward declaration for struct bpfjit_jump.
 */
struct bpfjit_jump_data;

/*
 * Node of bjumps list.
 */
struct bpfjit_jump {
	struct sljit_jump *sjump;
	SLIST_ENTRY(bpfjit_jump) entries;
	struct bpfjit_jump_data *jdata;
};
146
147 /*
148 * Data for BPF_JMP instruction.
149 */
150 struct bpfjit_jump_data {
151 /*
152 * These entries make up bjumps list:
153 * jtf[0] - when coming from jt path,
154 * jtf[1] - when coming from jf path.
155 */
156 struct bpfjit_jump jtf[2];
157 /*
158 * Length calculated by Array Bounds Check Elimination (ABC) pass.
159 */
160 bpfjit_abc_length_t abc_length;
161 /*
162 * Length checked by the last out-of-bounds check.
163 */
164 bpfjit_abc_length_t checked_length;
165 };
166
167 /*
168 * Data for "read from packet" instructions.
169 * See also read_pkt_insn() function below.
170 */
171 struct bpfjit_read_pkt_data {
172 /*
173 * Length calculated by Array Bounds Check Elimination (ABC) pass.
174 */
175 bpfjit_abc_length_t abc_length;
176 /*
177 * If positive, emit "if (buflen < check_length) return 0"
178 * out-of-bounds check.
179 * Values greater than UINT32_MAX generate unconditional "return 0".
180 */
181 bpfjit_abc_length_t check_length;
182 };

/*
 * Additional (optimization-related) data for bpf_insn.
 */
struct bpfjit_insn_data {
	/* List of jumps to this insn. */
	SLIST_HEAD(, bpfjit_jump) bjumps;

	union {
		struct bpfjit_jump_data jdata;
		struct bpfjit_read_pkt_data rdata;
	} u;

	bpf_memword_init_t invalid;
	bool unreachable;
};

#ifdef _KERNEL

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

MODULE(MODULE_CLASS_MISC, bpfjit, "sljit")

static int
bpfjit_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
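		/*
		 * Ordering note: bj_free_code is published before
		 * bj_generate_code so that anyone who observes a
		 * non-NULL bj_generate_code also sees a valid
		 * bj_free_code for the code it returns.
		 */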
		bpfjit_module_ops.bj_free_code = &bpfjit_free_code;
		membar_producer();
		bpfjit_module_ops.bj_generate_code = &bpfjit_generate_code;
		membar_producer();
		return 0;

	case MODULE_CMD_FINI:
		return EOPNOTSUPP;

	default:
		return ENOTTY;
	}
}
#endif

static uint32_t
read_width(const struct bpf_insn *pc)
{

	switch (BPF_SIZE(pc->code)) {
	case BPF_W:
		return 4;
	case BPF_H:
		return 2;
	case BPF_B:
		return 1;
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

/*
 * Copy the buf and buflen members of bpf_args from the BJ_ARGS
 * pointer to the BJ_BUF and BJ_BUFLEN registers.
 */
static int
load_buf_buflen(struct sljit_compiler *compiler)
{
	int status;

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    BJ_BUF, 0,
	    SLJIT_MEM1(BJ_ARGS),
	    offsetof(struct bpf_args, pkt));
	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_BUFLEN, 0,
	    SLJIT_MEM1(BJ_ARGS),
	    offsetof(struct bpf_args, buflen));

	return status;
}

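/*
 * grow_jumps() and append_jump() below maintain a dynamically
 * sized array of emitted sljit jumps that still need a label
 * (e.g. all jumps to the common "return 0" epilogue), doubling
 * the capacity whenever the array fills up.
 */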
static bool
grow_jumps(struct sljit_jump ***jumps, size_t *size)
{
	struct sljit_jump **newptr;
	const size_t elemsz = sizeof(struct sljit_jump *);
	size_t old_size = *size;
	size_t new_size = 2 * old_size;

	if (new_size < old_size || new_size > SIZE_MAX / elemsz)
		return false;

	newptr = BJ_ALLOC(new_size * elemsz);
	if (newptr == NULL)
		return false;

	memcpy(newptr, *jumps, old_size * elemsz);
	BJ_FREE(*jumps, old_size * elemsz);

	*jumps = newptr;
	*size = new_size;
	return true;
}

static bool
append_jump(struct sljit_jump *jump, struct sljit_jump ***jumps,
    size_t *size, size_t *max_size)
{
	if (*size == *max_size && !grow_jumps(jumps, max_size))
		return false;

	(*jumps)[(*size)++] = jump;
	return true;
}

/*
 * Generate code for BPF_LD+BPF_B+BPF_ABS A <- P[k:1].
 */
static int
emit_read8(struct sljit_compiler* compiler, uint32_t k)
{

	return sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_AREG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
}

/*
 * Generate code for BPF_LD+BPF_H+BPF_ABS A <- P[k:2].
 */
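/*
 * A minimal C sketch of what the emitted code computes
 * (a halfword in network byte order):
 *	A = (buf[k] << 8) + buf[k+1];
 */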
static int
emit_read16(struct sljit_compiler* compiler, uint32_t k)
{
	int status;

	/* tmp1 = buf[k]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = buf[k+1]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_AREG, 0,
	    SLJIT_MEM1(BJ_BUF), k+1);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = tmp1 << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	return status;
}

/*
 * Generate code for BPF_LD+BPF_W+BPF_ABS A <- P[k:4].
 */
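/*
 * A minimal C sketch of what the emitted code computes, with the
 * loads interleaved with shifts and adds (presumably to overlap
 * memory accesses with arithmetic):
 *	A = (buf[k] << 24) + (buf[k+1] << 16) +
 *	    (buf[k+2] << 8) + buf[k+3];
 */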
static int
emit_read32(struct sljit_compiler* compiler, uint32_t k)
{
	int status;

	/* tmp1 = buf[k]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp2 = buf[k+1]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP2REG, 0,
	    SLJIT_MEM1(BJ_BUF), k+1);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = buf[k+3]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_AREG, 0,
	    SLJIT_MEM1(BJ_BUF), k+3);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = tmp1 << 24; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 24);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = buf[k+2]; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k+2);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp2 = tmp2 << 16; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP2REG, 0,
	    BJ_TMP2REG, 0,
	    SLJIT_IMM, 16);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp2; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP2REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 = tmp1 << 8; */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 8);
	if (status != SLJIT_SUCCESS)
		return status;

	/* A = A + tmp1; */
	status = sljit_emit_op2(compiler,
	    SLJIT_ADD,
	    BJ_AREG, 0,
	    BJ_AREG, 0,
	    BJ_TMP1REG, 0);
	return status;
}

#ifdef _KERNEL
/*
 * Generate an m_xword/m_xhalf/m_xbyte call.
 *
 * pc is one of:
 * BPF_LD+BPF_W+BPF_ABS	A <- P[k:4]
 * BPF_LD+BPF_H+BPF_ABS	A <- P[k:2]
 * BPF_LD+BPF_B+BPF_ABS	A <- P[k:1]
 * BPF_LD+BPF_W+BPF_IND	A <- P[X+k:4]
 * BPF_LD+BPF_H+BPF_IND	A <- P[X+k:2]
 * BPF_LD+BPF_B+BPF_IND	A <- P[X+k:1]
 * BPF_LDX+BPF_B+BPF_MSH	X <- 4*(P[k:1]&0xf)
 *
 * The dst variable should be
 * - BJ_AREG when emitting code for BPF_LD instructions,
 * - BJ_XREG or any of the BJ_TMP[1-3]REG registers when emitting
 *   code for the BPF_MSH instruction.
 */
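/*
 * A plain-C sketch of the call sequence emitted below; err lives
 * in the tmp slot of struct bpfjit_stack, and the second argument
 * is X+k for BPF_IND loads:
 *	int err;
 *	dst = fn(buf, k, &err);
 *	if (err != 0)
 *		return 0;
 */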
static int
emit_xcall(struct sljit_compiler* compiler, const struct bpf_insn *pc,
    int dst, sljit_sw dstw, struct sljit_jump **ret0_jump,
    uint32_t (*fn)(const struct mbuf *, uint32_t, int *))
{
#if BJ_XREG == SLJIT_RETURN_REG || \
    BJ_XREG == SLJIT_SCRATCH_REG1 || \
    BJ_XREG == SLJIT_SCRATCH_REG2 || \
    BJ_XREG == SLJIT_SCRATCH_REG3
#error "Unsupported register assignment."
#endif
	int status;

	/*
	 * The third argument of fn is an address on the stack.
	 */
	const int arg3_offset = offsetof(struct bpfjit_stack, tmp);

	if (BPF_CLASS(pc->code) == BPF_LDX) {
		/* save A */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_TMP3REG, 0,
		    BJ_AREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/*
	 * Prepare registers for the fn(buf, k, &err) call.
	 */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_SCRATCH_REG1, 0,
	    BJ_BUF, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	if (BPF_CLASS(pc->code) == BPF_LD && BPF_MODE(pc->code) == BPF_IND) {
		status = sljit_emit_op2(compiler,
		    SLJIT_ADD,
		    SLJIT_SCRATCH_REG2, 0,
		    BJ_XREG, 0,
		    SLJIT_IMM, (uint32_t)pc->k);
	} else {
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    SLJIT_SCRATCH_REG2, 0,
		    SLJIT_IMM, (uint32_t)pc->k);
	}

	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_get_local_base(compiler,
	    SLJIT_SCRATCH_REG3, 0, arg3_offset);
	if (status != SLJIT_SUCCESS)
		return status;

	/* fn(buf, k, &err); */
	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL3,
	    SLJIT_IMM, SLJIT_FUNC_OFFSET(fn));

	if (dst != SLJIT_RETURN_REG) {
		/* move the return value to dst */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    dst, dstw,
		    SLJIT_RETURN_REG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	if (BPF_CLASS(pc->code) == BPF_LDX) {
		/* restore A */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_AREG, 0,
		    BJ_TMP3REG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

	/* tmp2 = *err; */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UI,
	    SLJIT_SCRATCH_REG3, 0,
	    SLJIT_MEM1(SLJIT_LOCALS_REG), arg3_offset);
	if (status != SLJIT_SUCCESS)
		return status;

	/* if (tmp2 != 0) return 0; */
	*ret0_jump = sljit_emit_cmp(compiler,
	    SLJIT_C_NOT_EQUAL,
	    SLJIT_SCRATCH_REG3, 0,
	    SLJIT_IMM, 0);
	if (*ret0_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	return status;
}
#endif

/*
 * Emit code for BPF_COP and BPF_COPX instructions.
 */
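/*
 * A hedged sketch of the generated code, assuming the
 * (ctx, args, A) argument order that the register moves below
 * suggest:
 *	if (COPX && X >= bc->nfuncs)
 *		return 0;
 *	A = copfunc(ctx, args, A);
 */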
static int
emit_cop(struct sljit_compiler* compiler, const bpf_ctx_t *bc,
    const struct bpf_insn *pc, struct sljit_jump **ret0_jump)
{
#if BJ_XREG == SLJIT_RETURN_REG || \
    BJ_XREG == SLJIT_SCRATCH_REG1 || \
    BJ_XREG == SLJIT_SCRATCH_REG2 || \
    BJ_XREG == SLJIT_SCRATCH_REG3 || \
    BJ_COPF_PTR == BJ_ARGS || \
    BJ_COPF_IDX == BJ_ARGS
#error "Unsupported register assignment."
#endif

	struct sljit_jump *jump;
	int status;

	jump = NULL;

	BJ_ASSERT(bc != NULL && bc->copfuncs != NULL);

	if (BPF_MISCOP(pc->code) == BPF_COPX) {
		/* if (X >= bc->nfuncs) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_GREATER_EQUAL,
		    BJ_XREG, 0,
		    SLJIT_IMM, bc->nfuncs);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}

	if (jump != NULL)
		*ret0_jump = jump;

	/*
	 * Copy bpf_copfunc_t arguments to registers.
	 */
#if BJ_AREG != SLJIT_SCRATCH_REG3
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UI,
	    SLJIT_SCRATCH_REG3, 0,
	    BJ_AREG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    SLJIT_SCRATCH_REG1, 0,
	    SLJIT_MEM1(SLJIT_LOCALS_REG),
	    offsetof(struct bpfjit_stack, ctx));
	if (status != SLJIT_SUCCESS)
		return status;

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_P,
	    SLJIT_SCRATCH_REG2, 0,
	    BJ_ARGS, 0);
	if (status != SLJIT_SUCCESS)
		return status;

	if (BPF_MISCOP(pc->code) == BPF_COP) {
		status = sljit_emit_ijump(compiler,
		    SLJIT_CALL3,
		    SLJIT_IMM, SLJIT_FUNC_OFFSET(bc->copfuncs[pc->k]));
		if (status != SLJIT_SUCCESS)
			return status;
	} else if (BPF_MISCOP(pc->code) == BPF_COPX) {
		/* load ctx->copfuncs */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_COPF_PTR, 0,
		    SLJIT_MEM1(SLJIT_SCRATCH_REG1),
		    offsetof(struct bpf_ctx, copfuncs));
		if (status != SLJIT_SUCCESS)
			return status;

		/*
		 * Load X into a register that can be used for
		 * memory addressing.
		 */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_COPF_IDX, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;

		status = sljit_emit_ijump(compiler,
		    SLJIT_CALL3,
		    SLJIT_MEM2(BJ_COPF_PTR, BJ_COPF_IDX),
		    SLJIT_WORD_SHIFT);
		if (status != SLJIT_SUCCESS)
			return status;

		status = load_buf_buflen(compiler);
		if (status != SLJIT_SUCCESS)
			return status;
	}

#if BJ_AREG != SLJIT_RETURN_REG
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_RETURN_REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	return status;
}

/*
 * Generate code for
 * BPF_LD+BPF_W+BPF_ABS	A <- P[k:4]
 * BPF_LD+BPF_H+BPF_ABS	A <- P[k:2]
 * BPF_LD+BPF_B+BPF_ABS	A <- P[k:1]
 * BPF_LD+BPF_W+BPF_IND	A <- P[X+k:4]
 * BPF_LD+BPF_H+BPF_IND	A <- P[X+k:2]
 * BPF_LD+BPF_B+BPF_IND	A <- P[X+k:1]
 */
static int
emit_pkt_read(struct sljit_compiler* compiler,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status = 0; /* XXX gcc 4.1 */
	uint32_t width;
	struct sljit_jump *jump;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *over_mchain_jump;
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	if (to_mchain_jump == NULL) {
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_C_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	width = read_width(pc);

	if (BPF_MODE(pc->code) == BPF_IND) {
		/* tmp1 = buflen - (pc->k + width); */
		status = sljit_emit_op2(compiler,
		    SLJIT_SUB,
		    BJ_TMP1REG, 0,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, k + width);
		if (status != SLJIT_SUCCESS)
			return status;

		/* buf += X; */
		status = sljit_emit_op2(compiler,
		    SLJIT_ADD,
		    BJ_BUF, 0,
		    BJ_BUF, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;

		/* if (tmp1 < X) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_LESS,
		    BJ_TMP1REG, 0,
		    BJ_XREG, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	switch (width) {
	case 4:
		status = emit_read32(compiler, k);
		break;
	case 2:
		status = emit_read16(compiler, k);
		break;
	case 1:
		status = emit_read8(compiler, k);
		break;
	}

	if (status != SLJIT_SUCCESS)
		return status;

	if (BPF_MODE(pc->code) == BPF_IND) {
		/* buf -= X; */
		status = sljit_emit_op2(compiler,
		    SLJIT_SUB,
		    BJ_BUF, 0,
		    BJ_BUF, 0,
		    BJ_XREG, 0);
		if (status != SLJIT_SUCCESS)
			return status;
	}

#ifdef _KERNEL
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	switch (width) {
	case 4:
		status = emit_xcall(compiler, pc, BJ_AREG, 0, &jump, &m_xword);
		break;
	case 2:
		status = emit_xcall(compiler, pc, BJ_AREG, 0, &jump, &m_xhalf);
		break;
	case 1:
		status = emit_xcall(compiler, pc, BJ_AREG, 0, &jump, &m_xbyte);
		break;
	}

	if (status != SLJIT_SUCCESS)
		return status;

	if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
		return SLJIT_ERR_ALLOC_FAILED;

	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	return status;
}

static int
emit_memload(struct sljit_compiler* compiler,
    sljit_si dst, uint32_t k, size_t extwords)
{
	int status;
	sljit_si src;
	sljit_sw srcw;

	srcw = k * sizeof(uint32_t);

	if (extwords == 0) {
		src = SLJIT_MEM1(SLJIT_LOCALS_REG);
		srcw += offsetof(struct bpfjit_stack, mem);
	} else {
		/* copy extmem pointer to the tmp1 register */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, extmem));
		if (status != SLJIT_SUCCESS)
			return status;
		src = SLJIT_MEM1(BJ_TMP1REG);
	}

	return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, 0, src, srcw);
}

static int
emit_memstore(struct sljit_compiler* compiler,
    sljit_si src, uint32_t k, size_t extwords)
{
	int status;
	sljit_si dst;
	sljit_sw dstw;

	dstw = k * sizeof(uint32_t);

	if (extwords == 0) {
		dst = SLJIT_MEM1(SLJIT_LOCALS_REG);
		dstw += offsetof(struct bpfjit_stack, mem);
	} else {
		/* copy extmem pointer to the tmp1 register */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, extmem));
		if (status != SLJIT_SUCCESS)
			return status;
		dst = SLJIT_MEM1(BJ_TMP1REG);
	}

	return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, dstw, src, 0);
}

/*
 * Generate code for BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf).
 */
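/*
 * A minimal C sketch of what the emitted code computes (the
 * classic IP header length idiom):
 *	X = (buf[k] & 0xf) << 2;
 */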
static int
emit_msh(struct sljit_compiler* compiler,
    const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
    struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
{
	int status;
#ifdef _KERNEL
	struct sljit_label *label;
	struct sljit_jump *jump, *over_mchain_jump;
	const bool check_zero_buflen = (to_mchain_jump != NULL);
#endif
	const uint32_t k = pc->k;

#ifdef _KERNEL
	if (to_mchain_jump == NULL) {
		to_mchain_jump = sljit_emit_cmp(compiler,
		    SLJIT_C_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (to_mchain_jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
	}
#endif

	/* tmp1 = buf[k] */
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV_UB,
	    BJ_TMP1REG, 0,
	    SLJIT_MEM1(BJ_BUF), k);
	if (status != SLJIT_SUCCESS)
		return status;

	/* tmp1 &= 0xf */
	status = sljit_emit_op2(compiler,
	    SLJIT_AND,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 0xf);
	if (status != SLJIT_SUCCESS)
		return status;

	/* X = tmp1 << 2 */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_XREG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 2);
	if (status != SLJIT_SUCCESS)
		return status;

#ifdef _KERNEL
	over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
	if (over_mchain_jump == NULL)
		return SLJIT_ERR_ALLOC_FAILED;

	/* entry point to mchain handler */
	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(to_mchain_jump, label);

	if (check_zero_buflen) {
		/* if (buflen != 0) return 0; */
		jump = sljit_emit_cmp(compiler,
		    SLJIT_C_NOT_EQUAL,
		    BJ_BUFLEN, 0,
		    SLJIT_IMM, 0);
		if (jump == NULL)
			return SLJIT_ERR_ALLOC_FAILED;
		if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
			return SLJIT_ERR_ALLOC_FAILED;
	}

	status = emit_xcall(compiler, pc, BJ_TMP1REG, 0, &jump, &m_xbyte);
	if (status != SLJIT_SUCCESS)
		return status;

	if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
		return SLJIT_ERR_ALLOC_FAILED;

	/* tmp1 &= 0xf */
	status = sljit_emit_op2(compiler,
	    SLJIT_AND,
	    BJ_TMP1REG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 0xf);
	if (status != SLJIT_SUCCESS)
		return status;

	/* X = tmp1 << 2 */
	status = sljit_emit_op2(compiler,
	    SLJIT_SHL,
	    BJ_XREG, 0,
	    BJ_TMP1REG, 0,
	    SLJIT_IMM, 2);
	if (status != SLJIT_SUCCESS)
		return status;

	label = sljit_emit_label(compiler);
	if (label == NULL)
		return SLJIT_ERR_ALLOC_FAILED;
	sljit_set_label(over_mchain_jump, label);
#endif

	return status;
}

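/*
 * Generate A = A / k for a power-of-two k as a right shift,
 * e.g. A / 8 becomes A >> 3.
 */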
static int
emit_pow2_division(struct sljit_compiler* compiler, uint32_t k)
{
	int shift = 0;
	int status = SLJIT_SUCCESS;

	while (k > 1) {
		k >>= 1;
		shift++;
	}

	BJ_ASSERT(k == 1 && shift < 32);

	if (shift != 0) {
		status = sljit_emit_op2(compiler,
		    SLJIT_LSHR|SLJIT_INT_OP,
		    BJ_AREG, 0,
		    BJ_AREG, 0,
		    SLJIT_IMM, shift);
	}

	return status;
}

#if !defined(BPFJIT_USE_UDIV)
static sljit_uw
divide(sljit_uw x, sljit_uw y)
{

	return (uint32_t)x / (uint32_t)y;
}
#endif

/*
 * Generate A = A / div.
 * divt,divw are either SLJIT_IMM,pc->k or BJ_XREG,0.
 */
static int
emit_division(struct sljit_compiler* compiler, int divt, sljit_sw divw)
{
	int status;

#if BJ_XREG == SLJIT_RETURN_REG || \
    BJ_XREG == SLJIT_SCRATCH_REG1 || \
    BJ_XREG == SLJIT_SCRATCH_REG2 || \
    BJ_AREG == SLJIT_SCRATCH_REG2
#error "Unsupported register assignment."
#endif

#if BJ_AREG != SLJIT_SCRATCH_REG1
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_SCRATCH_REG1, 0,
	    BJ_AREG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif

	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    SLJIT_SCRATCH_REG2, 0,
	    divt, divw);
	if (status != SLJIT_SUCCESS)
		return status;

#if defined(BPFJIT_USE_UDIV)
	status = sljit_emit_op0(compiler, SLJIT_UDIV|SLJIT_INT_OP);

#if BJ_AREG != SLJIT_SCRATCH_REG1
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_SCRATCH_REG1, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif
#else
	status = sljit_emit_ijump(compiler,
	    SLJIT_CALL2,
	    SLJIT_IMM, SLJIT_FUNC_OFFSET(divide));

#if BJ_AREG != SLJIT_RETURN_REG
	status = sljit_emit_op1(compiler,
	    SLJIT_MOV,
	    BJ_AREG, 0,
	    SLJIT_RETURN_REG, 0);
	if (status != SLJIT_SUCCESS)
		return status;
#endif
#endif

	return status;
}

/*
 * Return true if pc is a "read from packet" instruction.
 * If length is not NULL and the return value is true, *length is
 * set to a safe length required to read a packet.
 */
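/*
 * Example: for BPF_LD+BPF_H+BPF_ABS with k == 12 (the Ethernet
 * type field), *length is set to 12 + 2 == 14.
 */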
static bool
read_pkt_insn(const struct bpf_insn *pc, bpfjit_abc_length_t *length)
{
	bool rv;
	bpfjit_abc_length_t width;

	switch (BPF_CLASS(pc->code)) {
	default:
		rv = false;
		break;

	case BPF_LD:
		rv = BPF_MODE(pc->code) == BPF_ABS ||
		    BPF_MODE(pc->code) == BPF_IND;
		if (rv)
			width = read_width(pc);
		break;

	case BPF_LDX:
		rv = pc->code == (BPF_LDX|BPF_B|BPF_MSH);
		width = 1;
		break;
	}

	if (rv && length != NULL) {
		/*
		 * Values greater than UINT32_MAX will generate
		 * an unconditional "return 0".
		 */
		*length = (uint32_t)pc->k + width;
	}

	return rv;
}

static void
optimize_init(struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	size_t i;

	for (i = 0; i < insn_count; i++) {
		SLIST_INIT(&insn_dat[i].bjumps);
		insn_dat[i].invalid = BJ_INIT_NOBITS;
	}
}

/*
 * The function divides instructions into blocks. The destination of
 * a jump instruction starts a new block. BPF_RET and BPF_JMP
 * instructions terminate a block. Blocks are linear: there are no
 * jumps out of the middle of a block and no jumps into the middle
 * of a block.
 *
 * The function also sets bits in *initmask for memwords that
 * need to be initialized to zero. Note that this set should be empty
 * for any valid kernel filter program.
 */
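/*
 * Illustrative example (a hypothetical three-insn filter):
 *	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0x800, 0, 1),
 *	BPF_STMT(BPF_RET+BPF_K, UINT32_MAX),
 *	BPF_STMT(BPF_RET+BPF_K, 0),
 * Here the jeq terminates the first block and each ret, being a
 * jump destination recorded in bjumps, starts its own block.
 */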
static bool
optimize_pass1(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count, size_t extwords,
    bpf_memword_init_t *initmask, int *nscratches, int *ncopfuncs)
{
	struct bpfjit_jump *jtf;
	size_t i;
	uint32_t jt, jf;
	bpfjit_abc_length_t length;
	bpf_memword_init_t invalid; /* borrowed from bpf_filter() */
	bool unreachable;

	const size_t memwords = (extwords != 0) ? extwords : BPF_MEMWORDS;

	*ncopfuncs = 0;
	*nscratches = 2;
	*initmask = BJ_INIT_NOBITS;

	unreachable = false;
	invalid = ~BJ_INIT_NOBITS;

	for (i = 0; i < insn_count; i++) {
		if (!SLIST_EMPTY(&insn_dat[i].bjumps))
			unreachable = false;
		insn_dat[i].unreachable = unreachable;

		if (unreachable)
			continue;

		invalid |= insn_dat[i].invalid;

		if (read_pkt_insn(&insns[i], &length) && length > UINT32_MAX)
			unreachable = true;

		switch (BPF_CLASS(insns[i].code)) {
		case BPF_RET:
			if (BPF_RVAL(insns[i].code) == BPF_A)
				*initmask |= invalid & BJ_INIT_ABIT;

			unreachable = true;
			continue;

		case BPF_LD:
			if (BPF_MODE(insns[i].code) == BPF_IND ||
			    BPF_MODE(insns[i].code) == BPF_ABS) {
				if (BPF_MODE(insns[i].code) == BPF_IND &&
				    *nscratches < 4) {
					/* uses BJ_XREG */
					*nscratches = 4;
				}
				if (*nscratches < 3 &&
				    read_width(&insns[i]) == 4) {
					/* uses BJ_TMP2REG */
					*nscratches = 3;
				}
			}

			if (BPF_MODE(insns[i].code) == BPF_IND)
				*initmask |= invalid & BJ_INIT_XBIT;

			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < memwords) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_LDX:
#if defined(_KERNEL)
			/* uses BJ_TMP3REG */
			*nscratches = 5;
#endif
			/* uses BJ_XREG */
			if (*nscratches < 4)
				*nscratches = 4;

			if (BPF_MODE(insns[i].code) == BPF_MEM &&
			    (uint32_t)insns[i].k < memwords) {
				*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
			}

			invalid &= ~BJ_INIT_XBIT;
			continue;

		case BPF_ST:
			*initmask |= invalid & BJ_INIT_ABIT;

			if ((uint32_t)insns[i].k < memwords)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_STX:
			/* uses BJ_XREG */
			if (*nscratches < 4)
				*nscratches = 4;

			*initmask |= invalid & BJ_INIT_XBIT;

			if ((uint32_t)insns[i].k < memwords)
				invalid &= ~BJ_INIT_MBIT(insns[i].k);

			continue;

		case BPF_ALU:
			*initmask |= invalid & BJ_INIT_ABIT;

			if (insns[i].code != (BPF_ALU|BPF_NEG) &&
			    BPF_SRC(insns[i].code) == BPF_X) {
				*initmask |= invalid & BJ_INIT_XBIT;
				/* uses BJ_XREG */
				if (*nscratches < 4)
					*nscratches = 4;
			}

			invalid &= ~BJ_INIT_ABIT;
			continue;

		case BPF_MISC:
			switch (BPF_MISCOP(insns[i].code)) {
			case BPF_TAX: // X <- A
				/* uses BJ_XREG */
				if (*nscratches < 4)
					*nscratches = 4;

				*initmask |= invalid & BJ_INIT_ABIT;
				invalid &= ~BJ_INIT_XBIT;
				continue;

			case BPF_TXA: // A <- X
				/* uses BJ_XREG */
				if (*nscratches < 4)
					*nscratches = 4;

				*initmask |= invalid & BJ_INIT_XBIT;
				invalid &= ~BJ_INIT_ABIT;
				continue;

			case BPF_COPX:
				/* uses BJ_XREG */
				if (*nscratches < 4)
					*nscratches = 4;
				/* FALLTHROUGH */

			case BPF_COP:
				/* calls copfunc with three arguments */
				if (*nscratches < 3)
					*nscratches = 3;

				(*ncopfuncs)++;
				*initmask |= invalid & BJ_INIT_ABIT;
				invalid &= ~BJ_INIT_ABIT;
				continue;
			}

			continue;

		case BPF_JMP:
			/* Initialize abc_length for ABC pass. */
			insn_dat[i].u.jdata.abc_length = MAX_ABC_LENGTH;

			if (BPF_OP(insns[i].code) == BPF_JA) {
				jt = jf = insns[i].k;
			} else {
				jt = insns[i].jt;
				jf = insns[i].jf;
			}

			if (jt >= insn_count - (i + 1) ||
			    jf >= insn_count - (i + 1)) {
				return false;
			}

			if (jt > 0 && jf > 0)
				unreachable = true;

			jt += i + 1;
			jf += i + 1;

			jtf = insn_dat[i].u.jdata.jtf;

			jtf[0].sjump = NULL;
			jtf[0].jdata = &insn_dat[i].u.jdata;
			SLIST_INSERT_HEAD(&insn_dat[jt].bjumps,
			    &jtf[0], entries);

			if (jf != jt) {
				jtf[1].sjump = NULL;
				jtf[1].jdata = &insn_dat[i].u.jdata;
				SLIST_INSERT_HEAD(&insn_dat[jf].bjumps,
				    &jtf[1], entries);
			}

			insn_dat[jf].invalid |= invalid;
			insn_dat[jt].invalid |= invalid;
			invalid = 0;

			continue;
		}
	}

	return true;
}

/*
 * Array Bounds Check Elimination (ABC) pass.
 */
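/*
 * Sketch of the idea: walking the program backwards, propagate the
 * largest packet length required by later reads. E.g. in
 *	{ A <- P[12:4]; A <- P[6:2]; ret A }
 * the first read gets abc_length 16 and the second 8, so pass3
 * below can emit a single "buflen >= 16" check at the first read
 * and skip the check at the second one.
 */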
static void
optimize_pass2(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count, size_t extwords)
{
	struct bpfjit_jump *jmp;
	const struct bpf_insn *pc;
	struct bpfjit_insn_data *pd;
	size_t i;
	bpfjit_abc_length_t length, abc_length = 0;

	for (i = insn_count; i != 0; i--) {
		pc = &insns[i-1];
		pd = &insn_dat[i-1];

		if (pd->unreachable)
			continue;

		switch (BPF_CLASS(pc->code)) {
		case BPF_RET:
			/*
			 * It's quite common for bpf programs to
			 * check packet bytes in increasing order
			 * and return zero if the bytes don't match
			 * a specified criterion. Such programs would
			 * disable ABC optimization completely because
			 * for every jump there is a branch with no
			 * read instruction.
			 * With no side effects, BPF_STMT(BPF_RET+BPF_K, 0)
			 * is indistinguishable from an out-of-bounds load,
			 * so abc_length can be set to MAX_ABC_LENGTH here,
			 * enabling ABC for many bpf programs.
			 * If this optimization encounters any
			 * instruction with a side effect, it resets
			 * abc_length.
			 */
			if (BPF_RVAL(pc->code) == BPF_K && pc->k == 0)
				abc_length = MAX_ABC_LENGTH;
			else
				abc_length = 0;
			break;

		case BPF_MISC:
			if (BPF_MISCOP(pc->code) == BPF_COP ||
			    BPF_MISCOP(pc->code) == BPF_COPX) {
				/* COP instructions can have side effects. */
				abc_length = 0;
			}
			break;

		case BPF_ST:
		case BPF_STX:
			if (extwords != 0) {
				/* A write to memory is visible after a call. */
				abc_length = 0;
			}
			break;

		case BPF_JMP:
			abc_length = pd->u.jdata.abc_length;
			break;

		default:
			if (read_pkt_insn(pc, &length)) {
				if (abc_length < length)
					abc_length = length;
				pd->u.rdata.abc_length = abc_length;
			}
			break;
		}

		SLIST_FOREACH(jmp, &pd->bjumps, entries) {
			if (jmp->jdata->abc_length > abc_length)
				jmp->jdata->abc_length = abc_length;
		}
	}
}

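/*
 * Decide where out-of-bounds checks must be emitted: walking
 * forwards, track the length already checked on every path into
 * each insn and request a new check (check_length) only when a
 * read needs more than what was checked so far.
 */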
static void
optimize_pass3(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count)
{
	struct bpfjit_jump *jmp;
	size_t i;
	bpfjit_abc_length_t checked_length = 0;

	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable)
			continue;

		SLIST_FOREACH(jmp, &insn_dat[i].bjumps, entries) {
			if (jmp->jdata->checked_length < checked_length)
				checked_length = jmp->jdata->checked_length;
		}

		if (BPF_CLASS(insns[i].code) == BPF_JMP) {
			insn_dat[i].u.jdata.checked_length = checked_length;
		} else if (read_pkt_insn(&insns[i], NULL)) {
			struct bpfjit_read_pkt_data *rdata =
			    &insn_dat[i].u.rdata;
			rdata->check_length = 0;
			if (checked_length < rdata->abc_length) {
				checked_length = rdata->abc_length;
				rdata->check_length = checked_length;
			}
		}
	}
}

static bool
optimize(const struct bpf_insn *insns,
    struct bpfjit_insn_data *insn_dat, size_t insn_count,
    size_t extwords,
    bpf_memword_init_t *initmask, int *nscratches, int *ncopfuncs)
{

	optimize_init(insn_dat, insn_count);

	if (!optimize_pass1(insns, insn_dat, insn_count,
	    extwords, initmask, nscratches, ncopfuncs)) {
		return false;
	}

	optimize_pass2(insns, insn_dat, insn_count, extwords);
	optimize_pass3(insns, insn_dat, insn_count);

	return true;
}

/*
 * Convert BPF_ALU operations, except BPF_NEG and BPF_DIV, to an
 * sljit operation.
 */
static int
bpf_alu_to_sljit_op(const struct bpf_insn *pc)
{

	/*
	 * Note: all supported 64bit arches have a 32bit multiply
	 * instruction, so SLJIT_INT_OP doesn't add any overhead.
	 */
	switch (BPF_OP(pc->code)) {
	case BPF_ADD: return SLJIT_ADD;
	case BPF_SUB: return SLJIT_SUB;
	case BPF_MUL: return SLJIT_MUL|SLJIT_INT_OP;
	case BPF_OR: return SLJIT_OR;
	case BPF_AND: return SLJIT_AND;
	case BPF_LSH: return SLJIT_SHL;
	case BPF_RSH: return SLJIT_LSHR|SLJIT_INT_OP;
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

/*
 * Convert BPF_JMP operations, except BPF_JA, to an sljit condition.
 */
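/*
 * The negate flag is set when the jt offset is zero: the true
 * branch is then the fall-through path, so the condition is
 * inverted and the emitted jump leads to the jf target. E.g.
 * BPF_JGT with jt == 0 is emitted as SLJIT_C_LESS_EQUAL.
 */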
static int
bpf_jmp_to_sljit_cond(const struct bpf_insn *pc, bool negate)
{
	/*
	 * Note: all supported 64bit arches have 32bit comparison
	 * instructions, so SLJIT_INT_OP doesn't add any overhead.
	 */
	int rv = SLJIT_INT_OP;

	switch (BPF_OP(pc->code)) {
	case BPF_JGT:
		rv |= negate ? SLJIT_C_LESS_EQUAL : SLJIT_C_GREATER;
		break;
	case BPF_JGE:
		rv |= negate ? SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL;
		break;
	case BPF_JEQ:
		rv |= negate ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL;
		break;
	case BPF_JSET:
		rv |= negate ? SLJIT_C_EQUAL : SLJIT_C_NOT_EQUAL;
		break;
	default:
		BJ_ASSERT(false);
	}

	return rv;
}

/*
 * Convert BPF_K and BPF_X to an sljit register or immediate.
 */
static int
kx_to_reg(const struct bpf_insn *pc)
{

	switch (BPF_SRC(pc->code)) {
	case BPF_K: return SLJIT_IMM;
	case BPF_X: return BJ_XREG;
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

static sljit_sw
kx_to_reg_arg(const struct bpf_insn *pc)
{

	switch (BPF_SRC(pc->code)) {
	case BPF_K: return (uint32_t)pc->k; /* SLJIT_IMM, pc->k, */
	case BPF_X: return 0; /* BJ_XREG, 0, */
	default:
		BJ_ASSERT(false);
		return 0;
	}
}

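/*
 * A hedged userland usage sketch (assumes the bpf_args and
 * bpfjit_func_t declarations from <net/bpf.h> and <net/bpfjit.h>;
 * pkt and pktlen are hypothetical caller variables):
 *	bpfjit_func_t fn = bpfjit_generate_code(NULL, insns, insn_count);
 *	if (fn != NULL) {
 *		struct bpf_args args;
 *		args.pkt = pkt;
 *		args.wirelen = pktlen;
 *		args.buflen = pktlen;
 *		uint32_t res = fn(NULL, &args);
 *		bpfjit_free_code(fn);
 *	}
 */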
bpfjit_func_t
bpfjit_generate_code(const bpf_ctx_t *bc,
    const struct bpf_insn *insns, size_t insn_count)
{
	void *rv;
	struct sljit_compiler *compiler;

	size_t i;
	int status;
	int branching, negate;
	unsigned int rval, mode, src;

	/* optimization related */
	bpf_memword_init_t initmask;
	int nscratches, ncopfuncs;

	/* a list of jumps to the out-of-bounds return from the generated function */
	struct sljit_jump **ret0;
	size_t ret0_size, ret0_maxsize;

	const struct bpf_insn *pc;
	struct bpfjit_insn_data *insn_dat;

	/* for local use */
	struct sljit_label *label;
	struct sljit_jump *jump;
	struct bpfjit_jump *bjump, *jtf;

	struct sljit_jump *to_mchain_jump;
	bool unconditional_ret;

	uint32_t jt, jf;

	const size_t extwords = bc ? bc->extwords : 0;
	const size_t memwords = extwords ? extwords : BPF_MEMWORDS;
	const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;

	rv = NULL;
	ret0 = NULL;
	compiler = NULL;
	insn_dat = NULL;

	if (memwords > MAX_MEMWORDS)
		goto fail;

	if (insn_count == 0 || insn_count > SIZE_MAX / sizeof(insn_dat[0]))
		goto fail;

	insn_dat = BJ_ALLOC(insn_count * sizeof(insn_dat[0]));
	if (insn_dat == NULL)
		goto fail;

	if (!optimize(insns, insn_dat, insn_count,
	    extwords, &initmask, &nscratches, &ncopfuncs)) {
		goto fail;
	}

	ret0_size = 0;
	ret0_maxsize = 64;
	ret0 = BJ_ALLOC(ret0_maxsize * sizeof(ret0[0]));
	if (ret0 == NULL)
		goto fail;

	compiler = sljit_create_compiler();
	if (compiler == NULL)
		goto fail;

#if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
	sljit_compiler_verbose(compiler, stderr);
#endif

	status = sljit_emit_enter(compiler,
	    3, nscratches, 3, sizeof(struct bpfjit_stack));
	if (status != SLJIT_SUCCESS)
		goto fail;

	if (ncopfuncs > 0) {
		/* save the ctx argument */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, ctx),
		    BJ_CTX_ARG, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	if (extwords != 0) {
		/* copy the "mem" argument from bpf_args to bpfjit_stack */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    BJ_TMP1REG, 0,
		    SLJIT_MEM1(BJ_ARGS), offsetof(struct bpf_args, mem));
		if (status != SLJIT_SUCCESS)
			goto fail;

		status = sljit_emit_op1(compiler,
		    SLJIT_MOV_P,
		    SLJIT_MEM1(SLJIT_LOCALS_REG),
		    offsetof(struct bpfjit_stack, extmem),
		    BJ_TMP1REG, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	status = load_buf_buflen(compiler);
	if (status != SLJIT_SUCCESS)
		goto fail;

	/*
	 * Exclude pre-initialised external memory words but keep
	 * the initialization statuses of the A and X registers in case
	 * bc->preinited wrongly sets those two bits.
	 */
	initmask &= ~preinited | BJ_INIT_ABIT | BJ_INIT_XBIT;

#if defined(_KERNEL)
	/* bpf_filter() checks initialization of memwords. */
	BJ_ASSERT((initmask & (BJ_INIT_MBIT(memwords) - 1)) == 0);
#endif
	for (i = 0; i < memwords; i++) {
		if (initmask & BJ_INIT_MBIT(i)) {
			/* M[i] = 0; */
			status = sljit_emit_op1(compiler,
			    SLJIT_MOV_UI,
			    SLJIT_MEM1(SLJIT_LOCALS_REG),
			    offsetof(struct bpfjit_stack, mem) +
				i * sizeof(uint32_t),
			    SLJIT_IMM, 0);
			if (status != SLJIT_SUCCESS)
				goto fail;
		}
	}

	if (initmask & BJ_INIT_ABIT) {
		/* A = 0; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_AREG, 0,
		    SLJIT_IMM, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	if (initmask & BJ_INIT_XBIT) {
		/* X = 0; */
		status = sljit_emit_op1(compiler,
		    SLJIT_MOV,
		    BJ_XREG, 0,
		    SLJIT_IMM, 0);
		if (status != SLJIT_SUCCESS)
			goto fail;
	}

	for (i = 0; i < insn_count; i++) {
		if (insn_dat[i].unreachable)
			continue;

		/*
		 * Resolve jumps to the current insn.
		 */
		label = NULL;
		SLIST_FOREACH(bjump, &insn_dat[i].bjumps, entries) {
			if (bjump->sjump != NULL) {
				if (label == NULL)
					label = sljit_emit_label(compiler);
				if (label == NULL)
					goto fail;
				sljit_set_label(bjump->sjump, label);
			}
		}

		to_mchain_jump = NULL;
		unconditional_ret = false;

		if (read_pkt_insn(&insns[i], NULL)) {
			if (insn_dat[i].u.rdata.check_length > UINT32_MAX) {
				/* Jump to "return 0" unconditionally. */
				unconditional_ret = true;
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			} else if (insn_dat[i].u.rdata.check_length > 0) {
				/* if (buflen < check_length) return 0; */
				jump = sljit_emit_cmp(compiler,
				    SLJIT_C_LESS,
				    BJ_BUFLEN, 0,
				    SLJIT_IMM,
				    insn_dat[i].u.rdata.check_length);
				if (jump == NULL)
					goto fail;
#ifdef _KERNEL
				to_mchain_jump = jump;
#else
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
#endif
			}
		}

		pc = &insns[i];
		switch (BPF_CLASS(pc->code)) {

		default:
			goto fail;

		case BPF_LD:
			/* BPF_LD+BPF_IMM		A <- k */
			if (pc->code == (BPF_LD|BPF_IMM)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LD+BPF_MEM		A <- M[k] */
			if (pc->code == (BPF_LD|BPF_MEM)) {
				if ((uint32_t)pc->k >= memwords)
					goto fail;
				status = emit_memload(compiler,
				    BJ_AREG, pc->k, extwords);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LD+BPF_W+BPF_LEN		A <- len */
			if (pc->code == (BPF_LD|BPF_W|BPF_LEN)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    SLJIT_MEM1(BJ_ARGS),
				    offsetof(struct bpf_args, wirelen));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			mode = BPF_MODE(pc->code);
			if (mode != BPF_ABS && mode != BPF_IND)
				goto fail;

			if (unconditional_ret)
				continue;

			status = emit_pkt_read(compiler, pc,
			    to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_LDX:
			mode = BPF_MODE(pc->code);

			/* BPF_LDX+BPF_W+BPF_IMM	X <- k */
			if (mode == BPF_IMM) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_XREG, 0,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_W+BPF_LEN	X <- len */
			if (mode == BPF_LEN) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_XREG, 0,
				    SLJIT_MEM1(BJ_ARGS),
				    offsetof(struct bpf_args, wirelen));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_W+BPF_MEM	X <- M[k] */
			if (mode == BPF_MEM) {
				if (BPF_SIZE(pc->code) != BPF_W)
					goto fail;
				if ((uint32_t)pc->k >= memwords)
					goto fail;
				status = emit_memload(compiler,
				    BJ_XREG, pc->k, extwords);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_LDX+BPF_B+BPF_MSH	X <- 4*(P[k:1]&0xf) */
			if (mode != BPF_MSH || BPF_SIZE(pc->code) != BPF_B)
				goto fail;

			if (unconditional_ret)
				continue;

			status = emit_msh(compiler, pc,
			    to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_ST:
			if (pc->code != BPF_ST ||
			    (uint32_t)pc->k >= memwords) {
				goto fail;
			}

			status = emit_memstore(compiler,
			    BJ_AREG, pc->k, extwords);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_STX:
			if (pc->code != BPF_STX ||
			    (uint32_t)pc->k >= memwords) {
				goto fail;
			}

			status = emit_memstore(compiler,
			    BJ_XREG, pc->k, extwords);
			if (status != SLJIT_SUCCESS)
				goto fail;

			continue;

		case BPF_ALU:
			if (pc->code == (BPF_ALU|BPF_NEG)) {
				status = sljit_emit_op1(compiler,
				    SLJIT_NEG,
				    BJ_AREG, 0,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			if (BPF_OP(pc->code) != BPF_DIV) {
				status = sljit_emit_op2(compiler,
				    bpf_alu_to_sljit_op(pc),
				    BJ_AREG, 0,
				    BJ_AREG, 0,
				    kx_to_reg(pc), kx_to_reg_arg(pc));
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;
			}

			/* BPF_DIV */

			src = BPF_SRC(pc->code);
			if (src != BPF_X && src != BPF_K)
				goto fail;

			/* division by zero? */
			if (src == BPF_X) {
				jump = sljit_emit_cmp(compiler,
				    SLJIT_C_EQUAL|SLJIT_INT_OP,
				    BJ_XREG, 0,
				    SLJIT_IMM, 0);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			} else if (pc->k == 0) {
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;
				if (!append_jump(jump, &ret0,
				    &ret0_size, &ret0_maxsize))
					goto fail;
			}

			if (src == BPF_X) {
				status = emit_division(compiler, BJ_XREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;
			} else if (pc->k != 0) {
				if (pc->k & (pc->k - 1)) {
					status = emit_division(compiler,
					    SLJIT_IMM, (uint32_t)pc->k);
				} else {
					status = emit_pow2_division(compiler,
					    (uint32_t)pc->k);
				}
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			continue;

		case BPF_JMP:
			if (BPF_OP(pc->code) == BPF_JA) {
				jt = jf = pc->k;
			} else {
				jt = pc->jt;
				jf = pc->jf;
			}

			negate = (jt == 0) ? 1 : 0;
			branching = (jt == jf) ? 0 : 1;
			jtf = insn_dat[i].u.jdata.jtf;

			if (branching) {
				if (BPF_OP(pc->code) != BPF_JSET) {
					jump = sljit_emit_cmp(compiler,
					    bpf_jmp_to_sljit_cond(pc, negate),
					    BJ_AREG, 0,
					    kx_to_reg(pc), kx_to_reg_arg(pc));
				} else {
					status = sljit_emit_op2(compiler,
					    SLJIT_AND,
					    BJ_TMP1REG, 0,
					    BJ_AREG, 0,
					    kx_to_reg(pc), kx_to_reg_arg(pc));
					if (status != SLJIT_SUCCESS)
						goto fail;

					jump = sljit_emit_cmp(compiler,
					    bpf_jmp_to_sljit_cond(pc, negate),
					    BJ_TMP1REG, 0,
					    SLJIT_IMM, 0);
				}

				if (jump == NULL)
					goto fail;

				BJ_ASSERT(jtf[negate].sjump == NULL);
				jtf[negate].sjump = jump;
			}

			if (!branching || (jt != 0 && jf != 0)) {
				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
				if (jump == NULL)
					goto fail;

				BJ_ASSERT(jtf[branching].sjump == NULL);
				jtf[branching].sjump = jump;
			}

			continue;

		case BPF_RET:
			rval = BPF_RVAL(pc->code);
			if (rval == BPF_X)
				goto fail;

			/* BPF_RET+BPF_K	accepts k bytes */
			if (rval == BPF_K) {
				status = sljit_emit_return(compiler,
				    SLJIT_MOV_UI,
				    SLJIT_IMM, (uint32_t)pc->k);
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			/* BPF_RET+BPF_A	accepts A bytes */
			if (rval == BPF_A) {
				status = sljit_emit_return(compiler,
				    SLJIT_MOV_UI,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;
			}

			continue;

		case BPF_MISC:
			switch (BPF_MISCOP(pc->code)) {
			case BPF_TAX:
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV_UI,
				    BJ_XREG, 0,
				    BJ_AREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;

			case BPF_TXA:
				status = sljit_emit_op1(compiler,
				    SLJIT_MOV,
				    BJ_AREG, 0,
				    BJ_XREG, 0);
				if (status != SLJIT_SUCCESS)
					goto fail;

				continue;

			case BPF_COP:
			case BPF_COPX:
				if (bc == NULL || bc->copfuncs == NULL)
					goto fail;
				if (BPF_MISCOP(pc->code) == BPF_COP &&
				    (uint32_t)pc->k >= bc->nfuncs) {
					goto fail;
				}

				jump = NULL;
				status = emit_cop(compiler, bc, pc, &jump);
				if (status != SLJIT_SUCCESS)
					goto fail;

				if (jump != NULL && !append_jump(jump,
				    &ret0, &ret0_size, &ret0_maxsize))
					goto fail;

				continue;
			}

			goto fail;
		} /* switch */
	} /* main loop */

	BJ_ASSERT(ret0_size <= ret0_maxsize);

	if (ret0_size > 0) {
		label = sljit_emit_label(compiler);
		if (label == NULL)
			goto fail;
		for (i = 0; i < ret0_size; i++)
			sljit_set_label(ret0[i], label);
	}

	status = sljit_emit_return(compiler,
	    SLJIT_MOV_UI,
	    SLJIT_IMM, 0);
	if (status != SLJIT_SUCCESS)
		goto fail;

	rv = sljit_generate_code(compiler);

fail:
	if (compiler != NULL)
		sljit_free_compiler(compiler);

	if (insn_dat != NULL)
		BJ_FREE(insn_dat, insn_count * sizeof(insn_dat[0]));

	if (ret0 != NULL)
		BJ_FREE(ret0, ret0_maxsize * sizeof(ret0[0]));

	return (bpfjit_func_t)rv;
}

void
bpfjit_free_code(bpfjit_func_t code)
{

	sljit_free_code((void *)code);
}