1 /*	$NetBSD: bpfjit.c,v 1.39 2015/02/12 23:09:55 alnsn Exp $	*/
2
3 /*-
4 * Copyright (c) 2011-2014 Alexander Nasonov.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 #ifdef _KERNEL
34 __KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.39 2015/02/12 23:09:55 alnsn Exp $");
35 #else
36 __RCSID("$NetBSD: bpfjit.c,v 1.39 2015/02/12 23:09:55 alnsn Exp $");
37 #endif
38
39 #include <sys/types.h>
40 #include <sys/queue.h>
41
42 #ifndef _KERNEL
43 #include <assert.h>
44 #define BJ_ASSERT(c) assert(c)
45 #else
46 #define BJ_ASSERT(c) KASSERT(c)
47 #endif
48
49 #ifndef _KERNEL
50 #include <stdlib.h>
51 #define BJ_ALLOC(sz) malloc(sz)
52 #define BJ_FREE(p, sz) free(p)
53 #else
54 #include <sys/kmem.h>
55 #define BJ_ALLOC(sz) kmem_alloc(sz, KM_SLEEP)
56 #define BJ_FREE(p, sz) kmem_free(p, sz)
57 #endif
58
59 #ifndef _KERNEL
60 #include <limits.h>
61 #include <stdbool.h>
62 #include <stddef.h>
63 #include <stdint.h>
64 #else
65 #include <sys/atomic.h>
66 #include <sys/module.h>
67 #endif
68
69 #define __BPF_PRIVATE
70 #include <net/bpf.h>
71 #include <net/bpfjit.h>
72 #include <sljitLir.h>
73
74 #if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
75 #include <stdio.h> /* for stderr */
76 #endif
77
78 /*
79 * Arguments of the generated bpfjit_func_t.
80 * The first argument is reassigned upon entry
81 * to the more frequently used buf argument.
82 */
83 #define BJ_CTX_ARG SLJIT_SAVED_REG1
84 #define BJ_ARGS SLJIT_SAVED_REG2
85
86 /*
87 * Permanent register assignments.
88 */
89 #define BJ_BUF SLJIT_SAVED_REG1
90 /* BJ_ARGS is SLJIT_SAVED_REG2, defined above */
91 #define BJ_BUFLEN SLJIT_SAVED_REG3
92 #define BJ_AREG SLJIT_SCRATCH_REG1
93 #define BJ_TMP1REG SLJIT_SCRATCH_REG2
94 #define BJ_TMP2REG SLJIT_SCRATCH_REG3
95 #define BJ_XREG SLJIT_TEMPORARY_EREG1
96 #define BJ_TMP3REG SLJIT_TEMPORARY_EREG2
97
98 #ifdef _KERNEL
99 #define MAX_MEMWORDS BPF_MAX_MEMWORDS
100 #else
101 #define MAX_MEMWORDS BPF_MEMWORDS
102 #endif
103
104 #define BJ_INIT_NOBITS ((bpf_memword_init_t)0)
105 #define BJ_INIT_MBIT(k) BPF_MEMWORD_INIT(k)
106 #define BJ_INIT_ABIT BJ_INIT_MBIT(MAX_MEMWORDS)
107 #define BJ_INIT_XBIT BJ_INIT_MBIT(MAX_MEMWORDS + 1)
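/*
 * A single bpf_memword_init_t mask thus tracks the init status of every
 * memword plus the A and X registers.  Illustrative example, assuming
 * BPF_MEMWORD_INIT(k) expands to a one-bit shift (1 << k): in userland,
 * where MAX_MEMWORDS is BPF_MEMWORDS (16), BJ_INIT_MBIT(3) is 0x8,
 * BJ_INIT_ABIT is 1 << 16 and BJ_INIT_XBIT is 1 << 17.
 */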
108
109 /*
110 * Get the number of memwords and external memwords from a bpf_ctx object.
111 */
112 #define GET_EXTWORDS(bc) ((bc) ? (bc)->extwords : 0)
113 #define GET_MEMWORDS(bc) (GET_EXTWORDS(bc) ? GET_EXTWORDS(bc) : BPF_MEMWORDS)
114
115 /*
116 * Optimization hints.
117 */
118 typedef unsigned int bpfjit_hint_t;
119 #define BJ_HINT_ABS 0x01 /* packet read at absolute offset */
120 #define BJ_HINT_IND 0x02 /* packet read at variable offset */
121 #define BJ_HINT_MSH 0x04 /* BPF_MSH instruction */
122 #define BJ_HINT_COP 0x08 /* BPF_COP or BPF_COPX instruction */
123 #define BJ_HINT_COPX 0x10 /* BPF_COPX instruction */
124 #define BJ_HINT_XREG 0x20 /* BJ_XREG is needed */
125 #define BJ_HINT_LDX 0x40 /* BPF_LDX instruction */
126 #define BJ_HINT_PKT (BJ_HINT_ABS|BJ_HINT_IND|BJ_HINT_MSH)
127
128 /*
129 * Datatype for Array Bounds Check Elimination (ABC) pass.
130 */
131 typedef uint64_t bpfjit_abc_length_t;
132 #define MAX_ABC_LENGTH (UINT32_MAX + UINT64_C(4)) /* max. width is 4 */
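/*
 * The widest packet read is 4 bytes, so the worst case is
 * BPF_LD+BPF_W with k == UINT32_MAX, which touches bytes [k, k+3]
 * and needs a length of UINT32_MAX + 4.  That is why the ABC
 * datatype must be wider than 32 bits.
 */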
133
134 struct bpfjit_stack
135 {
136 bpf_ctx_t *ctx;
137 uint32_t *extmem; /* pointer to external memory store */
138 uint32_t reg; /* saved A or X register */
139 #ifdef _KERNEL
140 int err; /* 3rd argument for m_xword/m_xhalf/m_xbyte function call */
141 #endif
142 uint32_t mem[BPF_MEMWORDS]; /* internal memory store */
143 };
144
145 /*
146 * Data for the BPF_JMP instruction.
147 * Forward-declared here for use in struct bpfjit_jump.
148 */
149 struct bpfjit_jump_data;
150
151 /*
152 * Node of bjumps list.
153 */
154 struct bpfjit_jump {
155 struct sljit_jump *sjump;
156 SLIST_ENTRY(bpfjit_jump) entries;
157 struct bpfjit_jump_data *jdata;
158 };
159
160 /*
161 * Data for the BPF_JMP instruction.
162 */
163 struct bpfjit_jump_data {
164 /*
165 * These entries make up the bjumps list:
166 * jtf[0] - when coming from jt path,
167 * jtf[1] - when coming from jf path.
168 */
169 struct bpfjit_jump jtf[2];
170 /*
171 * Length calculated by Array Bounds Check Elimination (ABC) pass.
172 */
173 bpfjit_abc_length_t abc_length;
174 /*
175 * Length checked by the last out-of-bounds check.
176 */
177 bpfjit_abc_length_t checked_length;
178 };
179
180 /*
181 * Data for "read from packet" instructions.
182 * See also read_pkt_insn() function below.
183 */
184 struct bpfjit_read_pkt_data {
185 /*
186 * Length calculated by Array Bounds Check Elimination (ABC) pass.
187 */
188 bpfjit_abc_length_t abc_length;
189 /*
190 * If positive, emit "if (buflen < check_length) return 0"
191 * out-of-bounds check.
192 * Values greater than UINT32_MAX generate unconditional "return 0".
193 */
194 bpfjit_abc_length_t check_length;
195 };
196
197 /*
198 * Additional (optimization-related) data for bpf_insn.
199 */
200 struct bpfjit_insn_data {
201 /* List of jumps to this insn. */
202 SLIST_HEAD(, bpfjit_jump) bjumps;
203
204 union {
205 struct bpfjit_jump_data jdata;
206 struct bpfjit_read_pkt_data rdata;
207 } u;
208
209 bpf_memword_init_t invalid;
210 bool unreachable;
211 };
212
213 #ifdef _KERNEL
214
215 uint32_t m_xword(const struct mbuf *, uint32_t, int *);
216 uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
217 uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);
218
219 MODULE(MODULE_CLASS_MISC, bpfjit, "sljit")
220
221 static int
222 bpfjit_modcmd(modcmd_t cmd, void *arg)
223 {
224
225 switch (cmd) {
226 case MODULE_CMD_INIT:
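/*
 * Publish bj_free_code before bj_generate_code: the membar_producer()
 * calls below order the stores so that a consumer which observes the
 * generate hook can also see the free hook needed to release the code
 * it produces.
 */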
227 bpfjit_module_ops.bj_free_code = &bpfjit_free_code;
228 membar_producer();
229 bpfjit_module_ops.bj_generate_code = &bpfjit_generate_code;
230 membar_producer();
231 return 0;
232
233 case MODULE_CMD_FINI:
234 return EOPNOTSUPP;
235
236 default:
237 return ENOTTY;
238 }
239 }
240 #endif
241
242 /*
243 * Return the number of scratch registers to pass
244 * to the sljit_emit_enter() function.
245 */
246 static sljit_si
247 nscratches(bpfjit_hint_t hints)
248 {
249 sljit_si rv = 2;
250
251 #ifdef _KERNEL
252 if (hints & BJ_HINT_PKT)
253 rv = 3; /* xcall with three arguments */
254 #endif
255
256 if (hints & BJ_HINT_IND)
257 rv = 3; /* uses BJ_TMP2REG */
258
259 if (hints & BJ_HINT_COP)
260 rv = 3; /* calls copfunc with three arguments */
261
262 if (hints & BJ_HINT_XREG)
263 rv = 4; /* uses BJ_XREG */
264
265 #ifdef _KERNEL
266 if (hints & BJ_HINT_LDX)
267 rv = 5; /* uses BJ_TMP3REG */
268 #endif
269
270 if (hints & BJ_HINT_COPX)
271 rv = 5; /* uses BJ_TMP3REG */
272
273 return rv;
274 }
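/*
 * Example: a userland filter that only does BPF_LD+BPF_ABS loads needs
 * just 2 scratch registers; BPF_LDX raises this to 4 in userland
 * (BJ_HINT_XREG) and to 5 in the kernel (BJ_HINT_LDX), and BPF_COPX
 * always requires 5.
 */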
275
276 /*
277 * Return the number of saved registers to pass
278 * to the sljit_emit_enter() function.
279 */
280 static sljit_si
281 nsaveds(bpfjit_hint_t hints)
282 {
283 sljit_si rv = 3;
284
285 return rv;
286 }
287
288 static uint32_t
289 read_width(const struct bpf_insn *pc)
290 {
291
292 switch (BPF_SIZE(pc->code)) {
293 case BPF_W: return 4;
294 case BPF_H: return 2;
295 case BPF_B: return 1;
296 default: return 0;
297 }
298 }
299
300 /*
301 * Copy the buf and buflen members of bpf_args from the BJ_ARGS
302 * pointer to the BJ_BUF and BJ_BUFLEN registers.
303 */
304 static int
305 load_buf_buflen(struct sljit_compiler *compiler)
306 {
307 int status;
308
309 status = sljit_emit_op1(compiler,
310 SLJIT_MOV_P,
311 BJ_BUF, 0,
312 SLJIT_MEM1(BJ_ARGS),
313 offsetof(struct bpf_args, pkt));
314 if (status != SLJIT_SUCCESS)
315 return status;
316
317 status = sljit_emit_op1(compiler,
318 SLJIT_MOV, /* size_t source */
319 BJ_BUFLEN, 0,
320 SLJIT_MEM1(BJ_ARGS),
321 offsetof(struct bpf_args, buflen));
322
323 return status;
324 }
325
326 static bool
327 grow_jumps(struct sljit_jump ***jumps, size_t *size)
328 {
329 struct sljit_jump **newptr;
330 const size_t elemsz = sizeof(struct sljit_jump *);
331 size_t old_size = *size;
332 size_t new_size = 2 * old_size;
333
334 if (new_size < old_size || new_size > SIZE_MAX / elemsz)
335 return false;
336
337 newptr = BJ_ALLOC(new_size * elemsz);
338 if (newptr == NULL)
339 return false;
340
341 memcpy(newptr, *jumps, old_size * elemsz);
342 BJ_FREE(*jumps, old_size * elemsz);
343
344 *jumps = newptr;
345 *size = new_size;
346 return true;
347 }
348
349 static bool
350 append_jump(struct sljit_jump *jump, struct sljit_jump ***jumps,
351 size_t *size, size_t *max_size)
352 {
353 if (*size == *max_size && !grow_jumps(jumps, max_size))
354 return false;
355
356 (*jumps)[(*size)++] = jump;
357 return true;
358 }
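/*
 * Typical usage, as in generate_insn_code() below, which keeps its
 * ret0 list this way starting from a 64-entry array:
 *
 *	if (!append_jump(jump, &ret0, &ret0_size, &ret0_maxsize))
 *		return SLJIT_ERR_ALLOC_FAILED;
 *
 * grow_jumps() doubles the array on demand and fails cleanly on
 * overflow, so callers only ever check the boolean result.
 */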
359
360 /*
361 * Emit code for BPF_LD+BPF_B+BPF_ABS A <- P[k:1].
362 */
363 static int
364 emit_read8(struct sljit_compiler *compiler, sljit_si src, uint32_t k)
365 {
366
367 return sljit_emit_op1(compiler,
368 SLJIT_MOV_UB,
369 BJ_AREG, 0,
370 SLJIT_MEM1(src), k);
371 }
372
373 /*
374 * Emit code for BPF_LD+BPF_H+BPF_ABS A <- P[k:2].
375 */
376 static int
377 emit_read16(struct sljit_compiler *compiler, sljit_si src, uint32_t k)
378 {
379 int status;
380
381 BJ_ASSERT(k <= UINT32_MAX - 1);
382
383 /* A = buf[k]; */
384 status = sljit_emit_op1(compiler,
385 SLJIT_MOV_UB,
386 BJ_AREG, 0,
387 SLJIT_MEM1(src), k);
388 if (status != SLJIT_SUCCESS)
389 return status;
390
391 /* tmp1 = buf[k+1]; */
392 status = sljit_emit_op1(compiler,
393 SLJIT_MOV_UB,
394 BJ_TMP1REG, 0,
395 SLJIT_MEM1(src), k+1);
396 if (status != SLJIT_SUCCESS)
397 return status;
398
399 /* A = A << 8; */
400 status = sljit_emit_op2(compiler,
401 SLJIT_SHL,
402 BJ_AREG, 0,
403 BJ_AREG, 0,
404 SLJIT_IMM, 8);
405 if (status != SLJIT_SUCCESS)
406 return status;
407
408 /* A = A + tmp1; */
409 status = sljit_emit_op2(compiler,
410 SLJIT_ADD,
411 BJ_AREG, 0,
412 BJ_AREG, 0,
413 BJ_TMP1REG, 0);
414 return status;
415 }
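/*
 * The byte-by-byte sequence above computes A = (P[k] << 8) + P[k+1],
 * i.e. a network-order (big-endian) halfword load that is independent
 * of host endianness and alignment.
 */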
416
417 /*
418 * Emit code for BPF_LD+BPF_W+BPF_ABS A <- P[k:4].
419 */
420 static int
421 emit_read32(struct sljit_compiler *compiler, sljit_si src, uint32_t k)
422 {
423 int status;
424
425 BJ_ASSERT(k <= UINT32_MAX - 3);
426
427 /* A = buf[k]; */
428 status = sljit_emit_op1(compiler,
429 SLJIT_MOV_UB,
430 BJ_AREG, 0,
431 SLJIT_MEM1(src), k);
432 if (status != SLJIT_SUCCESS)
433 return status;
434
435 /* tmp1 = buf[k+1]; */
436 status = sljit_emit_op1(compiler,
437 SLJIT_MOV_UB,
438 BJ_TMP1REG, 0,
439 SLJIT_MEM1(src), k+1);
440 if (status != SLJIT_SUCCESS)
441 return status;
442
443 /* A = A << 8; */
444 status = sljit_emit_op2(compiler,
445 SLJIT_SHL,
446 BJ_AREG, 0,
447 BJ_AREG, 0,
448 SLJIT_IMM, 8);
449 if (status != SLJIT_SUCCESS)
450 return status;
451
452 /* A = A + tmp1; */
453 status = sljit_emit_op2(compiler,
454 SLJIT_ADD,
455 BJ_AREG, 0,
456 BJ_AREG, 0,
457 BJ_TMP1REG, 0);
458 if (status != SLJIT_SUCCESS)
459 return status;
460
461 /* tmp1 = buf[k+2]; */
462 status = sljit_emit_op1(compiler,
463 SLJIT_MOV_UB,
464 BJ_TMP1REG, 0,
465 SLJIT_MEM1(src), k+2);
466 if (status != SLJIT_SUCCESS)
467 return status;
468
469 /* A = A << 8; */
470 status = sljit_emit_op2(compiler,
471 SLJIT_SHL,
472 BJ_AREG, 0,
473 BJ_AREG, 0,
474 SLJIT_IMM, 8);
475 if (status != SLJIT_SUCCESS)
476 return status;
477
478 /* A = A + tmp1; */
479 status = sljit_emit_op2(compiler,
480 SLJIT_ADD,
481 BJ_AREG, 0,
482 BJ_AREG, 0,
483 BJ_TMP1REG, 0);
484 if (status != SLJIT_SUCCESS)
485 return status;
486
487 /* tmp1 = buf[k+3]; */
488 status = sljit_emit_op1(compiler,
489 SLJIT_MOV_UB,
490 BJ_TMP1REG, 0,
491 SLJIT_MEM1(src), k+3);
492 if (status != SLJIT_SUCCESS)
493 return status;
494
495 /* A = A << 8; */
496 status = sljit_emit_op2(compiler,
497 SLJIT_SHL,
498 BJ_AREG, 0,
499 BJ_AREG, 0,
500 SLJIT_IMM, 8);
501 if (status != SLJIT_SUCCESS)
502 return status;
503
504 /* A = A + tmp1; */
505 status = sljit_emit_op2(compiler,
506 SLJIT_ADD,
507 BJ_AREG, 0,
508 BJ_AREG, 0,
509 BJ_TMP1REG, 0);
510 return status;
511 }
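/*
 * Likewise, the sequence above computes
 * A = (P[k] << 24) + (P[k+1] << 16) + (P[k+2] << 8) + P[k+3],
 * a network-order word load with no unaligned memory access.
 */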
512
513 #ifdef _KERNEL
514 /*
515 * Emit code for an m_xword/m_xhalf/m_xbyte call.
516 *
517 * @pc BPF_LD+BPF_W+BPF_ABS A <- P[k:4]
518 * BPF_LD+BPF_H+BPF_ABS A <- P[k:2]
519 * BPF_LD+BPF_B+BPF_ABS A <- P[k:1]
520 * BPF_LD+BPF_W+BPF_IND A <- P[X+k:4]
521 * BPF_LD+BPF_H+BPF_IND A <- P[X+k:2]
522 * BPF_LD+BPF_B+BPF_IND A <- P[X+k:1]
523 * BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf)
524 */
525 static int
526 emit_xcall(struct sljit_compiler *compiler, bpfjit_hint_t hints,
527 const struct bpf_insn *pc, int dst, struct sljit_jump ***ret0,
528 size_t *ret0_size, size_t *ret0_maxsize,
529 uint32_t (*fn)(const struct mbuf *, uint32_t, int *))
530 {
531 #if BJ_XREG == SLJIT_RETURN_REG || \
532 BJ_XREG == SLJIT_SCRATCH_REG1 || \
533 BJ_XREG == SLJIT_SCRATCH_REG2 || \
534 BJ_XREG == SLJIT_SCRATCH_REG3
535 #error "Unsupported register assignment."
536 #endif
537 struct sljit_jump *jump;
538 sljit_si save_reg;
539 int status;
540
541 save_reg = (BPF_CLASS(pc->code) == BPF_LDX) ? BJ_AREG : BJ_XREG;
542
543 if (save_reg == BJ_AREG || (hints & BJ_HINT_XREG)) {
544 /* save A or X */
545 status = sljit_emit_op1(compiler,
546 SLJIT_MOV_UI, /* uint32_t destination */
547 SLJIT_MEM1(SLJIT_LOCALS_REG),
548 offsetof(struct bpfjit_stack, reg),
549 save_reg, 0);
550 if (status != SLJIT_SUCCESS)
551 return status;
552 }
553
554 /*
555 * Prepare registers for fn(mbuf, k, &err) call.
556 */
557 status = sljit_emit_op1(compiler,
558 SLJIT_MOV,
559 SLJIT_SCRATCH_REG1, 0,
560 BJ_BUF, 0);
561 if (status != SLJIT_SUCCESS)
562 return status;
563
564 if (BPF_CLASS(pc->code) == BPF_LD && BPF_MODE(pc->code) == BPF_IND) {
565 if (pc->k == 0) {
566 /* k = X; */
567 status = sljit_emit_op1(compiler,
568 SLJIT_MOV,
569 SLJIT_SCRATCH_REG2, 0,
570 BJ_XREG, 0);
571 if (status != SLJIT_SUCCESS)
572 return status;
573 } else {
574 /* if (X > UINT32_MAX - pc->k) return 0; */
575 jump = sljit_emit_cmp(compiler,
576 SLJIT_C_GREATER,
577 BJ_XREG, 0,
578 SLJIT_IMM, UINT32_MAX - pc->k);
579 if (jump == NULL)
580 return SLJIT_ERR_ALLOC_FAILED;
581 if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
582 return SLJIT_ERR_ALLOC_FAILED;
583
584 /* k = X + pc->k; */
585 status = sljit_emit_op2(compiler,
586 SLJIT_ADD,
587 SLJIT_SCRATCH_REG2, 0,
588 BJ_XREG, 0,
589 SLJIT_IMM, (uint32_t)pc->k);
590 if (status != SLJIT_SUCCESS)
591 return status;
592 }
593 } else {
594 /* k = pc->k */
595 status = sljit_emit_op1(compiler,
596 SLJIT_MOV,
597 SLJIT_SCRATCH_REG2, 0,
598 SLJIT_IMM, (uint32_t)pc->k);
599 if (status != SLJIT_SUCCESS)
600 return status;
601 }
602
603 /*
604 * The third argument of fn is an address on the stack.
605 */
606 status = sljit_get_local_base(compiler,
607 SLJIT_SCRATCH_REG3, 0,
608 offsetof(struct bpfjit_stack, err));
609 if (status != SLJIT_SUCCESS)
610 return status;
611
612 /* fn(buf, k, &err); */
613 status = sljit_emit_ijump(compiler,
614 SLJIT_CALL3,
615 SLJIT_IMM, SLJIT_FUNC_OFFSET(fn));
616 if (status != SLJIT_SUCCESS)
617 return status;
618
619 if (dst != SLJIT_RETURN_REG) {
620 /* move return value to dst */
621 status = sljit_emit_op1(compiler,
622 SLJIT_MOV,
623 dst, 0,
624 SLJIT_RETURN_REG, 0);
625 if (status != SLJIT_SUCCESS)
626 return status;
627 }
628
629 /* if (*err != 0) return 0; */
630 jump = sljit_emit_cmp(compiler,
631 SLJIT_C_NOT_EQUAL|SLJIT_INT_OP,
632 SLJIT_MEM1(SLJIT_LOCALS_REG),
633 offsetof(struct bpfjit_stack, err),
634 SLJIT_IMM, 0);
635 if (jump == NULL)
636 return SLJIT_ERR_ALLOC_FAILED;
637
638 if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
639 return SLJIT_ERR_ALLOC_FAILED;
640
641 if (save_reg == BJ_AREG || (hints & BJ_HINT_XREG)) {
642 /* restore A or X */
643 status = sljit_emit_op1(compiler,
644 SLJIT_MOV_UI, /* uint32_t source */
645 save_reg, 0,
646 SLJIT_MEM1(SLJIT_LOCALS_REG),
647 offsetof(struct bpfjit_stack, reg));
648 if (status != SLJIT_SUCCESS)
649 return status;
650 }
651
652 return SLJIT_SUCCESS;
653 }
654 #endif
655
656 /*
657 * Emit code for BPF_COP and BPF_COPX instructions.
658 */
659 static int
660 emit_cop(struct sljit_compiler *compiler, bpfjit_hint_t hints,
661 const bpf_ctx_t *bc, const struct bpf_insn *pc,
662 struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
663 {
664 #if BJ_XREG == SLJIT_RETURN_REG || \
665 BJ_XREG == SLJIT_SCRATCH_REG1 || \
666 BJ_XREG == SLJIT_SCRATCH_REG2 || \
667 BJ_XREG == SLJIT_SCRATCH_REG3 || \
668 BJ_TMP3REG == SLJIT_SCRATCH_REG1 || \
669 BJ_TMP3REG == SLJIT_SCRATCH_REG2 || \
670 BJ_TMP3REG == SLJIT_SCRATCH_REG3
671 #error "Unsupported register assignment."
672 #endif
673
674 struct sljit_jump *jump;
675 sljit_si call_reg;
676 sljit_sw call_off;
677 int status;
678
679 BJ_ASSERT(bc != NULL && bc->copfuncs != NULL);
680
681 if (hints & BJ_HINT_LDX) {
682 /* save X */
683 status = sljit_emit_op1(compiler,
684 SLJIT_MOV_UI, /* uint32_t destination */
685 SLJIT_MEM1(SLJIT_LOCALS_REG),
686 offsetof(struct bpfjit_stack, reg),
687 BJ_XREG, 0);
688 if (status != SLJIT_SUCCESS)
689 return status;
690 }
691
692 if (BPF_MISCOP(pc->code) == BPF_COP) {
693 call_reg = SLJIT_IMM;
694 call_off = SLJIT_FUNC_OFFSET(bc->copfuncs[pc->k]);
695 } else {
696 /* if (X >= bc->nfuncs) return 0; */
697 jump = sljit_emit_cmp(compiler,
698 SLJIT_C_GREATER_EQUAL,
699 BJ_XREG, 0,
700 SLJIT_IMM, bc->nfuncs);
701 if (jump == NULL)
702 return SLJIT_ERR_ALLOC_FAILED;
703 if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
704 return SLJIT_ERR_ALLOC_FAILED;
705
706 /* tmp1 = ctx; */
707 status = sljit_emit_op1(compiler,
708 SLJIT_MOV_P,
709 BJ_TMP1REG, 0,
710 SLJIT_MEM1(SLJIT_LOCALS_REG),
711 offsetof(struct bpfjit_stack, ctx));
712 if (status != SLJIT_SUCCESS)
713 return status;
714
715 /* tmp1 = ctx->copfuncs; */
716 status = sljit_emit_op1(compiler,
717 SLJIT_MOV_P,
718 BJ_TMP1REG, 0,
719 SLJIT_MEM1(BJ_TMP1REG),
720 offsetof(struct bpf_ctx, copfuncs));
721 if (status != SLJIT_SUCCESS)
722 return status;
723
724 /* tmp2 = X; */
725 status = sljit_emit_op1(compiler,
726 SLJIT_MOV,
727 BJ_TMP2REG, 0,
728 BJ_XREG, 0);
729 if (status != SLJIT_SUCCESS)
730 return status;
731
732 /* tmp3 = ctx->copfuncs[tmp2]; */
733 call_reg = BJ_TMP3REG;
734 call_off = 0;
735 status = sljit_emit_op1(compiler,
736 SLJIT_MOV_P,
737 call_reg, call_off,
738 SLJIT_MEM2(BJ_TMP1REG, BJ_TMP2REG),
739 SLJIT_WORD_SHIFT);
740 if (status != SLJIT_SUCCESS)
741 return status;
742 }
743
744 /*
745 * Copy bpf_copfunc_t arguments to registers.
746 */
747 #if BJ_AREG != SLJIT_SCRATCH_REG3
748 status = sljit_emit_op1(compiler,
749 SLJIT_MOV_UI,
750 SLJIT_SCRATCH_REG3, 0,
751 BJ_AREG, 0);
752 if (status != SLJIT_SUCCESS)
753 return status;
754 #endif
755
756 status = sljit_emit_op1(compiler,
757 SLJIT_MOV_P,
758 SLJIT_SCRATCH_REG1, 0,
759 SLJIT_MEM1(SLJIT_LOCALS_REG),
760 offsetof(struct bpfjit_stack, ctx));
761 if (status != SLJIT_SUCCESS)
762 return status;
763
764 status = sljit_emit_op1(compiler,
765 SLJIT_MOV_P,
766 SLJIT_SCRATCH_REG2, 0,
767 BJ_ARGS, 0);
768 if (status != SLJIT_SUCCESS)
769 return status;
770
771 status = sljit_emit_ijump(compiler,
772 SLJIT_CALL3, call_reg, call_off);
773 if (status != SLJIT_SUCCESS)
774 return status;
775
776 #if BJ_AREG != SLJIT_RETURN_REG
777 status = sljit_emit_op1(compiler,
778 SLJIT_MOV,
779 BJ_AREG, 0,
780 SLJIT_RETURN_REG, 0);
781 if (status != SLJIT_SUCCESS)
782 return status;
783 #endif
784
785 if (hints & BJ_HINT_LDX) {
786 /* restore X */
787 status = sljit_emit_op1(compiler,
788 SLJIT_MOV_UI, /* uint32_t source */
789 BJ_XREG, 0,
790 SLJIT_MEM1(SLJIT_LOCALS_REG),
791 offsetof(struct bpfjit_stack, reg));
792 if (status != SLJIT_SUCCESS)
793 return status;
794 }
795
796 return SLJIT_SUCCESS;
797 }
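/*
 * Note the asymmetry: for BPF_COP the function index pc->k is validated
 * at compile time (see generate_insn_code()) and the callee address is
 * embedded as an immediate, while for BPF_COPX the index lives in X and
 * is range-checked against bc->nfuncs at run time, returning 0 on
 * failure.
 */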
798
799 /*
800 * Generate code for
801 * BPF_LD+BPF_W+BPF_ABS A <- P[k:4]
802 * BPF_LD+BPF_H+BPF_ABS A <- P[k:2]
803 * BPF_LD+BPF_B+BPF_ABS A <- P[k:1]
804 * BPF_LD+BPF_W+BPF_IND A <- P[X+k:4]
805 * BPF_LD+BPF_H+BPF_IND A <- P[X+k:2]
806 * BPF_LD+BPF_B+BPF_IND A <- P[X+k:1]
807 */
808 static int
809 emit_pkt_read(struct sljit_compiler *compiler, bpfjit_hint_t hints,
810 const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
811 struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
812 {
813 int status = SLJIT_ERR_ALLOC_FAILED;
814 uint32_t width;
815 sljit_si ld_reg;
816 struct sljit_jump *jump;
817 #ifdef _KERNEL
818 struct sljit_label *label;
819 struct sljit_jump *over_mchain_jump;
820 const bool check_zero_buflen = (to_mchain_jump != NULL);
821 #endif
822 const uint32_t k = pc->k;
823
824 #ifdef _KERNEL
825 if (to_mchain_jump == NULL) {
826 to_mchain_jump = sljit_emit_cmp(compiler,
827 SLJIT_C_EQUAL,
828 BJ_BUFLEN, 0,
829 SLJIT_IMM, 0);
830 if (to_mchain_jump == NULL)
831 return SLJIT_ERR_ALLOC_FAILED;
832 }
833 #endif
834
835 ld_reg = BJ_BUF;
836 width = read_width(pc);
837 if (width == 0)
838 return SLJIT_ERR_ALLOC_FAILED;
839
840 if (BPF_MODE(pc->code) == BPF_IND) {
841 /* tmp1 = buflen - (pc->k + width); */
842 status = sljit_emit_op2(compiler,
843 SLJIT_SUB,
844 BJ_TMP1REG, 0,
845 BJ_BUFLEN, 0,
846 SLJIT_IMM, k + width);
847 if (status != SLJIT_SUCCESS)
848 return status;
849
850 /* ld_reg = buf + X; */
851 ld_reg = BJ_TMP2REG;
852 status = sljit_emit_op2(compiler,
853 SLJIT_ADD,
854 ld_reg, 0,
855 BJ_BUF, 0,
856 BJ_XREG, 0);
857 if (status != SLJIT_SUCCESS)
858 return status;
859
860 /* if (tmp1 < X) return 0; */
861 jump = sljit_emit_cmp(compiler,
862 SLJIT_C_LESS,
863 BJ_TMP1REG, 0,
864 BJ_XREG, 0);
865 if (jump == NULL)
866 return SLJIT_ERR_ALLOC_FAILED;
867 if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
868 return SLJIT_ERR_ALLOC_FAILED;
869 }
870
871 switch (width) {
872 case 4:
873 status = emit_read32(compiler, ld_reg, k);
874 break;
875 case 2:
876 status = emit_read16(compiler, ld_reg, k);
877 break;
878 case 1:
879 status = emit_read8(compiler, ld_reg, k);
880 break;
881 }
882
883 if (status != SLJIT_SUCCESS)
884 return status;
885
886 #ifdef _KERNEL
887 over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
888 if (over_mchain_jump == NULL)
889 return SLJIT_ERR_ALLOC_FAILED;
890
891 /* entry point to mchain handler */
892 label = sljit_emit_label(compiler);
893 if (label == NULL)
894 return SLJIT_ERR_ALLOC_FAILED;
895 sljit_set_label(to_mchain_jump, label);
896
897 if (check_zero_buflen) {
898 /* if (buflen != 0) return 0; */
899 jump = sljit_emit_cmp(compiler,
900 SLJIT_C_NOT_EQUAL,
901 BJ_BUFLEN, 0,
902 SLJIT_IMM, 0);
903 if (jump == NULL)
904 return SLJIT_ERR_ALLOC_FAILED;
905 if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
906 return SLJIT_ERR_ALLOC_FAILED;
907 }
908
909 switch (width) {
910 case 4:
911 status = emit_xcall(compiler, hints, pc, BJ_AREG,
912 ret0, ret0_size, ret0_maxsize, &m_xword);
913 break;
914 case 2:
915 status = emit_xcall(compiler, hints, pc, BJ_AREG,
916 ret0, ret0_size, ret0_maxsize, &m_xhalf);
917 break;
918 case 1:
919 status = emit_xcall(compiler, hints, pc, BJ_AREG,
920 ret0, ret0_size, ret0_maxsize, &m_xbyte);
921 break;
922 }
923
924 if (status != SLJIT_SUCCESS)
925 return status;
926
927 label = sljit_emit_label(compiler);
928 if (label == NULL)
929 return SLJIT_ERR_ALLOC_FAILED;
930 sljit_set_label(over_mchain_jump, label);
931 #endif
932
933 return SLJIT_SUCCESS;
934 }
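/*
 * In the kernel the generated read has two paths: when buflen is
 * non-zero the packet is a contiguous buffer and is read directly;
 * when buflen is zero the buf pointer actually refers to an mbuf
 * chain and the load is performed by an m_xword/m_xhalf/m_xbyte call
 * emitted via emit_xcall().
 */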
935
936 static int
937 emit_memload(struct sljit_compiler *compiler,
938 sljit_si dst, uint32_t k, size_t extwords)
939 {
940 int status;
941 sljit_si src;
942 sljit_sw srcw;
943
944 srcw = k * sizeof(uint32_t);
945
946 if (extwords == 0) {
947 src = SLJIT_MEM1(SLJIT_LOCALS_REG);
948 srcw += offsetof(struct bpfjit_stack, mem);
949 } else {
950 /* copy extmem pointer to the tmp1 register */
951 status = sljit_emit_op1(compiler,
952 SLJIT_MOV_P,
953 BJ_TMP1REG, 0,
954 SLJIT_MEM1(SLJIT_LOCALS_REG),
955 offsetof(struct bpfjit_stack, extmem));
956 if (status != SLJIT_SUCCESS)
957 return status;
958 src = SLJIT_MEM1(BJ_TMP1REG);
959 }
960
961 return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, 0, src, srcw);
962 }
963
964 static int
965 emit_memstore(struct sljit_compiler *compiler,
966 sljit_si src, uint32_t k, size_t extwords)
967 {
968 int status;
969 sljit_si dst;
970 sljit_sw dstw;
971
972 dstw = k * sizeof(uint32_t);
973
974 if (extwords == 0) {
975 dst = SLJIT_MEM1(SLJIT_LOCALS_REG);
976 dstw += offsetof(struct bpfjit_stack, mem);
977 } else {
978 /* copy extmem pointer to the tmp1 register */
979 status = sljit_emit_op1(compiler,
980 SLJIT_MOV_P,
981 BJ_TMP1REG, 0,
982 SLJIT_MEM1(SLJIT_LOCALS_REG),
983 offsetof(struct bpfjit_stack, extmem));
984 if (status != SLJIT_SUCCESS)
985 return status;
986 dst = SLJIT_MEM1(BJ_TMP1REG);
987 }
988
989 return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, dstw, src, 0);
990 }
991
992 /*
993 * Emit code for BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf).
994 */
995 static int
996 emit_msh(struct sljit_compiler *compiler, bpfjit_hint_t hints,
997 const struct bpf_insn *pc, struct sljit_jump *to_mchain_jump,
998 struct sljit_jump ***ret0, size_t *ret0_size, size_t *ret0_maxsize)
999 {
1000 int status;
1001 #ifdef _KERNEL
1002 struct sljit_label *label;
1003 struct sljit_jump *jump, *over_mchain_jump;
1004 const bool check_zero_buflen = (to_mchain_jump != NULL);
1005 #endif
1006 const uint32_t k = pc->k;
1007
1008 #ifdef _KERNEL
1009 if (to_mchain_jump == NULL) {
1010 to_mchain_jump = sljit_emit_cmp(compiler,
1011 SLJIT_C_EQUAL,
1012 BJ_BUFLEN, 0,
1013 SLJIT_IMM, 0);
1014 if (to_mchain_jump == NULL)
1015 return SLJIT_ERR_ALLOC_FAILED;
1016 }
1017 #endif
1018
1019 /* tmp1 = buf[k] */
1020 status = sljit_emit_op1(compiler,
1021 SLJIT_MOV_UB,
1022 BJ_TMP1REG, 0,
1023 SLJIT_MEM1(BJ_BUF), k);
1024 if (status != SLJIT_SUCCESS)
1025 return status;
1026
1027 #ifdef _KERNEL
1028 over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP);
1029 if (over_mchain_jump == NULL)
1030 return SLJIT_ERR_ALLOC_FAILED;
1031
1032 /* entry point to mchain handler */
1033 label = sljit_emit_label(compiler);
1034 if (label == NULL)
1035 return SLJIT_ERR_ALLOC_FAILED;
1036 sljit_set_label(to_mchain_jump, label);
1037
1038 if (check_zero_buflen) {
1039 /* if (buflen != 0) return 0; */
1040 jump = sljit_emit_cmp(compiler,
1041 SLJIT_C_NOT_EQUAL,
1042 BJ_BUFLEN, 0,
1043 SLJIT_IMM, 0);
1044 if (jump == NULL)
1045 return SLJIT_ERR_ALLOC_FAILED;
1046 if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
1047 return SLJIT_ERR_ALLOC_FAILED;
1048 }
1049
1050 status = emit_xcall(compiler, hints, pc, BJ_TMP1REG,
1051 ret0, ret0_size, ret0_maxsize, &m_xbyte);
1052 if (status != SLJIT_SUCCESS)
1053 return status;
1054
1055 label = sljit_emit_label(compiler);
1056 if (label == NULL)
1057 return SLJIT_ERR_ALLOC_FAILED;
1058 sljit_set_label(over_mchain_jump, label);
1059 #endif
1060
1061 /* tmp1 &= 0xf */
1062 status = sljit_emit_op2(compiler,
1063 SLJIT_AND,
1064 BJ_TMP1REG, 0,
1065 BJ_TMP1REG, 0,
1066 SLJIT_IMM, 0xf);
1067 if (status != SLJIT_SUCCESS)
1068 return status;
1069
1070 /* X = tmp1 << 2 */
1071 status = sljit_emit_op2(compiler,
1072 SLJIT_SHL,
1073 BJ_XREG, 0,
1074 BJ_TMP1REG, 0,
1075 SLJIT_IMM, 2);
1076 if (status != SLJIT_SUCCESS)
1077 return status;
1078
1079 return SLJIT_SUCCESS;
1080 }
1081
1082 /*
1083 * Emit code for A = A / k or A = A % k when k is a power of 2.
1084 * @pc BPF_DIV or BPF_MOD instruction.
1085 */
1086 static int
1087 emit_pow2_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
1088 {
1089 uint32_t k = pc->k;
1090 int status = SLJIT_SUCCESS;
1091
1092 BJ_ASSERT(k != 0 && (k & (k - 1)) == 0);
1093
1094 if (BPF_OP(pc->code) == BPF_MOD) {
1095 status = sljit_emit_op2(compiler,
1096 SLJIT_AND,
1097 BJ_AREG, 0,
1098 BJ_AREG, 0,
1099 SLJIT_IMM, k - 1);
1100 } else {
1101 int shift = 0;
1102
1103 /*
1104 * Do shift = __builtin_ctz(k).
1105 * The loop is slower, but that's ok.
1106 */
1107 while (k > 1) {
1108 k >>= 1;
1109 shift++;
1110 }
1111
1112 if (shift != 0) {
1113 status = sljit_emit_op2(compiler,
1114 SLJIT_LSHR|SLJIT_INT_OP,
1115 BJ_AREG, 0,
1116 BJ_AREG, 0,
1117 SLJIT_IMM, shift);
1118 }
1119 }
1120
1121 return status;
1122 }
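/*
 * Example: for k == 8, BPF_DIV emits A >>= 3 and BPF_MOD emits A &= 7;
 * for k == 1, BPF_MOD emits A &= 0 while BPF_DIV emits nothing at all.
 */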
1123
1124 #if !defined(BPFJIT_USE_UDIV)
1125 static sljit_uw
1126 divide(sljit_uw x, sljit_uw y)
1127 {
1128
1129 return (uint32_t)x / (uint32_t)y;
1130 }
1131
1132 static sljit_uw
1133 modulus(sljit_uw x, sljit_uw y)
1134 {
1135
1136 return (uint32_t)x % (uint32_t)y;
1137 }
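/*
 * Both helpers truncate their word-sized arguments to 32 bits because
 * BPF arithmetic is defined on uint32_t; sljit widens the result back
 * to sljit_uw in the return register.
 */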
1138 #endif
1139
1140 /*
1141 * Emit code for A = A / div or A = A % div.
1142 * @pc BPF_DIV or BPF_MOD instruction.
1143 */
1144 static int
1145 emit_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
1146 {
1147 int status;
1148 const bool xdiv = BPF_OP(pc->code) == BPF_DIV;
1149 const bool xreg = BPF_SRC(pc->code) == BPF_X;
1150
1151 #if BJ_XREG == SLJIT_RETURN_REG || \
1152 BJ_XREG == SLJIT_SCRATCH_REG1 || \
1153 BJ_XREG == SLJIT_SCRATCH_REG2 || \
1154 BJ_AREG == SLJIT_SCRATCH_REG2
1155 #error "Unsupported register assignment."
1156 #endif
1157
1158 #if BJ_AREG != SLJIT_SCRATCH_REG1
1159 status = sljit_emit_op1(compiler,
1160 SLJIT_MOV,
1161 SLJIT_SCRATCH_REG1, 0,
1162 BJ_AREG, 0);
1163 if (status != SLJIT_SUCCESS)
1164 return status;
1165 #endif
1166
1167 status = sljit_emit_op1(compiler,
1168 SLJIT_MOV,
1169 SLJIT_SCRATCH_REG2, 0,
1170 xreg ? BJ_XREG : SLJIT_IMM,
1171 xreg ? 0 : (uint32_t)pc->k);
1172 if (status != SLJIT_SUCCESS)
1173 return status;
1174
1175 #if defined(BPFJIT_USE_UDIV)
1176 status = sljit_emit_op0(compiler, SLJIT_UDIV|SLJIT_INT_OP);
1177
1178 if (BPF_OP(pc->code) == BPF_DIV) {
1179 #if BJ_AREG != SLJIT_SCRATCH_REG1
1180 status = sljit_emit_op1(compiler,
1181 SLJIT_MOV,
1182 BJ_AREG, 0,
1183 SLJIT_SCRATCH_REG1, 0);
1184 #endif
1185 } else {
1186 #if BJ_AREG != SLJIT_SCRATCH_REG2
1187 /* Remainder is in SLJIT_SCRATCH_REG2. */
1188 status = sljit_emit_op1(compiler,
1189 SLJIT_MOV,
1190 BJ_AREG, 0,
1191 SLJIT_SCRATCH_REG2, 0);
1192 #endif
1193 }
1194
1195 if (status != SLJIT_SUCCESS)
1196 return status;
1197 #else
1198 status = sljit_emit_ijump(compiler,
1199 SLJIT_CALL2,
1200 SLJIT_IMM, xdiv ? SLJIT_FUNC_OFFSET(divide) :
1201 SLJIT_FUNC_OFFSET(modulus));
1202
1203 #if BJ_AREG != SLJIT_RETURN_REG
1204 status = sljit_emit_op1(compiler,
1205 SLJIT_MOV,
1206 BJ_AREG, 0,
1207 SLJIT_RETURN_REG, 0);
1208 if (status != SLJIT_SUCCESS)
1209 return status;
1210 #endif
1211 #endif
1212
1213 return status;
1214 }
1215
1216 /*
1217 * Return true if pc is a "read from packet" instruction.
1218 * If length is not NULL and the return value is true, *length is
1219 * set to the safe buffer length required by the read.
1220 */
1221 static bool
1222 read_pkt_insn(const struct bpf_insn *pc, bpfjit_abc_length_t *length)
1223 {
1224 bool rv;
1225 bpfjit_abc_length_t width = 0; /* XXXuninit */
1226
1227 switch (BPF_CLASS(pc->code)) {
1228 default:
1229 rv = false;
1230 break;
1231
1232 case BPF_LD:
1233 rv = BPF_MODE(pc->code) == BPF_ABS ||
1234 BPF_MODE(pc->code) == BPF_IND;
1235 if (rv) {
1236 width = read_width(pc);
1237 rv = (width != 0);
1238 }
1239 break;
1240
1241 case BPF_LDX:
1242 rv = BPF_MODE(pc->code) == BPF_MSH &&
1243 BPF_SIZE(pc->code) == BPF_B;
1244 width = 1;
1245 break;
1246 }
1247
1248 if (rv && length != NULL) {
1249 /*
1250 * Values greater than UINT32_MAX will generate
1251 * unconditional "return 0".
1252 */
1253 *length = (uint32_t)pc->k + width;
1254 }
1255
1256 return rv;
1257 }
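/*
 * Example: BPF_LD+BPF_H+BPF_ABS with k == 10 reads P[10:2], so *length
 * is set to 12; BPF_LDX+BPF_B+BPF_MSH with k == 14 sets *length to 15.
 */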
1258
1259 static void
1260 optimize_init(struct bpfjit_insn_data *insn_dat, size_t insn_count)
1261 {
1262 size_t i;
1263
1264 for (i = 0; i < insn_count; i++) {
1265 SLIST_INIT(&insn_dat[i].bjumps);
1266 insn_dat[i].invalid = BJ_INIT_NOBITS;
1267 }
1268 }
1269
1270 /*
1271 * The function divides instructions into blocks. The destination of a
1272 * jump instruction starts a new block; BPF_RET and BPF_JMP instructions
1273 * terminate a block. Blocks are linear, that is, there are no jumps out
1274 * of the middle of a block and no jumps into the middle of
1275 * a block.
1276 *
1277 * The function also sets bits in *initmask for memwords that
1278 * need to be initialized to zero. Note that this set should be empty
1279 * for any valid kernel filter program.
1280 */
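/*
 * Example: for the two-insn program { BPF_LD+BPF_MEM k=0,
 * BPF_RET+BPF_A }, M[0] is read before any store, so pass1 sets
 * BJ_INIT_MBIT(0) in *initmask and bpfjit_generate_code() will emit a
 * prologue store zeroing that memword.
 */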
1281 static bool
1282 optimize_pass1(const bpf_ctx_t *bc, const struct bpf_insn *insns,
1283 struct bpfjit_insn_data *insn_dat, size_t insn_count,
1284 bpf_memword_init_t *initmask, bpfjit_hint_t *hints)
1285 {
1286 struct bpfjit_jump *jtf;
1287 size_t i;
1288 uint32_t jt, jf;
1289 bpfjit_abc_length_t length;
1290 bpf_memword_init_t invalid; /* borrowed from bpf_filter() */
1291 bool unreachable;
1292
1293 const size_t memwords = GET_MEMWORDS(bc);
1294
1295 *hints = 0;
1296 *initmask = BJ_INIT_NOBITS;
1297
1298 unreachable = false;
1299 invalid = ~BJ_INIT_NOBITS;
1300
1301 for (i = 0; i < insn_count; i++) {
1302 if (!SLIST_EMPTY(&insn_dat[i].bjumps))
1303 unreachable = false;
1304 insn_dat[i].unreachable = unreachable;
1305
1306 if (unreachable)
1307 continue;
1308
1309 invalid |= insn_dat[i].invalid;
1310
1311 if (read_pkt_insn(&insns[i], &length) && length > UINT32_MAX)
1312 unreachable = true;
1313
1314 switch (BPF_CLASS(insns[i].code)) {
1315 case BPF_RET:
1316 if (BPF_RVAL(insns[i].code) == BPF_A)
1317 *initmask |= invalid & BJ_INIT_ABIT;
1318
1319 unreachable = true;
1320 continue;
1321
1322 case BPF_LD:
1323 if (BPF_MODE(insns[i].code) == BPF_ABS)
1324 *hints |= BJ_HINT_ABS;
1325
1326 if (BPF_MODE(insns[i].code) == BPF_IND) {
1327 *hints |= BJ_HINT_IND | BJ_HINT_XREG;
1328 *initmask |= invalid & BJ_INIT_XBIT;
1329 }
1330
1331 if (BPF_MODE(insns[i].code) == BPF_MEM &&
1332 (uint32_t)insns[i].k < memwords) {
1333 *initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
1334 }
1335
1336 invalid &= ~BJ_INIT_ABIT;
1337 continue;
1338
1339 case BPF_LDX:
1340 *hints |= BJ_HINT_XREG | BJ_HINT_LDX;
1341
1342 if (BPF_MODE(insns[i].code) == BPF_MEM &&
1343 (uint32_t)insns[i].k < memwords) {
1344 *initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
1345 }
1346
1347 if (BPF_MODE(insns[i].code) == BPF_MSH &&
1348 BPF_SIZE(insns[i].code) == BPF_B) {
1349 *hints |= BJ_HINT_MSH;
1350 }
1351
1352 invalid &= ~BJ_INIT_XBIT;
1353 continue;
1354
1355 case BPF_ST:
1356 *initmask |= invalid & BJ_INIT_ABIT;
1357
1358 if ((uint32_t)insns[i].k < memwords)
1359 invalid &= ~BJ_INIT_MBIT(insns[i].k);
1360
1361 continue;
1362
1363 case BPF_STX:
1364 *hints |= BJ_HINT_XREG;
1365 *initmask |= invalid & BJ_INIT_XBIT;
1366
1367 if ((uint32_t)insns[i].k < memwords)
1368 invalid &= ~BJ_INIT_MBIT(insns[i].k);
1369
1370 continue;
1371
1372 case BPF_ALU:
1373 *initmask |= invalid & BJ_INIT_ABIT;
1374
1375 if (insns[i].code != (BPF_ALU|BPF_NEG) &&
1376 BPF_SRC(insns[i].code) == BPF_X) {
1377 *hints |= BJ_HINT_XREG;
1378 *initmask |= invalid & BJ_INIT_XBIT;
1379 }
1380
1381 invalid &= ~BJ_INIT_ABIT;
1382 continue;
1383
1384 case BPF_MISC:
1385 switch (BPF_MISCOP(insns[i].code)) {
1386 case BPF_TAX: // X <- A
1387 *hints |= BJ_HINT_XREG;
1388 *initmask |= invalid & BJ_INIT_ABIT;
1389 invalid &= ~BJ_INIT_XBIT;
1390 continue;
1391
1392 case BPF_TXA: // A <- X
1393 *hints |= BJ_HINT_XREG;
1394 *initmask |= invalid & BJ_INIT_XBIT;
1395 invalid &= ~BJ_INIT_ABIT;
1396 continue;
1397
1398 case BPF_COPX:
1399 *hints |= BJ_HINT_XREG | BJ_HINT_COPX;
1400 /* FALLTHROUGH */
1401
1402 case BPF_COP:
1403 *hints |= BJ_HINT_COP;
1404 *initmask |= invalid & BJ_INIT_ABIT;
1405 invalid &= ~BJ_INIT_ABIT;
1406 continue;
1407 }
1408
1409 continue;
1410
1411 case BPF_JMP:
1412 /* Initialize abc_length for ABC pass. */
1413 insn_dat[i].u.jdata.abc_length = MAX_ABC_LENGTH;
1414
1415 if (BPF_SRC(insns[i].code) == BPF_X)
1416 *hints |= BJ_HINT_XREG;
1417
1418 if (BPF_OP(insns[i].code) == BPF_JA) {
1419 jt = jf = insns[i].k;
1420 } else {
1421 jt = insns[i].jt;
1422 jf = insns[i].jf;
1423 }
1424
1425 if (jt >= insn_count - (i + 1) ||
1426 jf >= insn_count - (i + 1)) {
1427 return false;
1428 }
1429
1430 if (jt > 0 && jf > 0)
1431 unreachable = true;
1432
1433 jt += i + 1;
1434 jf += i + 1;
1435
1436 jtf = insn_dat[i].u.jdata.jtf;
1437
1438 jtf[0].jdata = &insn_dat[i].u.jdata;
1439 SLIST_INSERT_HEAD(&insn_dat[jt].bjumps,
1440 &jtf[0], entries);
1441
1442 if (jf != jt) {
1443 jtf[1].jdata = &insn_dat[i].u.jdata;
1444 SLIST_INSERT_HEAD(&insn_dat[jf].bjumps,
1445 &jtf[1], entries);
1446 }
1447
1448 insn_dat[jf].invalid |= invalid;
1449 insn_dat[jt].invalid |= invalid;
1450 invalid = 0;
1451
1452 continue;
1453 }
1454 }
1455
1456 return true;
1457 }
1458
1459 /*
1460 * Array Bounds Check Elimination (ABC) pass.
1461 */
1462 static void
1463 optimize_pass2(const bpf_ctx_t *bc, const struct bpf_insn *insns,
1464 struct bpfjit_insn_data *insn_dat, size_t insn_count)
1465 {
1466 struct bpfjit_jump *jmp;
1467 const struct bpf_insn *pc;
1468 struct bpfjit_insn_data *pd;
1469 size_t i;
1470 bpfjit_abc_length_t length, abc_length = 0;
1471
1472 const size_t extwords = GET_EXTWORDS(bc);
1473
1474 for (i = insn_count; i != 0; i--) {
1475 pc = &insns[i-1];
1476 pd = &insn_dat[i-1];
1477
1478 if (pd->unreachable)
1479 continue;
1480
1481 switch (BPF_CLASS(pc->code)) {
1482 case BPF_RET:
1483 /*
1484 * It's quite common for bpf programs to
1485 * check packet bytes in increasing order
1486 * and return zero if the bytes don't match
1487 * a specified criterion. Such programs disable
1488 * the ABC optimization completely because for
1489 * every jump there is a branch with no read
1490 * instruction.
1491 * Because it has no side effects, BPF_STMT(BPF_RET+BPF_K, 0)
1492 * is indistinguishable from an out-of-bounds load.
1493 * Therefore, abc_length can be set to
1494 * MAX_ABC_LENGTH here, enabling ABC for many
1495 * bpf programs.
1496 * If this pass encounters any
1497 * instruction with a side effect, it
1498 * resets abc_length.
1499 */
1500 if (BPF_RVAL(pc->code) == BPF_K && pc->k == 0)
1501 abc_length = MAX_ABC_LENGTH;
1502 else
1503 abc_length = 0;
1504 break;
1505
1506 case BPF_MISC:
1507 if (BPF_MISCOP(pc->code) == BPF_COP ||
1508 BPF_MISCOP(pc->code) == BPF_COPX) {
1509 /* COP instructions can have side effects. */
1510 abc_length = 0;
1511 }
1512 break;
1513
1514 case BPF_ST:
1515 case BPF_STX:
1516 if (extwords != 0) {
1517 /* Writes to external memory are visible after the call. */
1518 abc_length = 0;
1519 }
1520 break;
1521
1522 case BPF_JMP:
1523 abc_length = pd->u.jdata.abc_length;
1524 break;
1525
1526 default:
1527 if (read_pkt_insn(pc, &length)) {
1528 if (abc_length < length)
1529 abc_length = length;
1530 pd->u.rdata.abc_length = abc_length;
1531 }
1532 break;
1533 }
1534
1535 SLIST_FOREACH(jmp, &pd->bjumps, entries) {
1536 if (jmp->jdata->abc_length > abc_length)
1537 jmp->jdata->abc_length = abc_length;
1538 }
1539 }
1540 }
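/*
 * Example: for { BPF_LD+BPF_W+BPF_ABS k=0, BPF_LD+BPF_W+BPF_ABS k=20,
 * BPF_RET+BPF_K k=-1 }, this backward pass propagates abc_length 24 to
 * the first load; pass3 below then arranges a single
 * "if (buflen < 24) return 0" check before that load instead of one
 * check per read.
 */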
1541
1542 static void
1543 optimize_pass3(const struct bpf_insn *insns,
1544 struct bpfjit_insn_data *insn_dat, size_t insn_count)
1545 {
1546 struct bpfjit_jump *jmp;
1547 size_t i;
1548 bpfjit_abc_length_t checked_length = 0;
1549
1550 for (i = 0; i < insn_count; i++) {
1551 if (insn_dat[i].unreachable)
1552 continue;
1553
1554 SLIST_FOREACH(jmp, &insn_dat[i].bjumps, entries) {
1555 if (jmp->jdata->checked_length < checked_length)
1556 checked_length = jmp->jdata->checked_length;
1557 }
1558
1559 if (BPF_CLASS(insns[i].code) == BPF_JMP) {
1560 insn_dat[i].u.jdata.checked_length = checked_length;
1561 } else if (read_pkt_insn(&insns[i], NULL)) {
1562 struct bpfjit_read_pkt_data *rdata =
1563 &insn_dat[i].u.rdata;
1564 rdata->check_length = 0;
1565 if (checked_length < rdata->abc_length) {
1566 checked_length = rdata->abc_length;
1567 rdata->check_length = checked_length;
1568 }
1569 }
1570 }
1571 }
1572
1573 static bool
1574 optimize(const bpf_ctx_t *bc, const struct bpf_insn *insns,
1575 struct bpfjit_insn_data *insn_dat, size_t insn_count,
1576 bpf_memword_init_t *initmask, bpfjit_hint_t *hints)
1577 {
1578
1579 optimize_init(insn_dat, insn_count);
1580
1581 if (!optimize_pass1(bc, insns, insn_dat, insn_count, initmask, hints))
1582 return false;
1583
1584 optimize_pass2(bc, insns, insn_dat, insn_count);
1585 optimize_pass3(insns, insn_dat, insn_count);
1586
1587 return true;
1588 }
1589
1590 /*
1591 * Convert BPF_ALU operations other than BPF_NEG, BPF_DIV and BPF_MOD
to an sljit operation.
1592 */
1593 static int
1594 bpf_alu_to_sljit_op(const struct bpf_insn *pc)
1595 {
1596 const int bad = SLJIT_UNUSED;
1597
1598 /*
1599 * Note: all supported 64bit arches have a 32bit multiply
1600 * instruction, so SLJIT_INT_OP doesn't add any overhead.
1601 */
1602 switch (BPF_OP(pc->code)) {
1603 case BPF_ADD: return SLJIT_ADD;
1604 case BPF_SUB: return SLJIT_SUB;
1605 case BPF_MUL: return SLJIT_MUL|SLJIT_INT_OP;
1606 case BPF_OR: return SLJIT_OR;
1607 case BPF_XOR: return SLJIT_XOR;
1608 case BPF_AND: return SLJIT_AND;
1609 case BPF_LSH: return (pc->k > 31) ? bad : SLJIT_SHL;
1610 case BPF_RSH: return (pc->k > 31) ? bad : SLJIT_LSHR|SLJIT_INT_OP;
1611 default:
1612 return bad;
1613 }
1614 }
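/*
 * Shift amounts greater than 31 are rejected here with SLJIT_UNUSED,
 * which makes generate_insn_code() fail (and bpfjit_generate_code()
 * return NULL) rather than emit a shift whose result differs between
 * CPUs.
 */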
1615
1616 /*
1617 * Convert BPF_JMP operations except BPF_JA to an sljit condition.
1618 */
1619 static int
1620 bpf_jmp_to_sljit_cond(const struct bpf_insn *pc, bool negate)
1621 {
1622 /*
1623 * Note: all supported 64bit arches have 32bit comparison
1624 * instructions, so SLJIT_INT_OP doesn't add any overhead.
1625 */
1626 int rv = SLJIT_INT_OP;
1627
1628 switch (BPF_OP(pc->code)) {
1629 case BPF_JGT:
1630 rv |= negate ? SLJIT_C_LESS_EQUAL : SLJIT_C_GREATER;
1631 break;
1632 case BPF_JGE:
1633 rv |= negate ? SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL;
1634 break;
1635 case BPF_JEQ:
1636 rv |= negate ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL;
1637 break;
1638 case BPF_JSET:
1639 rv |= negate ? SLJIT_C_EQUAL : SLJIT_C_NOT_EQUAL;
1640 break;
1641 default:
1642 BJ_ASSERT(false);
1643 }
1644
1645 return rv;
1646 }
1647
1648 /*
1649 * Convert BPF_K and BPF_X to an sljit operand (immediate or register).
1650 */
1651 static int
1652 kx_to_reg(const struct bpf_insn *pc)
1653 {
1654
1655 switch (BPF_SRC(pc->code)) {
1656 case BPF_K: return SLJIT_IMM;
1657 case BPF_X: return BJ_XREG;
1658 default:
1659 BJ_ASSERT(false);
1660 return 0;
1661 }
1662 }
1663
1664 static sljit_sw
1665 kx_to_reg_arg(const struct bpf_insn *pc)
1666 {
1667
1668 switch (BPF_SRC(pc->code)) {
1669 case BPF_K: return (uint32_t)pc->k; /* SLJIT_IMM, pc->k, */
1670 case BPF_X: return 0; /* BJ_XREG, 0, */
1671 default:
1672 BJ_ASSERT(false);
1673 return 0;
1674 }
1675 }
1676
1677 static bool
1678 generate_insn_code(struct sljit_compiler *compiler, bpfjit_hint_t hints,
1679 const bpf_ctx_t *bc, const struct bpf_insn *insns,
1680 struct bpfjit_insn_data *insn_dat, size_t insn_count)
1681 {
1682 /* a list of jumps to the out-of-bounds "return 0" of the generated function */
1683 struct sljit_jump **ret0;
1684 size_t ret0_size, ret0_maxsize;
1685
1686 struct sljit_jump *jump;
1687 struct sljit_label *label;
1688 const struct bpf_insn *pc;
1689 struct bpfjit_jump *bjump, *jtf;
1690 struct sljit_jump *to_mchain_jump;
1691
1692 size_t i;
1693 int status;
1694 int branching, negate;
1695 unsigned int rval, mode, src, op;
1696 uint32_t jt, jf;
1697
1698 bool unconditional_ret;
1699 bool rv;
1700
1701 const size_t extwords = GET_EXTWORDS(bc);
1702 const size_t memwords = GET_MEMWORDS(bc);
1703
1704 ret0 = NULL;
1705 rv = false;
1706
1707 ret0_size = 0;
1708 ret0_maxsize = 64;
1709 ret0 = BJ_ALLOC(ret0_maxsize * sizeof(ret0[0]));
1710 if (ret0 == NULL)
1711 goto fail;
1712
1713 /* reset sjump members of jdata */
1714 for (i = 0; i < insn_count; i++) {
1715 if (insn_dat[i].unreachable ||
1716 BPF_CLASS(insns[i].code) != BPF_JMP) {
1717 continue;
1718 }
1719
1720 jtf = insn_dat[i].u.jdata.jtf;
1721 jtf[0].sjump = jtf[1].sjump = NULL;
1722 }
1723
1724 /* main loop */
1725 for (i = 0; i < insn_count; i++) {
1726 if (insn_dat[i].unreachable)
1727 continue;
1728
1729 /*
1730 * Resolve jumps to the current insn.
1731 */
1732 label = NULL;
1733 SLIST_FOREACH(bjump, &insn_dat[i].bjumps, entries) {
1734 if (bjump->sjump != NULL) {
1735 if (label == NULL)
1736 label = sljit_emit_label(compiler);
1737 if (label == NULL)
1738 goto fail;
1739 sljit_set_label(bjump->sjump, label);
1740 }
1741 }
1742
1743 to_mchain_jump = NULL;
1744 unconditional_ret = false;
1745
1746 if (read_pkt_insn(&insns[i], NULL)) {
1747 if (insn_dat[i].u.rdata.check_length > UINT32_MAX) {
1748 /* Jump to "return 0" unconditionally. */
1749 unconditional_ret = true;
1750 jump = sljit_emit_jump(compiler, SLJIT_JUMP);
1751 if (jump == NULL)
1752 goto fail;
1753 if (!append_jump(jump, &ret0,
1754 &ret0_size, &ret0_maxsize))
1755 goto fail;
1756 } else if (insn_dat[i].u.rdata.check_length > 0) {
1757 /* if (buflen < check_length) return 0; */
1758 jump = sljit_emit_cmp(compiler,
1759 SLJIT_C_LESS,
1760 BJ_BUFLEN, 0,
1761 SLJIT_IMM,
1762 insn_dat[i].u.rdata.check_length);
1763 if (jump == NULL)
1764 goto fail;
1765 #ifdef _KERNEL
1766 to_mchain_jump = jump;
1767 #else
1768 if (!append_jump(jump, &ret0,
1769 &ret0_size, &ret0_maxsize))
1770 goto fail;
1771 #endif
1772 }
1773 }
1774
1775 pc = &insns[i];
1776 switch (BPF_CLASS(pc->code)) {
1777
1778 default:
1779 goto fail;
1780
1781 case BPF_LD:
1782 /* BPF_LD+BPF_IMM A <- k */
1783 if (pc->code == (BPF_LD|BPF_IMM)) {
1784 status = sljit_emit_op1(compiler,
1785 SLJIT_MOV,
1786 BJ_AREG, 0,
1787 SLJIT_IMM, (uint32_t)pc->k);
1788 if (status != SLJIT_SUCCESS)
1789 goto fail;
1790
1791 continue;
1792 }
1793
1794 /* BPF_LD+BPF_MEM A <- M[k] */
1795 if (pc->code == (BPF_LD|BPF_MEM)) {
1796 if ((uint32_t)pc->k >= memwords)
1797 goto fail;
1798 status = emit_memload(compiler,
1799 BJ_AREG, pc->k, extwords);
1800 if (status != SLJIT_SUCCESS)
1801 goto fail;
1802
1803 continue;
1804 }
1805
1806 /* BPF_LD+BPF_W+BPF_LEN A <- len */
1807 if (pc->code == (BPF_LD|BPF_W|BPF_LEN)) {
1808 status = sljit_emit_op1(compiler,
1809 SLJIT_MOV, /* size_t source */
1810 BJ_AREG, 0,
1811 SLJIT_MEM1(BJ_ARGS),
1812 offsetof(struct bpf_args, wirelen));
1813 if (status != SLJIT_SUCCESS)
1814 goto fail;
1815
1816 continue;
1817 }
1818
1819 mode = BPF_MODE(pc->code);
1820 if (mode != BPF_ABS && mode != BPF_IND)
1821 goto fail;
1822
1823 if (unconditional_ret)
1824 continue;
1825
1826 status = emit_pkt_read(compiler, hints, pc,
1827 to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
1828 if (status != SLJIT_SUCCESS)
1829 goto fail;
1830
1831 continue;
1832
1833 case BPF_LDX:
1834 mode = BPF_MODE(pc->code);
1835
1836 /* BPF_LDX+BPF_W+BPF_IMM X <- k */
1837 if (mode == BPF_IMM) {
1838 if (BPF_SIZE(pc->code) != BPF_W)
1839 goto fail;
1840 status = sljit_emit_op1(compiler,
1841 SLJIT_MOV,
1842 BJ_XREG, 0,
1843 SLJIT_IMM, (uint32_t)pc->k);
1844 if (status != SLJIT_SUCCESS)
1845 goto fail;
1846
1847 continue;
1848 }
1849
1850 /* BPF_LDX+BPF_W+BPF_LEN X <- len */
1851 if (mode == BPF_LEN) {
1852 if (BPF_SIZE(pc->code) != BPF_W)
1853 goto fail;
1854 status = sljit_emit_op1(compiler,
1855 SLJIT_MOV, /* size_t source */
1856 BJ_XREG, 0,
1857 SLJIT_MEM1(BJ_ARGS),
1858 offsetof(struct bpf_args, wirelen));
1859 if (status != SLJIT_SUCCESS)
1860 goto fail;
1861
1862 continue;
1863 }
1864
1865 /* BPF_LDX+BPF_W+BPF_MEM X <- M[k] */
1866 if (mode == BPF_MEM) {
1867 if (BPF_SIZE(pc->code) != BPF_W)
1868 goto fail;
1869 if ((uint32_t)pc->k >= memwords)
1870 goto fail;
1871 status = emit_memload(compiler,
1872 BJ_XREG, pc->k, extwords);
1873 if (status != SLJIT_SUCCESS)
1874 goto fail;
1875
1876 continue;
1877 }
1878
1879 /* BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf) */
1880 if (mode != BPF_MSH || BPF_SIZE(pc->code) != BPF_B)
1881 goto fail;
1882
1883 if (unconditional_ret)
1884 continue;
1885
1886 status = emit_msh(compiler, hints, pc,
1887 to_mchain_jump, &ret0, &ret0_size, &ret0_maxsize);
1888 if (status != SLJIT_SUCCESS)
1889 goto fail;
1890
1891 continue;
1892
1893 case BPF_ST:
1894 if (pc->code != BPF_ST ||
1895 (uint32_t)pc->k >= memwords) {
1896 goto fail;
1897 }
1898
1899 status = emit_memstore(compiler,
1900 BJ_AREG, pc->k, extwords);
1901 if (status != SLJIT_SUCCESS)
1902 goto fail;
1903
1904 continue;
1905
1906 case BPF_STX:
1907 if (pc->code != BPF_STX ||
1908 (uint32_t)pc->k >= memwords) {
1909 goto fail;
1910 }
1911
1912 status = emit_memstore(compiler,
1913 BJ_XREG, pc->k, extwords);
1914 if (status != SLJIT_SUCCESS)
1915 goto fail;
1916
1917 continue;
1918
1919 case BPF_ALU:
1920 if (pc->code == (BPF_ALU|BPF_NEG)) {
1921 status = sljit_emit_op1(compiler,
1922 SLJIT_NEG,
1923 BJ_AREG, 0,
1924 BJ_AREG, 0);
1925 if (status != SLJIT_SUCCESS)
1926 goto fail;
1927
1928 continue;
1929 }
1930
1931 op = BPF_OP(pc->code);
1932 if (op != BPF_DIV && op != BPF_MOD) {
1933 const int op2 = bpf_alu_to_sljit_op(pc);
1934
1935 if (op2 == SLJIT_UNUSED)
1936 goto fail;
1937 status = sljit_emit_op2(compiler,
1938 op2, BJ_AREG, 0, BJ_AREG, 0,
1939 kx_to_reg(pc), kx_to_reg_arg(pc));
1940 if (status != SLJIT_SUCCESS)
1941 goto fail;
1942
1943 continue;
1944 }
1945
1946 /* BPF_DIV/BPF_MOD */
1947
1948 src = BPF_SRC(pc->code);
1949 if (src != BPF_X && src != BPF_K)
1950 goto fail;
1951
1952 /* division by zero? */
1953 if (src == BPF_X) {
1954 jump = sljit_emit_cmp(compiler,
1955 SLJIT_C_EQUAL|SLJIT_INT_OP,
1956 BJ_XREG, 0,
1957 SLJIT_IMM, 0);
1958 if (jump == NULL)
1959 goto fail;
1960 if (!append_jump(jump, &ret0,
1961 &ret0_size, &ret0_maxsize))
1962 goto fail;
1963 } else if (pc->k == 0) {
1964 jump = sljit_emit_jump(compiler, SLJIT_JUMP);
1965 if (jump == NULL)
1966 goto fail;
1967 if (!append_jump(jump, &ret0,
1968 &ret0_size, &ret0_maxsize))
1969 goto fail;
1970 }
1971
1972 if (src == BPF_X) {
1973 status = emit_moddiv(compiler, pc);
1974 if (status != SLJIT_SUCCESS)
1975 goto fail;
1976 } else if (pc->k != 0) {
1977 if (pc->k & (pc->k - 1)) {
1978 status = emit_moddiv(compiler, pc);
1979 } else {
1980 status = emit_pow2_moddiv(compiler, pc);
1981 }
1982 if (status != SLJIT_SUCCESS)
1983 goto fail;
1984 }
1985
1986 continue;
1987
1988 case BPF_JMP:
1989 op = BPF_OP(pc->code);
1990 if (op == BPF_JA) {
1991 jt = jf = pc->k;
1992 } else {
1993 jt = pc->jt;
1994 jf = pc->jf;
1995 }
1996
1997 negate = (jt == 0) ? 1 : 0;
1998 branching = (jt == jf) ? 0 : 1;
1999 jtf = insn_dat[i].u.jdata.jtf;
2000
2001 if (branching) {
2002 if (op != BPF_JSET) {
2003 jump = sljit_emit_cmp(compiler,
2004 bpf_jmp_to_sljit_cond(pc, negate),
2005 BJ_AREG, 0,
2006 kx_to_reg(pc), kx_to_reg_arg(pc));
2007 } else {
2008 status = sljit_emit_op2(compiler,
2009 SLJIT_AND,
2010 BJ_TMP1REG, 0,
2011 BJ_AREG, 0,
2012 kx_to_reg(pc), kx_to_reg_arg(pc));
2013 if (status != SLJIT_SUCCESS)
2014 goto fail;
2015
2016 jump = sljit_emit_cmp(compiler,
2017 bpf_jmp_to_sljit_cond(pc, negate),
2018 BJ_TMP1REG, 0,
2019 SLJIT_IMM, 0);
2020 }
2021
2022 if (jump == NULL)
2023 goto fail;
2024
2025 BJ_ASSERT(jtf[negate].sjump == NULL);
2026 jtf[negate].sjump = jump;
2027 }
2028
2029 if (!branching || (jt != 0 && jf != 0)) {
2030 jump = sljit_emit_jump(compiler, SLJIT_JUMP);
2031 if (jump == NULL)
2032 goto fail;
2033
2034 BJ_ASSERT(jtf[branching].sjump == NULL);
2035 jtf[branching].sjump = jump;
2036 }
2037
2038 continue;
2039
2040 case BPF_RET:
2041 rval = BPF_RVAL(pc->code);
2042 if (rval == BPF_X)
2043 goto fail;
2044
2045 /* BPF_RET+BPF_K accepts k bytes */
2046 if (rval == BPF_K) {
2047 status = sljit_emit_return(compiler,
2048 SLJIT_MOV_UI,
2049 SLJIT_IMM, (uint32_t)pc->k);
2050 if (status != SLJIT_SUCCESS)
2051 goto fail;
2052 }
2053
2054 /* BPF_RET+BPF_A accepts A bytes */
2055 if (rval == BPF_A) {
2056 status = sljit_emit_return(compiler,
2057 SLJIT_MOV_UI,
2058 BJ_AREG, 0);
2059 if (status != SLJIT_SUCCESS)
2060 goto fail;
2061 }
2062
2063 continue;
2064
2065 case BPF_MISC:
2066 switch (BPF_MISCOP(pc->code)) {
2067 case BPF_TAX:
2068 status = sljit_emit_op1(compiler,
2069 SLJIT_MOV_UI,
2070 BJ_XREG, 0,
2071 BJ_AREG, 0);
2072 if (status != SLJIT_SUCCESS)
2073 goto fail;
2074
2075 continue;
2076
2077 case BPF_TXA:
2078 status = sljit_emit_op1(compiler,
2079 SLJIT_MOV,
2080 BJ_AREG, 0,
2081 BJ_XREG, 0);
2082 if (status != SLJIT_SUCCESS)
2083 goto fail;
2084
2085 continue;
2086
2087 case BPF_COP:
2088 case BPF_COPX:
2089 if (bc == NULL || bc->copfuncs == NULL)
2090 goto fail;
2091 if (BPF_MISCOP(pc->code) == BPF_COP &&
2092 (uint32_t)pc->k >= bc->nfuncs) {
2093 goto fail;
2094 }
2095
2096 status = emit_cop(compiler, hints, bc, pc,
2097 &ret0, &ret0_size, &ret0_maxsize);
2098 if (status != SLJIT_SUCCESS)
2099 goto fail;
2100
2101 continue;
2102 }
2103
2104 goto fail;
2105 } /* switch */
2106 } /* main loop */
2107
2108 BJ_ASSERT(ret0_size <= ret0_maxsize);
2109
2110 if (ret0_size > 0) {
2111 label = sljit_emit_label(compiler);
2112 if (label == NULL)
2113 goto fail;
2114 for (i = 0; i < ret0_size; i++)
2115 sljit_set_label(ret0[i], label);
2116 }
2117
2118 status = sljit_emit_return(compiler,
2119 SLJIT_MOV_UI,
2120 SLJIT_IMM, 0);
2121 if (status != SLJIT_SUCCESS)
2122 goto fail;
2123
2124 rv = true;
2125
2126 fail:
2127 if (ret0 != NULL)
2128 BJ_FREE(ret0, ret0_maxsize * sizeof(ret0[0]));
2129
2130 return rv;
2131 }
2132
2133 bpfjit_func_t
2134 bpfjit_generate_code(const bpf_ctx_t *bc,
2135 const struct bpf_insn *insns, size_t insn_count)
2136 {
2137 void *rv;
2138 struct sljit_compiler *compiler;
2139
2140 size_t i;
2141 int status;
2142
2143 /* optimization related */
2144 bpf_memword_init_t initmask;
2145 bpfjit_hint_t hints;
2146
2147 /* memory store location for the initial zeroing */
2148 sljit_si mem_reg;
2149 sljit_sw mem_off;
2150
2151 struct bpfjit_insn_data *insn_dat;
2152
2153 const size_t extwords = GET_EXTWORDS(bc);
2154 const size_t memwords = GET_MEMWORDS(bc);
2155 const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;
2156
2157 rv = NULL;
2158 compiler = NULL;
2159 insn_dat = NULL;
2160
2161 if (memwords > MAX_MEMWORDS)
2162 goto fail;
2163
2164 if (insn_count == 0 || insn_count > SIZE_MAX / sizeof(insn_dat[0]))
2165 goto fail;
2166
2167 insn_dat = BJ_ALLOC(insn_count * sizeof(insn_dat[0]));
2168 if (insn_dat == NULL)
2169 goto fail;
2170
2171 if (!optimize(bc, insns, insn_dat, insn_count, &initmask, &hints))
2172 goto fail;
2173
2174 compiler = sljit_create_compiler();
2175 if (compiler == NULL)
2176 goto fail;
2177
2178 #if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
2179 sljit_compiler_verbose(compiler, stderr);
2180 #endif
2181
2182 status = sljit_emit_enter(compiler,
2183 2, nscratches(hints), nsaveds(hints), sizeof(struct bpfjit_stack));
2184 if (status != SLJIT_SUCCESS)
2185 goto fail;
2186
2187 if (hints & BJ_HINT_COP) {
2188 /* save ctx argument */
2189 status = sljit_emit_op1(compiler,
2190 SLJIT_MOV_P,
2191 SLJIT_MEM1(SLJIT_LOCALS_REG),
2192 offsetof(struct bpfjit_stack, ctx),
2193 BJ_CTX_ARG, 0);
2194 if (status != SLJIT_SUCCESS)
2195 goto fail;
2196 }
2197
2198 if (extwords == 0) {
2199 mem_reg = SLJIT_MEM1(SLJIT_LOCALS_REG);
2200 mem_off = offsetof(struct bpfjit_stack, mem);
2201 } else {
2202 /* copy "mem" argument from bpf_args to bpfjit_stack */
2203 status = sljit_emit_op1(compiler,
2204 SLJIT_MOV_P,
2205 BJ_TMP1REG, 0,
2206 SLJIT_MEM1(BJ_ARGS), offsetof(struct bpf_args, mem));
2207 if (status != SLJIT_SUCCESS)
2208 goto fail;
2209
2210 status = sljit_emit_op1(compiler,
2211 SLJIT_MOV_P,
2212 SLJIT_MEM1(SLJIT_LOCALS_REG),
2213 offsetof(struct bpfjit_stack, extmem),
2214 BJ_TMP1REG, 0);
2215 if (status != SLJIT_SUCCESS)
2216 goto fail;
2217
2218 mem_reg = SLJIT_MEM1(BJ_TMP1REG);
2219 mem_off = 0;
2220 }
2221
2222 /*
2223 * Exclude pre-initialized external memory words but keep the
2224 * initialization statuses of the A and X registers in case
2225 * bc->preinited wrongly sets those two bits.
2226 */
2227 initmask &= ~preinited | BJ_INIT_ABIT | BJ_INIT_XBIT;
2228
2229 #if defined(_KERNEL)
2230 /* bpf_filter() checks initialization of memwords. */
2231 BJ_ASSERT((initmask & (BJ_INIT_MBIT(memwords) - 1)) == 0);
2232 #endif
2233 for (i = 0; i < memwords; i++) {
2234 if (initmask & BJ_INIT_MBIT(i)) {
2235 /* M[i] = 0; */
2236 status = sljit_emit_op1(compiler,
2237 SLJIT_MOV_UI,
2238 mem_reg, mem_off + i * sizeof(uint32_t),
2239 SLJIT_IMM, 0);
2240 if (status != SLJIT_SUCCESS)
2241 goto fail;
2242 }
2243 }
2244
2245 if (initmask & BJ_INIT_ABIT) {
2246 /* A = 0; */
2247 status = sljit_emit_op1(compiler,
2248 SLJIT_MOV,
2249 BJ_AREG, 0,
2250 SLJIT_IMM, 0);
2251 if (status != SLJIT_SUCCESS)
2252 goto fail;
2253 }
2254
2255 if (initmask & BJ_INIT_XBIT) {
2256 /* X = 0; */
2257 status = sljit_emit_op1(compiler,
2258 SLJIT_MOV,
2259 BJ_XREG, 0,
2260 SLJIT_IMM, 0);
2261 if (status != SLJIT_SUCCESS)
2262 goto fail;
2263 }
2264
2265 status = load_buf_buflen(compiler);
2266 if (status != SLJIT_SUCCESS)
2267 goto fail;
2268
2269 if (!generate_insn_code(compiler, hints,
2270 bc, insns, insn_dat, insn_count)) {
2271 goto fail;
2272 }
2273
2274 rv = sljit_generate_code(compiler);
2275
2276 fail:
2277 if (compiler != NULL)
2278 sljit_free_compiler(compiler);
2279
2280 if (insn_dat != NULL)
2281 BJ_FREE(insn_dat, insn_count * sizeof(insn_dat[0]));
2282
2283 return (bpfjit_func_t)rv;
2284 }
2285
2286 void
2287 bpfjit_free_code(bpfjit_func_t code)
2288 {
2289
2290 sljit_free_code((void *)code);
2291 }
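/*
 * Userland usage sketch (error handling omitted; pkt and pktlen stand
 * for the caller's packet buffer and its length):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET+BPF_K, UINT32_MAX)
 *	};
 *	bpfjit_func_t fn = bpfjit_generate_code(NULL, insns, 1);
 *	struct bpf_args args = {
 *		.pkt = pkt, .wirelen = pktlen, .buflen = pktlen
 *	};
 *	uint32_t nbytes = fn(NULL, &args);
 *	bpfjit_free_code(fn);
 */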
2292