npf_bpf_comp.c revision 1.10.14.2
      1        1.1     rmind /*-
      2  1.10.14.2    martin  * Copyright (c) 2010-2019 The NetBSD Foundation, Inc.
      3        1.1     rmind  * All rights reserved.
      4        1.1     rmind  *
      5        1.1     rmind  * This material is based upon work partially supported by The
      6        1.1     rmind  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
      7        1.1     rmind  *
      8        1.1     rmind  * Redistribution and use in source and binary forms, with or without
      9        1.1     rmind  * modification, are permitted provided that the following conditions
     10        1.1     rmind  * are met:
     11        1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     12        1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     13        1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     14        1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     15        1.1     rmind  *    documentation and/or other materials provided with the distribution.
     16        1.1     rmind  *
     17        1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     18        1.1     rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     19        1.1     rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     20        1.1     rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     21        1.1     rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     22        1.1     rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     23        1.1     rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     24        1.1     rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     25        1.1     rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     26        1.1     rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     27        1.1     rmind  * POSSIBILITY OF SUCH DAMAGE.
     28        1.1     rmind  */
     29        1.1     rmind 
     30        1.1     rmind /*
     31        1.1     rmind  * BPF byte-code generation for NPF rules.
     32  1.10.14.2    martin  *
     33  1.10.14.2    martin  * Overview
     34  1.10.14.2    martin  *
      35  1.10.14.2    martin  *	Each NPF rule is compiled into a BPF micro-program.  There is a
      36  1.10.14.2    martin  *	BPF byte-code fragment for each piece of higher-level filtering
      37  1.10.14.2    martin  *	logic, e.g. to match the L4 protocol, IP/mask, etc.  The generation
      38  1.10.14.2    martin  *	process combines multiple BPF byte-code fragments into one program.
     39  1.10.14.2    martin  *
     40  1.10.14.2    martin  * Basic case
     41  1.10.14.2    martin  *
      42  1.10.14.2    martin  *	Consider a basic case where all filters should match.  They
      43  1.10.14.2    martin  *	are expressed as a logical conjunction, e.g.:
     44  1.10.14.2    martin  *
     45  1.10.14.2    martin  *		A and B and C and D
     46  1.10.14.2    martin  *
     47  1.10.14.2    martin  *	Each test (filter) criterion can be evaluated to true (match) or
     48  1.10.14.2    martin  *	false (no match) and the logic is as follows:
     49  1.10.14.2    martin  *
     50  1.10.14.2    martin  *	- If the value is true, then jump to the "next" test (offset 0).
     51  1.10.14.2    martin  *
     52  1.10.14.2    martin  *	- If the value is false, then jump to the JUMP_MAGIC value (0xff).
      53  1.10.14.2    martin  *	This "magic" value indicates that the jump offset will have to
      54  1.10.14.2    martin  *	be patched at a later stage.
     55  1.10.14.2    martin  *
      56  1.10.14.2    martin  *	Once all byte-code fragments are combined into one program,
      57  1.10.14.2    martin  *	there are two additional steps:
     58  1.10.14.2    martin  *
     59  1.10.14.2    martin  *	- Two instructions are appended at the end of the program: "return
     60  1.10.14.2    martin  *	success" followed by "return failure".
     61  1.10.14.2    martin  *
     62  1.10.14.2    martin  *	- All jumps with the JUMP_MAGIC value are patched to point to the
     63  1.10.14.2    martin  *	"return failure" instruction.
     64  1.10.14.2    martin  *
      65  1.10.14.2    martin  *	Therefore, if all filter criteria match, the "return success"
      66  1.10.14.2    martin  *	instruction is reached, indicating a successful match of the
      67  1.10.14.2    martin  *	rule.  Otherwise, if any criterion does not match, execution
      68  1.10.14.2    martin  *	takes the failure path and the rule does not match.
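                               *
                               *	As an illustration only (with simplified, made-up test fragments
                               *	and instruction indices), "A and B" would compile to roughly:
                               *
                               *		0: test A	true: jump "next"; false: jump JUMP_MAGIC
                               *		1: test B	true: jump "next"; false: jump JUMP_MAGIC
                               *		2: ret success
                               *		3: ret failure
                               *
                               *	where the JUMP_MAGIC jumps in instructions 0 and 1 are patched
                               *	to point to instruction 3, i.e. the "return failure" path.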
     69  1.10.14.2    martin  *
     70  1.10.14.2    martin  * Grouping
     71  1.10.14.2    martin  *
      72  1.10.14.2    martin  *	Filters can be grouped; a group has the meaning of a logical
      73  1.10.14.2    martin  *	disjunction, e.g.:
     74  1.10.14.2    martin  *
     75  1.10.14.2    martin  *		A and B and (C or D)
     76  1.10.14.2    martin  *
      77  1.10.14.2    martin  *	In such a case, the logic inside the group has to be inverted,
      78  1.10.14.2    martin  *	i.e. the jump values swapped.  If the test value is true, then
      79  1.10.14.2    martin  *	jump out of the group; if false, then jump "next".  At the end
      80  1.10.14.2    martin  *	of the group, an additional failure path is appended and the
      81  1.10.14.2    martin  *	JUMP_MAGIC jumps within the group are patched to jump past that path.
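                               *
                               *	Again as an illustration only (same simplified notation as above),
                               *	"A and (C or D)" would compile to roughly:
                               *
                               *		0: test A	true: jump "next"; false: jump JUMP_MAGIC
                               *		1: test C	true: jump out of the group (to 4); false: jump "next"
                               *		2: test D	true: jump out of the group (to 4); false: jump "next"
                               *		3: ret failure	(the group's own fall-through failure path)
                               *		4: ret success
                               *		5: ret failure
                               *
                               *	and the remaining JUMP_MAGIC in instruction 0 is patched to point
                               *	to instruction 5.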
     82        1.1     rmind  */
     83        1.1     rmind 
     84        1.1     rmind #include <sys/cdefs.h>
     85  1.10.14.2    martin __RCSID("$NetBSD: npf_bpf_comp.c,v 1.10.14.2 2020/04/13 08:05:55 martin Exp $");
     86        1.1     rmind 
     87        1.1     rmind #include <stdlib.h>
     88        1.1     rmind #include <stdbool.h>
     89        1.1     rmind #include <stddef.h>
     90        1.1     rmind #include <string.h>
     91        1.1     rmind #include <inttypes.h>
     92        1.1     rmind #include <err.h>
     93        1.1     rmind #include <assert.h>
     94        1.1     rmind 
     95        1.1     rmind #include <netinet/in.h>
     96        1.1     rmind #include <netinet/in_systm.h>
     97        1.9  christos #define	__FAVOR_BSD
     98        1.1     rmind #include <netinet/ip.h>
     99        1.1     rmind #include <netinet/ip6.h>
    100        1.1     rmind #include <netinet/udp.h>
    101        1.1     rmind #include <netinet/tcp.h>
    102        1.1     rmind #include <netinet/ip_icmp.h>
    103        1.1     rmind #include <netinet/icmp6.h>
    104        1.1     rmind 
    105        1.1     rmind #include <net/bpf.h>
    106        1.1     rmind 
    107        1.1     rmind #include "npfctl.h"
    108        1.1     rmind 
    109        1.1     rmind /*
     110        1.1     rmind  * Note: clear X_EQ_L4OFF when register X is invalidated, i.e. when it stores
     111        1.1     rmind  * something other than the L4 header offset.  Generally, when BPF_LDX is used.
    112        1.1     rmind  */
    113        1.1     rmind #define	FETCHED_L3		0x01
    114        1.6     rmind #define	CHECKED_L4		0x02
    115        1.6     rmind #define	X_EQ_L4OFF		0x04
    116        1.1     rmind 
    117        1.1     rmind struct npf_bpf {
    118        1.1     rmind 	/*
    119        1.1     rmind 	 * BPF program code, the allocated length (in bytes), the number
    120        1.1     rmind 	 * of logical blocks and the flags.
    121        1.1     rmind 	 */
    122        1.1     rmind 	struct bpf_program	prog;
    123        1.1     rmind 	size_t			alen;
    124        1.1     rmind 	u_int			nblocks;
    125        1.1     rmind 	sa_family_t		af;
    126        1.1     rmind 	uint32_t		flags;
    127        1.1     rmind 
    128  1.10.14.2    martin 	/*
    129  1.10.14.2    martin 	 * The current group offset (counted in BPF instructions)
    130  1.10.14.2    martin 	 * and block number at the start of the group.
    131  1.10.14.2    martin 	 */
    132        1.1     rmind 	bool			ingroup;
    133        1.1     rmind 	u_int			goff;
    134        1.1     rmind 	u_int			gblock;
    135        1.1     rmind 
    136        1.1     rmind 	/* BPF marks, allocated length and the real length. */
    137        1.1     rmind 	uint32_t *		marks;
    138        1.1     rmind 	size_t			malen;
    139        1.1     rmind 	size_t			mlen;
    140        1.1     rmind };
    141        1.1     rmind 
    142        1.1     rmind /*
    143        1.1     rmind  * NPF success and failure values to be returned from BPF.
    144        1.1     rmind  */
    145        1.1     rmind #define	NPF_BPF_SUCCESS		((u_int)-1)
    146        1.1     rmind #define	NPF_BPF_FAILURE		0
    147        1.1     rmind 
    148        1.1     rmind /*
    149        1.1     rmind  * Magic value to indicate the failure path, which is fixed up on completion.
    150        1.1     rmind  * Note: this is the longest jump offset in BPF, since the offset is one byte.
    151        1.1     rmind  */
    152        1.1     rmind #define	JUMP_MAGIC		0xff
    153        1.1     rmind 
    154        1.1     rmind /* Reduce re-allocations by expanding in 64 byte blocks. */
    155        1.1     rmind #define	ALLOC_MASK		(64 - 1)
    156        1.1     rmind #define	ALLOC_ROUND(x)		(((x) + ALLOC_MASK) & ~ALLOC_MASK)
    157        1.1     rmind 
    158        1.9  christos #ifndef IPV6_VERSION
    159        1.9  christos #define	IPV6_VERSION		0x60
    160        1.9  christos #endif
    161        1.9  christos 
    162        1.1     rmind npf_bpf_t *
    163        1.1     rmind npfctl_bpf_create(void)
    164        1.1     rmind {
    165        1.1     rmind 	return ecalloc(1, sizeof(npf_bpf_t));
    166        1.1     rmind }
    167        1.1     rmind 
    168        1.1     rmind static void
    169        1.1     rmind fixup_jumps(npf_bpf_t *ctx, u_int start, u_int end, bool swap)
    170        1.1     rmind {
    171        1.1     rmind 	struct bpf_program *bp = &ctx->prog;
    172        1.1     rmind 
    173        1.1     rmind 	for (u_int i = start; i < end; i++) {
    174        1.1     rmind 		struct bpf_insn *insn = &bp->bf_insns[i];
    175        1.1     rmind 		const u_int fail_off = end - i;
    176  1.10.14.2    martin 		bool seen_magic = false;
    177        1.1     rmind 
    178        1.1     rmind 		if (fail_off >= JUMP_MAGIC) {
    179        1.1     rmind 			errx(EXIT_FAILURE, "BPF generation error: "
    180        1.1     rmind 			    "the number of instructions is over the limit");
    181        1.1     rmind 		}
    182        1.1     rmind 		if (BPF_CLASS(insn->code) != BPF_JMP) {
    183        1.1     rmind 			continue;
    184        1.1     rmind 		}
    185  1.10.14.2    martin 		if (BPF_OP(insn->code) == BPF_JA) {
    186  1.10.14.2    martin 			/*
    187  1.10.14.2    martin 			 * BPF_JA can be used to jump to the failure path.
     188  1.10.14.2    martin 			 * If we are swapping, i.e. inside a group, then
    189  1.10.14.2    martin 			 * jump "next"; groups have a failure path appended
    190  1.10.14.2    martin 			 * at their end.
    191  1.10.14.2    martin 			 */
    192  1.10.14.2    martin 			if (insn->k == JUMP_MAGIC) {
    193  1.10.14.2    martin 				insn->k = swap ? 0 : fail_off;
    194  1.10.14.2    martin 			}
    195  1.10.14.2    martin 			continue;
    196  1.10.14.2    martin 		}
    197  1.10.14.2    martin 
    198  1.10.14.2    martin 		/*
    199  1.10.14.2    martin 		 * Fixup the "magic" value.  Swap only the "magic" jumps.
    200  1.10.14.2    martin 		 */
    201  1.10.14.2    martin 
    202  1.10.14.2    martin 		if (insn->jt == JUMP_MAGIC) {
    203  1.10.14.2    martin 			insn->jt = fail_off;
    204  1.10.14.2    martin 			seen_magic = true;
    205  1.10.14.2    martin 		}
    206  1.10.14.2    martin 		if (insn->jf == JUMP_MAGIC) {
    207  1.10.14.2    martin 			insn->jf = fail_off;
    208  1.10.14.2    martin 			seen_magic = true;
    209  1.10.14.2    martin 		}
    210  1.10.14.2    martin 
    211  1.10.14.2    martin 		if (seen_magic && swap) {
    212        1.1     rmind 			uint8_t jt = insn->jt;
    213        1.1     rmind 			insn->jt = insn->jf;
    214        1.1     rmind 			insn->jf = jt;
    215        1.1     rmind 		}
    216        1.1     rmind 	}
    217        1.1     rmind }
    218        1.1     rmind 
    219        1.1     rmind static void
    220        1.1     rmind add_insns(npf_bpf_t *ctx, struct bpf_insn *insns, size_t count)
    221        1.1     rmind {
    222        1.1     rmind 	struct bpf_program *bp = &ctx->prog;
    223        1.1     rmind 	size_t offset, len, reqlen;
    224        1.1     rmind 
    225        1.1     rmind 	/* Note: bf_len is the count of instructions. */
    226        1.1     rmind 	offset = bp->bf_len * sizeof(struct bpf_insn);
    227        1.1     rmind 	len = count * sizeof(struct bpf_insn);
    228        1.1     rmind 
    229        1.1     rmind 	/* Ensure the memory buffer for the program. */
    230        1.1     rmind 	reqlen = ALLOC_ROUND(offset + len);
    231        1.1     rmind 	if (reqlen > ctx->alen) {
    232        1.1     rmind 		bp->bf_insns = erealloc(bp->bf_insns, reqlen);
    233        1.1     rmind 		ctx->alen = reqlen;
    234        1.1     rmind 	}
    235        1.1     rmind 
    236        1.1     rmind 	/* Add the code block. */
    237        1.1     rmind 	memcpy((uint8_t *)bp->bf_insns + offset, insns, len);
    238        1.1     rmind 	bp->bf_len += count;
    239        1.1     rmind }
    240        1.1     rmind 
    241        1.1     rmind static void
    242        1.1     rmind done_raw_block(npf_bpf_t *ctx, const uint32_t *m, size_t len)
    243        1.1     rmind {
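                              	/* A mark block is laid out as { BM_* key, nargs, arg0, ..., argN-1 }. */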
    244        1.1     rmind 	size_t reqlen, nargs = m[1];
    245        1.1     rmind 
    246        1.1     rmind 	if ((len / sizeof(uint32_t) - 2) != nargs) {
    247        1.1     rmind 		errx(EXIT_FAILURE, "invalid BPF block description");
    248        1.1     rmind 	}
    249        1.1     rmind 	reqlen = ALLOC_ROUND(ctx->mlen + len);
    250        1.1     rmind 	if (reqlen > ctx->malen) {
    251        1.1     rmind 		ctx->marks = erealloc(ctx->marks, reqlen);
    252        1.1     rmind 		ctx->malen = reqlen;
    253        1.1     rmind 	}
    254        1.1     rmind 	memcpy((uint8_t *)ctx->marks + ctx->mlen, m, len);
    255        1.1     rmind 	ctx->mlen += len;
    256        1.1     rmind }
    257        1.1     rmind 
    258        1.1     rmind static void
    259        1.1     rmind done_block(npf_bpf_t *ctx, const uint32_t *m, size_t len)
    260        1.1     rmind {
    261        1.1     rmind 	done_raw_block(ctx, m, len);
    262        1.1     rmind 	ctx->nblocks++;
    263        1.1     rmind }
    264        1.1     rmind 
    265        1.1     rmind struct bpf_program *
    266        1.1     rmind npfctl_bpf_complete(npf_bpf_t *ctx)
    267        1.1     rmind {
    268        1.1     rmind 	struct bpf_program *bp = &ctx->prog;
    269        1.1     rmind 	const u_int retoff = bp->bf_len;
    270        1.1     rmind 
    271        1.8     rmind 	/* No instructions (optimised out). */
    272        1.8     rmind 	if (!bp->bf_len)
    273        1.8     rmind 		return NULL;
    274        1.8     rmind 
    275        1.1     rmind 	/* Add the return fragment (success and failure paths). */
    276        1.1     rmind 	struct bpf_insn insns_ret[] = {
    277        1.1     rmind 		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_SUCCESS),
    278        1.1     rmind 		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_FAILURE),
    279        1.1     rmind 	};
    280        1.1     rmind 	add_insns(ctx, insns_ret, __arraycount(insns_ret));
    281        1.1     rmind 
    282        1.1     rmind 	/* Fixup all jumps to the main failure path. */
    283        1.1     rmind 	fixup_jumps(ctx, 0, retoff, false);
    284        1.1     rmind 
    285        1.1     rmind 	return &ctx->prog;
    286        1.1     rmind }
    287        1.1     rmind 
    288        1.1     rmind const void *
    289        1.1     rmind npfctl_bpf_bmarks(npf_bpf_t *ctx, size_t *len)
    290        1.1     rmind {
    291        1.1     rmind 	*len = ctx->mlen;
    292        1.1     rmind 	return ctx->marks;
    293        1.1     rmind }
    294        1.1     rmind 
    295        1.1     rmind void
    296        1.1     rmind npfctl_bpf_destroy(npf_bpf_t *ctx)
    297        1.1     rmind {
    298        1.1     rmind 	free(ctx->prog.bf_insns);
    299        1.1     rmind 	free(ctx->marks);
    300        1.1     rmind 	free(ctx);
    301        1.1     rmind }
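
                              /*
                               * Example usage -- a sketch only, not lifted from the actual npfctl
                               * rule-building code; the MATCH_DST flag and npf_bpf_t come from
                               * npfctl.h and the protocol/port values are purely illustrative:
                               *
                               *	npf_bpf_t *bc = npfctl_bpf_create();
                               *	struct bpf_program *bp;
                               *
                               *	npfctl_bpf_proto(bc, AF_INET, IPPROTO_TCP);
                               *	npfctl_bpf_group_enter(bc);
                               *	npfctl_bpf_ports(bc, MATCH_DST, htons(80), htons(80));
                               *	npfctl_bpf_ports(bc, MATCH_DST, htons(443), htons(443));
                               *	npfctl_bpf_group_exit(bc, false);
                               *	bp = npfctl_bpf_complete(bc);
                               *
                               * The resulting bp->bf_insns/bp->bf_len and the marks returned by
                               * npfctl_bpf_bmarks() are then attached to the rule by the caller;
                               * npfctl_bpf_destroy() releases the context.
                               */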
    302        1.1     rmind 
    303        1.1     rmind /*
     304  1.10.14.2    martin  * npfctl_bpf_group_enter: begin a logical group.  It merely uses logical
     305        1.1     rmind  * disjunction (OR) for comparisons within the group.
    306        1.1     rmind  */
    307        1.1     rmind void
    308  1.10.14.2    martin npfctl_bpf_group_enter(npf_bpf_t *ctx)
    309        1.1     rmind {
    310        1.1     rmind 	struct bpf_program *bp = &ctx->prog;
    311        1.1     rmind 
    312        1.1     rmind 	assert(ctx->goff == 0);
    313        1.1     rmind 	assert(ctx->gblock == 0);
    314        1.1     rmind 
    315        1.1     rmind 	ctx->goff = bp->bf_len;
    316        1.1     rmind 	ctx->gblock = ctx->nblocks;
    317        1.1     rmind 	ctx->ingroup = true;
    318        1.1     rmind }
    319        1.1     rmind 
    320        1.1     rmind void
    321  1.10.14.2    martin npfctl_bpf_group_exit(npf_bpf_t *ctx, bool invert)
    322        1.1     rmind {
    323        1.1     rmind 	struct bpf_program *bp = &ctx->prog;
    324        1.1     rmind 	const size_t curoff = bp->bf_len;
    325        1.1     rmind 
    326        1.1     rmind 	/* If there are no blocks or only one - nothing to do. */
    327       1.10     rmind 	if (!invert && (ctx->nblocks - ctx->gblock) <= 1) {
    328        1.1     rmind 		ctx->goff = ctx->gblock = 0;
    329        1.1     rmind 		return;
    330        1.1     rmind 	}
    331        1.1     rmind 
    332        1.1     rmind 	/*
     333       1.10     rmind 	 * If inverting, then prepend a jump over the failure return below: on a
     334  1.10.14.2    martin 	 * match inside the group the failure path is taken, otherwise it is skipped.
    335       1.10     rmind 	 */
    336       1.10     rmind 	if (invert) {
    337       1.10     rmind 		struct bpf_insn insns_ret[] = {
    338       1.10     rmind 			BPF_STMT(BPF_JMP+BPF_JA, 1),
    339       1.10     rmind 		};
    340       1.10     rmind 		add_insns(ctx, insns_ret, __arraycount(insns_ret));
    341       1.10     rmind 	}
    342       1.10     rmind 
    343       1.10     rmind 	/*
    344        1.1     rmind 	 * Append a failure return as a fall-through i.e. if there is
    345        1.1     rmind 	 * no match within the group.
    346        1.1     rmind 	 */
    347        1.1     rmind 	struct bpf_insn insns_ret[] = {
    348        1.1     rmind 		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_FAILURE),
    349        1.1     rmind 	};
    350        1.1     rmind 	add_insns(ctx, insns_ret, __arraycount(insns_ret));
    351        1.1     rmind 
    352        1.1     rmind 	/*
    353        1.1     rmind 	 * Adjust jump offsets: on match - jump outside the group i.e.
    354        1.1     rmind 	 * to the current offset.  Otherwise, jump to the next instruction
     355        1.1     rmind 	 * which leads to the fall-through failure return above if none matches.
    356        1.1     rmind 	 */
    357        1.1     rmind 	fixup_jumps(ctx, ctx->goff, curoff, true);
    358        1.1     rmind 	ctx->goff = ctx->gblock = 0;
    359        1.1     rmind }
    360        1.1     rmind 
    361        1.1     rmind static void
    362        1.1     rmind fetch_l3(npf_bpf_t *ctx, sa_family_t af, u_int flags)
    363        1.1     rmind {
    364        1.1     rmind 	u_int ver;
    365        1.1     rmind 
    366        1.1     rmind 	switch (af) {
    367        1.1     rmind 	case AF_INET:
    368        1.1     rmind 		ver = IPVERSION;
    369        1.1     rmind 		break;
    370        1.1     rmind 	case AF_INET6:
    371        1.1     rmind 		ver = IPV6_VERSION >> 4;
    372        1.1     rmind 		break;
    373        1.1     rmind 	case AF_UNSPEC:
    374        1.1     rmind 		ver = 0;
    375        1.1     rmind 		break;
    376        1.1     rmind 	default:
    377        1.1     rmind 		abort();
    378        1.1     rmind 	}
    379        1.1     rmind 
    380        1.1     rmind 	/*
    381        1.7     rmind 	 * The memory store is populated with:
    382        1.1     rmind 	 * - BPF_MW_IPVER: IP version (4 or 6).
    383        1.1     rmind 	 * - BPF_MW_L4OFF: L4 header offset.
    384        1.1     rmind 	 * - BPF_MW_L4PROTO: L4 protocol.
    385        1.1     rmind 	 */
    386        1.1     rmind 	if ((ctx->flags & FETCHED_L3) == 0 || (af && ctx->af == 0)) {
    387        1.1     rmind 		const uint8_t jt = ver ? 0 : JUMP_MAGIC;
    388        1.1     rmind 		const uint8_t jf = ver ? JUMP_MAGIC : 0;
    389        1.1     rmind 		bool ingroup = ctx->ingroup;
    390        1.1     rmind 
    391        1.1     rmind 		/*
     392        1.1     rmind 		 * The L3 block cannot be inserted in the middle of a group and,
     393        1.1     rmind 		 * in fact, it never is.  Close the group here and re-enter it afterwards.
    394        1.1     rmind 		 */
    395        1.1     rmind 		if (ingroup) {
    396        1.1     rmind 			assert(ctx->nblocks == ctx->gblock);
    397  1.10.14.2    martin 			npfctl_bpf_group_exit(ctx, false);
    398        1.1     rmind 		}
    399        1.1     rmind 
    400        1.1     rmind 		/*
    401        1.1     rmind 		 * A <- IP version; A == expected-version?
    402        1.1     rmind 		 * If no particular version specified, check for non-zero.
    403        1.1     rmind 		 */
    404        1.7     rmind 		struct bpf_insn insns_af[] = {
    405        1.7     rmind 			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_IPVER),
    406        1.7     rmind 			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ver, jt, jf),
    407        1.7     rmind 		};
    408        1.7     rmind 		add_insns(ctx, insns_af, __arraycount(insns_af));
    409        1.7     rmind 		ctx->flags |= FETCHED_L3;
    410        1.1     rmind 		ctx->af = af;
    411        1.1     rmind 
    412        1.1     rmind 		if (af) {
    413        1.1     rmind 			uint32_t mwords[] = { BM_IPVER, 1, af };
    414        1.1     rmind 			done_raw_block(ctx, mwords, sizeof(mwords));
    415        1.1     rmind 		}
    416        1.1     rmind 		if (ingroup) {
    417  1.10.14.2    martin 			npfctl_bpf_group_enter(ctx);
    418        1.1     rmind 		}
    419        1.1     rmind 
    420        1.1     rmind 	} else if (af && af != ctx->af) {
    421        1.1     rmind 		errx(EXIT_FAILURE, "address family mismatch");
    422        1.1     rmind 	}
    423        1.1     rmind 
    424        1.1     rmind 	if ((flags & X_EQ_L4OFF) != 0 && (ctx->flags & X_EQ_L4OFF) == 0) {
    425        1.1     rmind 		/* X <- IP header length */
    426        1.1     rmind 		struct bpf_insn insns_hlen[] = {
    427        1.1     rmind 			BPF_STMT(BPF_LDX+BPF_MEM, BPF_MW_L4OFF),
    428        1.1     rmind 		};
    429        1.1     rmind 		add_insns(ctx, insns_hlen, __arraycount(insns_hlen));
    430        1.1     rmind 		ctx->flags |= X_EQ_L4OFF;
    431        1.1     rmind 	}
    432        1.1     rmind }
    433        1.1     rmind 
    434        1.1     rmind /*
    435        1.1     rmind  * npfctl_bpf_proto: code block to match IP version and L4 protocol.
    436        1.1     rmind  */
    437        1.1     rmind void
    438        1.1     rmind npfctl_bpf_proto(npf_bpf_t *ctx, sa_family_t af, int proto)
    439        1.1     rmind {
    440        1.1     rmind 	assert(af != AF_UNSPEC || proto != -1);
    441        1.1     rmind 
    442        1.1     rmind 	/* Note: fails if IP version does not match. */
    443        1.1     rmind 	fetch_l3(ctx, af, 0);
    444        1.1     rmind 	if (proto == -1) {
    445        1.1     rmind 		return;
    446        1.1     rmind 	}
    447        1.1     rmind 
    448        1.1     rmind 	struct bpf_insn insns_proto[] = {
    449        1.1     rmind 		/* A <- L4 protocol; A == expected-protocol? */
    450        1.1     rmind 		BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
    451        1.1     rmind 		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, proto, 0, JUMP_MAGIC),
    452        1.1     rmind 	};
    453        1.1     rmind 	add_insns(ctx, insns_proto, __arraycount(insns_proto));
    454        1.1     rmind 
    455        1.1     rmind 	uint32_t mwords[] = { BM_PROTO, 1, proto };
    456        1.1     rmind 	done_block(ctx, mwords, sizeof(mwords));
    457        1.6     rmind 	ctx->flags |= CHECKED_L4;
    458        1.1     rmind }
    459        1.1     rmind 
    460        1.1     rmind /*
    461        1.1     rmind  * npfctl_bpf_cidr: code block to match IPv4 or IPv6 CIDR.
    462        1.1     rmind  *
    463        1.1     rmind  * => IP address shall be in the network byte order.
    464        1.1     rmind  */
    465        1.1     rmind void
    466        1.1     rmind npfctl_bpf_cidr(npf_bpf_t *ctx, u_int opts, sa_family_t af,
    467        1.1     rmind     const npf_addr_t *addr, const npf_netmask_t mask)
    468        1.1     rmind {
    469        1.1     rmind 	const uint32_t *awords = (const uint32_t *)addr;
    470        1.1     rmind 	u_int nwords, length, maxmask, off;
    471        1.1     rmind 
    472        1.1     rmind 	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));
    473        1.1     rmind 	assert((mask && mask <= NPF_MAX_NETMASK) || mask == NPF_NO_NETMASK);
    474        1.1     rmind 
    475        1.1     rmind 	switch (af) {
    476        1.1     rmind 	case AF_INET:
    477        1.1     rmind 		maxmask = 32;
    478        1.1     rmind 		off = (opts & MATCH_SRC) ?
    479        1.1     rmind 		    offsetof(struct ip, ip_src) :
    480        1.1     rmind 		    offsetof(struct ip, ip_dst);
    481        1.1     rmind 		nwords = sizeof(struct in_addr) / sizeof(uint32_t);
    482        1.1     rmind 		break;
    483        1.1     rmind 	case AF_INET6:
    484        1.1     rmind 		maxmask = 128;
    485        1.1     rmind 		off = (opts & MATCH_SRC) ?
    486        1.1     rmind 		    offsetof(struct ip6_hdr, ip6_src) :
    487        1.1     rmind 		    offsetof(struct ip6_hdr, ip6_dst);
    488        1.1     rmind 		nwords = sizeof(struct in6_addr) / sizeof(uint32_t);
    489        1.1     rmind 		break;
    490        1.1     rmind 	default:
    491        1.1     rmind 		abort();
    492        1.1     rmind 	}
    493        1.1     rmind 
    494        1.1     rmind 	/* Ensure address family. */
    495        1.1     rmind 	fetch_l3(ctx, af, 0);
    496        1.1     rmind 
    497        1.1     rmind 	length = (mask == NPF_NO_NETMASK) ? maxmask : mask;
    498        1.1     rmind 
    499        1.1     rmind 	/* CAUTION: BPF operates in host byte-order. */
    500        1.1     rmind 	for (u_int i = 0; i < nwords; i++) {
    501        1.1     rmind 		const u_int woff = i * sizeof(uint32_t);
    502        1.1     rmind 		uint32_t word = ntohl(awords[i]);
    503        1.1     rmind 		uint32_t wordmask;
    504        1.1     rmind 
    505        1.1     rmind 		if (length >= 32) {
    506        1.1     rmind 			/* The mask is a full word - do not apply it. */
    507        1.1     rmind 			wordmask = 0;
    508        1.1     rmind 			length -= 32;
    509        1.1     rmind 		} else if (length) {
    510        1.4     rmind 			wordmask = 0xffffffff << (32 - length);
    511        1.1     rmind 			length = 0;
    512        1.1     rmind 		} else {
    513        1.3     rmind 			/* The mask became zero - skip the rest. */
    514        1.3     rmind 			break;
    515        1.1     rmind 		}
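
                              		/*
                              		 * Worked example (illustrative): for an IPv4 /25 prefix,
                              		 * length is 25 on the only word, so the wordmask becomes
                              		 * 0xffffffff << 7 == 0xffffff80 and only the top 25 bits
                              		 * are compared below.
                              		 */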
    516        1.1     rmind 
    517        1.1     rmind 		/* A <- IP address (or one word of it) */
    518        1.1     rmind 		struct bpf_insn insns_ip[] = {
    519        1.1     rmind 			BPF_STMT(BPF_LD+BPF_W+BPF_ABS, off + woff),
    520        1.1     rmind 		};
    521        1.1     rmind 		add_insns(ctx, insns_ip, __arraycount(insns_ip));
    522        1.1     rmind 
    523        1.1     rmind 		/* A <- (A & MASK) */
    524        1.1     rmind 		if (wordmask) {
    525        1.1     rmind 			struct bpf_insn insns_mask[] = {
    526        1.1     rmind 				BPF_STMT(BPF_ALU+BPF_AND+BPF_K, wordmask),
    527        1.1     rmind 			};
    528        1.1     rmind 			add_insns(ctx, insns_mask, __arraycount(insns_mask));
    529        1.1     rmind 		}
    530        1.1     rmind 
    531        1.1     rmind 		/* A == expected-IP-word ? */
    532        1.1     rmind 		struct bpf_insn insns_cmp[] = {
    533        1.1     rmind 			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, word, 0, JUMP_MAGIC),
    534        1.1     rmind 		};
    535        1.1     rmind 		add_insns(ctx, insns_cmp, __arraycount(insns_cmp));
    536        1.1     rmind 	}
    537        1.1     rmind 
    538        1.1     rmind 	uint32_t mwords[] = {
    539        1.1     rmind 		(opts & MATCH_SRC) ? BM_SRC_CIDR: BM_DST_CIDR, 6,
    540        1.1     rmind 		af, mask, awords[0], awords[1], awords[2], awords[3],
    541        1.1     rmind 	};
    542        1.1     rmind 	done_block(ctx, mwords, sizeof(mwords));
    543        1.1     rmind }
    544        1.1     rmind 
    545        1.1     rmind /*
    546        1.1     rmind  * npfctl_bpf_ports: code block to match TCP/UDP port range.
    547        1.1     rmind  *
    548        1.1     rmind  * => Port numbers shall be in the network byte order.
    549        1.1     rmind  */
    550        1.1     rmind void
    551        1.1     rmind npfctl_bpf_ports(npf_bpf_t *ctx, u_int opts, in_port_t from, in_port_t to)
    552        1.1     rmind {
    553        1.1     rmind 	const u_int sport_off = offsetof(struct udphdr, uh_sport);
    554        1.1     rmind 	const u_int dport_off = offsetof(struct udphdr, uh_dport);
    555        1.1     rmind 	u_int off;
    556        1.1     rmind 
    557        1.1     rmind 	/* TCP and UDP port offsets are the same. */
    558        1.1     rmind 	assert(sport_off == offsetof(struct tcphdr, th_sport));
    559        1.1     rmind 	assert(dport_off == offsetof(struct tcphdr, th_dport));
    560        1.6     rmind 	assert(ctx->flags & CHECKED_L4);
    561        1.1     rmind 
    562        1.1     rmind 	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));
    563        1.1     rmind 	off = (opts & MATCH_SRC) ? sport_off : dport_off;
    564        1.1     rmind 
    565        1.1     rmind 	/* X <- IP header length */
    566        1.2     rmind 	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);
    567        1.1     rmind 
    568        1.1     rmind 	struct bpf_insn insns_fetch[] = {
    569        1.1     rmind 		/* A <- port */
    570        1.1     rmind 		BPF_STMT(BPF_LD+BPF_H+BPF_IND, off),
    571        1.1     rmind 	};
    572        1.1     rmind 	add_insns(ctx, insns_fetch, __arraycount(insns_fetch));
    573        1.1     rmind 
    574        1.1     rmind 	/* CAUTION: BPF operates in host byte-order. */
    575        1.1     rmind 	from = ntohs(from);
    576        1.1     rmind 	to = ntohs(to);
    577        1.1     rmind 
    578        1.1     rmind 	if (from == to) {
    579        1.1     rmind 		/* Single port case. */
    580        1.1     rmind 		struct bpf_insn insns_port[] = {
    581        1.1     rmind 			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, from, 0, JUMP_MAGIC),
    582        1.1     rmind 		};
    583        1.1     rmind 		add_insns(ctx, insns_port, __arraycount(insns_port));
    584        1.1     rmind 	} else {
    585        1.1     rmind 		/* Port range case. */
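                              		/*
                              		 * from <= A <= to: if the port is below "from" or above "to",
                              		 * control falls onto the BPF_JA below, which jumps to the
                              		 * failure path; otherwise the BPF_JA is skipped over.
                              		 */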
    586        1.1     rmind 		struct bpf_insn insns_range[] = {
    587  1.10.14.2    martin 			BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, from, 0, 1),
    588  1.10.14.2    martin 			BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, to, 0, 1),
    589  1.10.14.2    martin 			BPF_STMT(BPF_JMP+BPF_JA, JUMP_MAGIC),
    590        1.1     rmind 		};
    591        1.1     rmind 		add_insns(ctx, insns_range, __arraycount(insns_range));
    592        1.1     rmind 	}
    593        1.1     rmind 
    594        1.1     rmind 	uint32_t mwords[] = {
    595        1.1     rmind 		opts & MATCH_SRC ? BM_SRC_PORTS : BM_DST_PORTS, 2, from, to
    596        1.1     rmind 	};
    597        1.1     rmind 	done_block(ctx, mwords, sizeof(mwords));
    598        1.1     rmind }
    599        1.1     rmind 
    600        1.1     rmind /*
    601        1.1     rmind  * npfctl_bpf_tcpfl: code block to match TCP flags.
    602        1.1     rmind  */
    603        1.1     rmind void
    604        1.5     rmind npfctl_bpf_tcpfl(npf_bpf_t *ctx, uint8_t tf, uint8_t tf_mask, bool checktcp)
    605        1.1     rmind {
    606        1.1     rmind 	const u_int tcpfl_off = offsetof(struct tcphdr, th_flags);
    607        1.6     rmind 	const bool usingmask = tf_mask != tf;
    608        1.1     rmind 
    609        1.1     rmind 	/* X <- IP header length */
    610        1.2     rmind 	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);
    611        1.5     rmind 	if (checktcp) {
    612        1.6     rmind 		const u_int jf = usingmask ? 3 : 2;
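                              		/*
                              		 * Note: jf counts the instructions that follow (the flags
                              		 * load, the optional mask and the comparison), so a non-TCP
                              		 * packet skips this whole block rather than failing.
                              		 */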
    613        1.5     rmind 		assert(ctx->ingroup == false);
    614        1.5     rmind 
    615        1.5     rmind 		/* A <- L4 protocol; A == TCP?  If not, jump out. */
    616        1.5     rmind 		struct bpf_insn insns_tcp[] = {
    617        1.5     rmind 			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
    618        1.5     rmind 			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, IPPROTO_TCP, 0, jf),
    619        1.5     rmind 		};
    620        1.5     rmind 		add_insns(ctx, insns_tcp, __arraycount(insns_tcp));
    621        1.6     rmind 	} else {
    622        1.6     rmind 		assert(ctx->flags & CHECKED_L4);
    623        1.5     rmind 	}
    624        1.1     rmind 
    625        1.1     rmind 	struct bpf_insn insns_tf[] = {
    626        1.1     rmind 		/* A <- TCP flags */
    627        1.1     rmind 		BPF_STMT(BPF_LD+BPF_B+BPF_IND, tcpfl_off),
    628        1.1     rmind 	};
    629        1.1     rmind 	add_insns(ctx, insns_tf, __arraycount(insns_tf));
    630        1.1     rmind 
    631        1.6     rmind 	if (usingmask) {
    632        1.1     rmind 		/* A <- (A & mask) */
    633        1.1     rmind 		struct bpf_insn insns_mask[] = {
    634        1.1     rmind 			BPF_STMT(BPF_ALU+BPF_AND+BPF_K, tf_mask),
    635        1.1     rmind 		};
    636        1.1     rmind 		add_insns(ctx, insns_mask, __arraycount(insns_mask));
    637        1.1     rmind 	}
    638        1.1     rmind 
    639        1.1     rmind 	struct bpf_insn insns_cmp[] = {
    640        1.1     rmind 		/* A == expected-TCP-flags? */
    641        1.1     rmind 		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, tf, 0, JUMP_MAGIC),
    642        1.1     rmind 	};
    643        1.1     rmind 	add_insns(ctx, insns_cmp, __arraycount(insns_cmp));
    644        1.1     rmind 
    645  1.10.14.1  christos 	uint32_t mwords[] = { BM_TCPFL, 2, tf, tf_mask};
    646  1.10.14.1  christos 	done_block(ctx, mwords, sizeof(mwords));
    647        1.1     rmind }
    648        1.1     rmind 
    649        1.1     rmind /*
    650        1.1     rmind  * npfctl_bpf_icmp: code block to match ICMP type and/or code.
    651        1.1     rmind  * Note: suitable both for the ICMPv4 and ICMPv6.
    652        1.1     rmind  */
    653        1.1     rmind void
    654        1.1     rmind npfctl_bpf_icmp(npf_bpf_t *ctx, int type, int code)
    655        1.1     rmind {
    656        1.1     rmind 	const u_int type_off = offsetof(struct icmp, icmp_type);
    657        1.1     rmind 	const u_int code_off = offsetof(struct icmp, icmp_code);
    658        1.1     rmind 
    659        1.6     rmind 	assert(ctx->flags & CHECKED_L4);
    660        1.1     rmind 	assert(offsetof(struct icmp6_hdr, icmp6_type) == type_off);
    661        1.1     rmind 	assert(offsetof(struct icmp6_hdr, icmp6_code) == code_off);
    662        1.1     rmind 	assert(type != -1 || code != -1);
    663        1.1     rmind 
    664        1.1     rmind 	/* X <- IP header length */
    665        1.2     rmind 	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);
    666        1.1     rmind 
    667        1.1     rmind 	if (type != -1) {
    668        1.1     rmind 		struct bpf_insn insns_type[] = {
    669        1.1     rmind 			BPF_STMT(BPF_LD+BPF_B+BPF_IND, type_off),
    670        1.1     rmind 			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, type, 0, JUMP_MAGIC),
    671        1.1     rmind 		};
    672        1.1     rmind 		add_insns(ctx, insns_type, __arraycount(insns_type));
    673        1.1     rmind 
    674        1.1     rmind 		uint32_t mwords[] = { BM_ICMP_TYPE, 1, type };
    675        1.1     rmind 		done_block(ctx, mwords, sizeof(mwords));
    676        1.1     rmind 	}
    677        1.1     rmind 
    678        1.1     rmind 	if (code != -1) {
    679        1.1     rmind 		struct bpf_insn insns_code[] = {
    680        1.1     rmind 			BPF_STMT(BPF_LD+BPF_B+BPF_IND, code_off),
    681        1.1     rmind 			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, code, 0, JUMP_MAGIC),
    682        1.1     rmind 		};
    683        1.1     rmind 		add_insns(ctx, insns_code, __arraycount(insns_code));
    684        1.1     rmind 
    685        1.1     rmind 		uint32_t mwords[] = { BM_ICMP_CODE, 1, code };
    686        1.1     rmind 		done_block(ctx, mwords, sizeof(mwords));
    687        1.1     rmind 	}
    688        1.1     rmind }
    689        1.1     rmind 
    690        1.1     rmind #define	SRC_FLAG_BIT	(1U << 31)
    691        1.1     rmind 
    692        1.1     rmind /*
    693        1.1     rmind  * npfctl_bpf_table: code block to match source/destination IP address
    694        1.1     rmind  * against NPF table specified by ID.
    695        1.1     rmind  */
    696        1.1     rmind void
    697        1.1     rmind npfctl_bpf_table(npf_bpf_t *ctx, u_int opts, u_int tid)
    698        1.1     rmind {
    699        1.1     rmind 	const bool src = (opts & MATCH_SRC) != 0;
    700        1.1     rmind 
    701        1.1     rmind 	struct bpf_insn insns_table[] = {
    702        1.1     rmind 		BPF_STMT(BPF_LD+BPF_IMM, (src ? SRC_FLAG_BIT : 0) | tid),
    703        1.1     rmind 		BPF_STMT(BPF_MISC+BPF_COP, NPF_COP_TABLE),
    704        1.1     rmind 		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0, JUMP_MAGIC, 0),
    705        1.1     rmind 	};
    706        1.1     rmind 	add_insns(ctx, insns_table, __arraycount(insns_table));
    707        1.1     rmind 
    708        1.1     rmind 	uint32_t mwords[] = { src ? BM_SRC_TABLE: BM_DST_TABLE, 1, tid };
    709        1.1     rmind 	done_block(ctx, mwords, sizeof(mwords));
    710        1.1     rmind }
    711