/*	$NetBSD: npf_bpf_comp.c,v 1.7 2014/06/29 00:05:24 rmind Exp $	*/

/*-
 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * BPF byte-code generation for NPF rules.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: npf_bpf_comp.c,v 1.7 2014/06/29 00:05:24 rmind Exp $");

#include <stdlib.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <inttypes.h>
#include <err.h>
#include <assert.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <net/bpf.h>

#include "npfctl.h"

/*
 * Note: X_EQ_L4OFF must be cleared whenever register X is invalidated,
 * i.e. when it stores something other than the L4 header offset.
 * Generally, this happens whenever BPF_LDX is used.
 */
#define	FETCHED_L3		0x01
#define	CHECKED_L4		0x02
#define	X_EQ_L4OFF		0x04

struct npf_bpf {
	/*
	 * BPF program code, the allocated length (in bytes), the number
	 * of logical blocks and the flags.
	 */
	struct bpf_program	prog;
	size_t			alen;
	u_int			nblocks;
	sa_family_t		af;
	uint32_t		flags;

	/* The current group offset and block number. */
	bool			ingroup;
	u_int			goff;
	u_int			gblock;

	/* BPF marks, allocated length and the real length. */
	uint32_t *		marks;
	size_t			malen;
	size_t			mlen;
};

/*
 * NPF success and failure values to be returned from BPF.
 */
#define	NPF_BPF_SUCCESS		((u_int)-1)
#define	NPF_BPF_FAILURE		0

/*
 * Magic value to indicate the failure path, which is fixed up on completion.
 * Note: this is the longest jump offset in BPF, since the offset is one byte.
 */
#define	JUMP_MAGIC		0xff

/* Reduce re-allocations by expanding in 64 byte blocks. */
#define	ALLOC_MASK		(64 - 1)
#define	ALLOC_ROUND(x)		(((x) + ALLOC_MASK) & ~ALLOC_MASK)

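/*
 * npfctl_bpf_create: allocate a new BPF byte-code generation context.
 *
 * The typical sequence is: create the context, add the matching blocks
 * (npfctl_bpf_proto, npfctl_bpf_cidr, npfctl_bpf_ports, etc), finalise
 * the program with npfctl_bpf_complete, fetch the marks via
 * npfctl_bpf_bmarks and release everything with npfctl_bpf_destroy.
 */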
npf_bpf_t *
npfctl_bpf_create(void)
{
	return ecalloc(1, sizeof(npf_bpf_t));
}

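/*
 * fixup_jumps: replace the JUMP_MAGIC placeholders within the given
 * range of instructions with the actual offset of the failure path,
 * optionally swapping the jump-true/jump-false branches (used when
 * closing a group).
 */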
static void
fixup_jumps(npf_bpf_t *ctx, u_int start, u_int end, bool swap)
{
	struct bpf_program *bp = &ctx->prog;

	for (u_int i = start; i < end; i++) {
		struct bpf_insn *insn = &bp->bf_insns[i];
		const u_int fail_off = end - i;

		if (fail_off >= JUMP_MAGIC) {
			errx(EXIT_FAILURE, "BPF generation error: "
			    "the number of instructions is over the limit");
		}
		if (BPF_CLASS(insn->code) != BPF_JMP) {
			continue;
		}
		if (swap) {
			uint8_t jt = insn->jt;
			insn->jt = insn->jf;
			insn->jf = jt;
		}
		if (insn->jt == JUMP_MAGIC)
			insn->jt = fail_off;
		if (insn->jf == JUMP_MAGIC)
			insn->jf = fail_off;
	}
}

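/*
 * add_insns: append the given instructions to the BPF program, growing
 * the instruction buffer in ALLOC_ROUND-ed steps as needed.
 */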
static void
add_insns(npf_bpf_t *ctx, struct bpf_insn *insns, size_t count)
{
	struct bpf_program *bp = &ctx->prog;
	size_t offset, len, reqlen;

	/* Note: bf_len is the count of instructions. */
	offset = bp->bf_len * sizeof(struct bpf_insn);
	len = count * sizeof(struct bpf_insn);

	/* Ensure the memory buffer for the program. */
	reqlen = ALLOC_ROUND(offset + len);
	if (reqlen > ctx->alen) {
		bp->bf_insns = erealloc(bp->bf_insns, reqlen);
		ctx->alen = reqlen;
	}

	/* Add the code block. */
	memcpy((uint8_t *)bp->bf_insns + offset, insns, len);
	bp->bf_len += count;
}

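/*
 * done_raw_block: store the mark words describing the block just
 * generated.  The format is { BM_* key, argument count, arguments }.
 */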
static void
done_raw_block(npf_bpf_t *ctx, const uint32_t *m, size_t len)
{
	size_t reqlen, nargs = m[1];

	if ((len / sizeof(uint32_t) - 2) != nargs) {
		errx(EXIT_FAILURE, "invalid BPF block description");
	}
	reqlen = ALLOC_ROUND(ctx->mlen + len);
	if (reqlen > ctx->malen) {
		ctx->marks = erealloc(ctx->marks, reqlen);
		ctx->malen = reqlen;
	}
	memcpy((uint8_t *)ctx->marks + ctx->mlen, m, len);
	ctx->mlen += len;
}

static void
done_block(npf_bpf_t *ctx, const uint32_t *m, size_t len)
{
	done_raw_block(ctx, m, len);
	ctx->nblocks++;
}

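/*
 * npfctl_bpf_complete: append the success and failure return fragment,
 * fix up all jumps to the failure path and return the finished program.
 */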
struct bpf_program *
npfctl_bpf_complete(npf_bpf_t *ctx)
{
	struct bpf_program *bp = &ctx->prog;
	const u_int retoff = bp->bf_len;

	/* Add the return fragment (success and failure paths). */
	struct bpf_insn insns_ret[] = {
		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_SUCCESS),
		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_FAILURE),
	};
	add_insns(ctx, insns_ret, __arraycount(insns_ret));

	/* Fixup all jumps to the main failure path. */
	fixup_jumps(ctx, 0, retoff, false);

	return &ctx->prog;
}

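/*
 * npfctl_bpf_bmarks: return the mark words accumulated for the program
 * and their total length in bytes.
 */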
const void *
npfctl_bpf_bmarks(npf_bpf_t *ctx, size_t *len)
{
	*len = ctx->mlen;
	return ctx->marks;
}

void
npfctl_bpf_destroy(npf_bpf_t *ctx)
{
	free(ctx->prog.bf_insns);
	free(ctx->marks);
	free(ctx);
}

/*
 * npfctl_bpf_group: begin a logical group.  The comparisons within
 * the group are combined using logical disjunction (OR).
 */
void
npfctl_bpf_group(npf_bpf_t *ctx)
{
	struct bpf_program *bp = &ctx->prog;

	assert(ctx->goff == 0);
	assert(ctx->gblock == 0);

	ctx->goff = bp->bf_len;
	ctx->gblock = ctx->nblocks;
	ctx->ingroup = true;
}

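/*
 * npfctl_bpf_endgroup: end a logical group.  Appends the failure return
 * for the fall-through case and adjusts the jumps so that any match
 * exits the group, while a mismatch proceeds to the next block.
 */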
void
npfctl_bpf_endgroup(npf_bpf_t *ctx)
{
	struct bpf_program *bp = &ctx->prog;
	const size_t curoff = bp->bf_len;

	/* If there are no blocks or only one - nothing to do. */
	if ((ctx->nblocks - ctx->gblock) <= 1) {
		ctx->goff = ctx->gblock = 0;
		return;
	}

	/*
	 * Append a failure return as a fall-through i.e. if there is
	 * no match within the group.
	 */
	struct bpf_insn insns_ret[] = {
		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_FAILURE),
	};
	add_insns(ctx, insns_ret, __arraycount(insns_ret));

	/*
	 * Adjust jump offsets: on match - jump outside the group i.e.
	 * to the current offset.  Otherwise, jump to the next instruction
	 * which would lead to the fall-through code above if none matches.
	 */
	fixup_jumps(ctx, ctx->goff, curoff, true);
	ctx->goff = ctx->gblock = 0;
}

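/*
 * fetch_l3: emit the L3 (IP version) check for the given address family
 * and, if X_EQ_L4OFF is requested, load the L4 header offset into the
 * X register.  If called within a group, the group is closed and then
 * re-opened around the emitted L3 block.
 */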
static void
fetch_l3(npf_bpf_t *ctx, sa_family_t af, u_int flags)
{
	u_int ver;

	switch (af) {
	case AF_INET:
		ver = IPVERSION;
		break;
	case AF_INET6:
		ver = IPV6_VERSION >> 4;
		break;
	case AF_UNSPEC:
		ver = 0;
		break;
	default:
		abort();
	}

	/*
	 * The memory store is populated with:
	 * - BPF_MW_IPVER: IP version (4 or 6).
	 * - BPF_MW_L4OFF: L4 header offset.
	 * - BPF_MW_L4PROTO: L4 protocol.
	 */
	if ((ctx->flags & FETCHED_L3) == 0 || (af && ctx->af == 0)) {
		const uint8_t jt = ver ? 0 : JUMP_MAGIC;
		const uint8_t jf = ver ? JUMP_MAGIC : 0;
		bool ingroup = ctx->ingroup;

		/*
		 * L3 block cannot be inserted in the middle of a group.
		 * In fact, it never is.  Check and start the group after.
		 */
		if (ingroup) {
			assert(ctx->nblocks == ctx->gblock);
			npfctl_bpf_endgroup(ctx);
		}

		/*
		 * A <- IP version; A == expected-version?
		 * If no particular version specified, check for non-zero.
		 */
		struct bpf_insn insns_af[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_IPVER),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ver, jt, jf),
		};
		add_insns(ctx, insns_af, __arraycount(insns_af));
		ctx->flags |= FETCHED_L3;
		ctx->af = af;

		if (af) {
			uint32_t mwords[] = { BM_IPVER, 1, af };
			done_raw_block(ctx, mwords, sizeof(mwords));
		}
		if (ingroup) {
			npfctl_bpf_group(ctx);
		}

	} else if (af && af != ctx->af) {
		errx(EXIT_FAILURE, "address family mismatch");
	}

	if ((flags & X_EQ_L4OFF) != 0 && (ctx->flags & X_EQ_L4OFF) == 0) {
		/* X <- IP header length */
		struct bpf_insn insns_hlen[] = {
			BPF_STMT(BPF_LDX+BPF_MEM, BPF_MW_L4OFF),
		};
		add_insns(ctx, insns_hlen, __arraycount(insns_hlen));
		ctx->flags |= X_EQ_L4OFF;
	}
}

/*
 * npfctl_bpf_proto: code block to match IP version and L4 protocol.
 */
void
npfctl_bpf_proto(npf_bpf_t *ctx, sa_family_t af, int proto)
{
	assert(af != AF_UNSPEC || proto != -1);

	/* Note: fails if IP version does not match. */
	fetch_l3(ctx, af, 0);
	if (proto == -1) {
		return;
	}

	struct bpf_insn insns_proto[] = {
		/* A <- L4 protocol; A == expected-protocol? */
		BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, proto, 0, JUMP_MAGIC),
	};
	add_insns(ctx, insns_proto, __arraycount(insns_proto));

	uint32_t mwords[] = { BM_PROTO, 1, proto };
	done_block(ctx, mwords, sizeof(mwords));
	ctx->flags |= CHECKED_L4;
}

/*
 * npfctl_bpf_cidr: code block to match IPv4 or IPv6 CIDR.
 *
 * => IP address shall be in the network byte order.
 */
void
npfctl_bpf_cidr(npf_bpf_t *ctx, u_int opts, sa_family_t af,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	const uint32_t *awords = (const uint32_t *)addr;
	u_int nwords, length, maxmask, off;

	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));
	assert((mask && mask <= NPF_MAX_NETMASK) || mask == NPF_NO_NETMASK);

	switch (af) {
	case AF_INET:
		maxmask = 32;
		off = (opts & MATCH_SRC) ?
		    offsetof(struct ip, ip_src) :
		    offsetof(struct ip, ip_dst);
		nwords = sizeof(struct in_addr) / sizeof(uint32_t);
		break;
	case AF_INET6:
		maxmask = 128;
		off = (opts & MATCH_SRC) ?
		    offsetof(struct ip6_hdr, ip6_src) :
		    offsetof(struct ip6_hdr, ip6_dst);
		nwords = sizeof(struct in6_addr) / sizeof(uint32_t);
		break;
	default:
		abort();
	}

	/* Ensure address family. */
	fetch_l3(ctx, af, 0);

	length = (mask == NPF_NO_NETMASK) ? maxmask : mask;

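	/*
	 * Example: an IPv6 /68 prefix compares words 0 and 1 in full,
	 * masks word 2 with 0xf0000000 and skips word 3 entirely.
	 */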
	/* CAUTION: BPF operates in host byte-order. */
	for (u_int i = 0; i < nwords; i++) {
		const u_int woff = i * sizeof(uint32_t);
		uint32_t word = ntohl(awords[i]);
		uint32_t wordmask;

		if (length >= 32) {
			/* The mask is a full word - do not apply it. */
			wordmask = 0;
			length -= 32;
		} else if (length) {
			wordmask = 0xffffffff << (32 - length);
			length = 0;
		} else {
			/* The mask became zero - skip the rest. */
			break;
		}

		/* A <- IP address (or one word of it) */
		struct bpf_insn insns_ip[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_ABS, off + woff),
		};
		add_insns(ctx, insns_ip, __arraycount(insns_ip));

		/* A <- (A & MASK) */
		if (wordmask) {
			struct bpf_insn insns_mask[] = {
				BPF_STMT(BPF_ALU+BPF_AND+BPF_K, wordmask),
			};
			add_insns(ctx, insns_mask, __arraycount(insns_mask));
		}

		/* A == expected-IP-word ? */
		struct bpf_insn insns_cmp[] = {
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, word, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_cmp, __arraycount(insns_cmp));
	}

	uint32_t mwords[] = {
		(opts & MATCH_SRC) ? BM_SRC_CIDR: BM_DST_CIDR, 6,
		af, mask, awords[0], awords[1], awords[2], awords[3],
	};
	done_block(ctx, mwords, sizeof(mwords));
}

/*
 * npfctl_bpf_ports: code block to match TCP/UDP port range.
 *
 * => Port numbers shall be in the network byte order.
 */
void
npfctl_bpf_ports(npf_bpf_t *ctx, u_int opts, in_port_t from, in_port_t to)
{
	const u_int sport_off = offsetof(struct udphdr, uh_sport);
	const u_int dport_off = offsetof(struct udphdr, uh_dport);
	u_int off;

	/* TCP and UDP port offsets are the same. */
	assert(sport_off == offsetof(struct tcphdr, th_sport));
	assert(dport_off == offsetof(struct tcphdr, th_dport));
	assert(ctx->flags & CHECKED_L4);

	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));
	off = (opts & MATCH_SRC) ? sport_off : dport_off;

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	struct bpf_insn insns_fetch[] = {
		/* A <- port */
		BPF_STMT(BPF_LD+BPF_H+BPF_IND, off),
	};
	add_insns(ctx, insns_fetch, __arraycount(insns_fetch));

	/* CAUTION: BPF operates in host byte-order. */
	from = ntohs(from);
	to = ntohs(to);

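	/*
	 * A single port is matched with one equality test; a range is
	 * matched by failing if the port is below 'from' (JGE) or above
	 * 'to' (JGT).
	 */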
	if (from == to) {
		/* Single port case. */
		struct bpf_insn insns_port[] = {
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, from, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_port, __arraycount(insns_port));
	} else {
		/* Port range case. */
		struct bpf_insn insns_range[] = {
			BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, from, 0, JUMP_MAGIC),
			BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, to, JUMP_MAGIC, 0),
		};
		add_insns(ctx, insns_range, __arraycount(insns_range));
	}

	uint32_t mwords[] = {
		opts & MATCH_SRC ? BM_SRC_PORTS : BM_DST_PORTS, 2, from, to
	};
	done_block(ctx, mwords, sizeof(mwords));
}

/*
 * npfctl_bpf_tcpfl: code block to match TCP flags.
 */
void
npfctl_bpf_tcpfl(npf_bpf_t *ctx, uint8_t tf, uint8_t tf_mask, bool checktcp)
{
	const u_int tcpfl_off = offsetof(struct tcphdr, th_flags);
	const bool usingmask = tf_mask != tf;

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);
	if (checktcp) {
		const u_int jf = usingmask ? 3 : 2;
		assert(ctx->ingroup == false);

		/* A <- L4 protocol; A == TCP?  If not, jump out. */
		struct bpf_insn insns_tcp[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, IPPROTO_TCP, 0, jf),
		};
		add_insns(ctx, insns_tcp, __arraycount(insns_tcp));
	} else {
		assert(ctx->flags & CHECKED_L4);
	}

	struct bpf_insn insns_tf[] = {
		/* A <- TCP flags */
		BPF_STMT(BPF_LD+BPF_B+BPF_IND, tcpfl_off),
	};
	add_insns(ctx, insns_tf, __arraycount(insns_tf));

	if (usingmask) {
		/* A <- (A & mask) */
		struct bpf_insn insns_mask[] = {
			BPF_STMT(BPF_ALU+BPF_AND+BPF_K, tf_mask),
		};
		add_insns(ctx, insns_mask, __arraycount(insns_mask));
	}

	struct bpf_insn insns_cmp[] = {
		/* A == expected-TCP-flags? */
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, tf, 0, JUMP_MAGIC),
	};
	add_insns(ctx, insns_cmp, __arraycount(insns_cmp));

	if (!checktcp) {
		uint32_t mwords[] = { BM_TCPFL, 2, tf, tf_mask };
		done_block(ctx, mwords, sizeof(mwords));
	}
}

/*
 * npfctl_bpf_icmp: code block to match the ICMP type and/or code.
 * Note: suitable for both ICMPv4 and ICMPv6.
 */
void
npfctl_bpf_icmp(npf_bpf_t *ctx, int type, int code)
{
	const u_int type_off = offsetof(struct icmp, icmp_type);
	const u_int code_off = offsetof(struct icmp, icmp_code);

	assert(ctx->flags & CHECKED_L4);
	assert(offsetof(struct icmp6_hdr, icmp6_type) == type_off);
	assert(offsetof(struct icmp6_hdr, icmp6_code) == code_off);
	assert(type != -1 || code != -1);

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	if (type != -1) {
		struct bpf_insn insns_type[] = {
			BPF_STMT(BPF_LD+BPF_B+BPF_IND, type_off),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, type, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_type, __arraycount(insns_type));

		uint32_t mwords[] = { BM_ICMP_TYPE, 1, type };
		done_block(ctx, mwords, sizeof(mwords));
	}

	if (code != -1) {
		struct bpf_insn insns_code[] = {
			BPF_STMT(BPF_LD+BPF_B+BPF_IND, code_off),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, code, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_code, __arraycount(insns_code));

		uint32_t mwords[] = { BM_ICMP_CODE, 1, code };
		done_block(ctx, mwords, sizeof(mwords));
	}
}

#define	SRC_FLAG_BIT	(1U << 31)

/*
 * npfctl_bpf_table: code block to match source/destination IP address
 * against NPF table specified by ID.
 */
void
npfctl_bpf_table(npf_bpf_t *ctx, u_int opts, u_int tid)
{
	const bool src = (opts & MATCH_SRC) != 0;

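	/*
	 * A <- the table ID, with SRC_FLAG_BIT set when matching the
	 * source address.  The NPF_COP_TABLE coprocessor performs the
	 * lookup; a zero result (no match) takes the failure path.
	 */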
	struct bpf_insn insns_table[] = {
		BPF_STMT(BPF_LD+BPF_IMM, (src ? SRC_FLAG_BIT : 0) | tid),
		BPF_STMT(BPF_MISC+BPF_COP, NPF_COP_TABLE),
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0, JUMP_MAGIC, 0),
	};
	add_insns(ctx, insns_table, __arraycount(insns_table));

	uint32_t mwords[] = { src ? BM_SRC_TABLE: BM_DST_TABLE, 1, tid };
	done_block(ctx, mwords, sizeof(mwords));
}