      1 /*	$NetBSD: optimize.c,v 1.1.1.4 2013/12/31 16:57:25 christos Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that: (1) source code distributions
      9  * retain the above copyright notice and this paragraph in its entirety, (2)
     10  * distributions including binary code include the above copyright notice and
     11  * this paragraph in its entirety in the documentation or other materials
     12  * provided with the distribution, and (3) all advertising materials mentioning
     13  * features or use of this software display the following acknowledgement:
     14  * ``This product includes software developed by the University of California,
     15  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
     16  * the University nor the names of its contributors may be used to endorse
     17  * or promote products derived from this software without specific prior
     18  * written permission.
     19  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
     20  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
     21  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
     22  *
     23  *  Optimization module for tcpdump intermediate representation.
     24  */
     25 #ifndef lint
     26 static const char rcsid[] _U_ =
     27     "@(#) Header: /tcpdump/master/libpcap/optimize.c,v 1.91 2008-01-02 04:16:46 guy Exp  (LBL)";
     28 #endif
     29 
     30 #ifdef HAVE_CONFIG_H
     31 #include "config.h"
     32 #endif
     33 
     34 #ifdef WIN32
     35 #include <pcap-stdinc.h>
     36 #else /* WIN32 */
     37 #if HAVE_INTTYPES_H
     38 #include <inttypes.h>
     39 #elif HAVE_STDINT_H
     40 #include <stdint.h>
     41 #endif
     42 #ifdef HAVE_SYS_BITYPES_H
     43 #include <sys/bitypes.h>
     44 #endif
     45 #include <sys/types.h>
     46 #endif /* WIN32 */
     47 
     48 #include <stdio.h>
     49 #include <stdlib.h>
     50 #include <memory.h>
     51 #include <string.h>
     52 
     53 #include <errno.h>
     54 
     55 #include "pcap-int.h"
     56 
     57 #include "gencode.h"
     58 
     59 #ifdef HAVE_OS_PROTO_H
     60 #include "os-proto.h"
     61 #endif
     62 
     63 #ifdef BDEBUG
     64 extern int dflag;
     65 #endif
     66 
     67 #if defined(MSDOS) && !defined(__DJGPP__)
     68 extern int _w32_ffs (int mask);
     69 #define ffs _w32_ffs
     70 #endif
     71 
     72 #if defined(WIN32) && defined (_MSC_VER)
     73 int ffs(int mask);
     74 #endif
     75 
     76 /*
     77  * Represents a deleted instruction.
     78  */
     79 #define NOP -1
     80 
     81 /*
     82  * Register numbers for use-def values.
     83  * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
     84  * location.  A_ATOM is the accumulator and X_ATOM is the index
     85  * register.
     86  */
     87 #define A_ATOM BPF_MEMWORDS
     88 #define X_ATOM (BPF_MEMWORDS+1)
     89 
     90 /*
     91  * This define is used to represent *both* the accumulator and
     92  * x register in use-def computations.
     93  * Currently, the use-def code assumes only one definition per instruction.
     94  */
     95 #define AX_ATOM N_ATOMS
     96 
     97 /*
     98  * A flag to indicate that further optimization is needed.
     99  * Iterative passes are continued until a given pass yields no
    100  * branch movement.
    101  */
    102 static int done;
    103 
    104 /*
     105  * A block is marked if and only if its mark equals the current mark.
    106  * Rather than traverse the code array, marking each item, 'cur_mark' is
    107  * incremented.  This automatically makes each element unmarked.
    108  */
    109 static int cur_mark;
    110 #define isMarked(p) ((p)->mark == cur_mark)
    111 #define unMarkAll() cur_mark += 1
    112 #define Mark(p) ((p)->mark = cur_mark)
    113 
    114 static void opt_init(struct block *);
    115 static void opt_cleanup(void);
    116 
    117 static void intern_blocks(struct block *);
    118 
    119 static void find_inedges(struct block *);
    120 #ifdef BDEBUG
    121 static void opt_dump(struct block *);
    122 #endif
    123 
    124 static int n_blocks;
    125 struct block **blocks;
    126 static int n_edges;
    127 struct edge **edges;
    128 
    129 /*
    130  * A bit vector set representation of the dominators.
    131  * We round up the set size to the next power of two.
    132  */
    133 static int nodewords;
    134 static int edgewords;
    135 struct block **levels;
    136 bpf_u_int32 *space;
    137 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
    138 /*
     139  * True if 'a' is in uset 'p'.
    140  */
    141 #define SET_MEMBER(p, a) \
    142 ((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))
    143 
    144 /*
    145  * Add 'a' to uset p.
    146  */
    147 #define SET_INSERT(p, a) \
    148 (p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))
    149 
    150 /*
    151  * Delete 'a' from uset p.
    152  */
    153 #define SET_DELETE(p, a) \
    154 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))
    155 
    156 /*
    157  * a := a intersect b
    158  */
    159 #define SET_INTERSECT(a, b, n)\
    160 {\
    161 	register bpf_u_int32 *_x = a, *_y = b;\
    162 	register int _n = n;\
    163 	while (--_n >= 0) *_x++ &= *_y++;\
    164 }
    165 
    166 /*
    167  * a := a - b
    168  */
    169 #define SET_SUBTRACT(a, b, n)\
    170 {\
    171 	register bpf_u_int32 *_x = a, *_y = b;\
    172 	register int _n = n;\
    173 	while (--_n >= 0) *_x++ &=~ *_y++;\
    174 }
    175 
    176 /*
    177  * a := a union b
    178  */
    179 #define SET_UNION(a, b, n)\
    180 {\
    181 	register bpf_u_int32 *_x = a, *_y = b;\
    182 	register int _n = n;\
    183 	while (--_n >= 0) *_x++ |= *_y++;\
    184 }
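
/*
 * Annotation (not part of the original source): a uset is just an
 * array of bpf_u_int32 words treated as a bit vector, so the set
 * operations above reduce to plain bit twiddling.  A minimal sketch:
 *
 *	bpf_u_int32 set[2] = { 0, 0 };	// room for 64 elements
 *	SET_INSERT(set, 37);		// sets bit 5 of word 1
 *	if (SET_MEMBER(set, 37))
 *		SET_DELETE(set, 37);	// clears it again
 *	SET_UNION(set, other, 2);	// 'other' is hypothetical
 */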
    185 
    186 static uset all_dom_sets;
    187 static uset all_closure_sets;
    188 static uset all_edge_sets;
    189 
    190 #ifndef MAX
    191 #define MAX(a,b) ((a)>(b)?(a):(b))
    192 #endif
    193 
    194 static void
    195 find_levels_r(struct block *b)
    196 {
    197 	int level;
    198 
    199 	if (isMarked(b))
    200 		return;
    201 
    202 	Mark(b);
    203 	b->link = 0;
    204 
    205 	if (JT(b)) {
    206 		find_levels_r(JT(b));
    207 		find_levels_r(JF(b));
    208 		level = MAX(JT(b)->level, JF(b)->level) + 1;
    209 	} else
    210 		level = 0;
    211 	b->level = level;
    212 	b->link = levels[level];
    213 	levels[level] = b;
    214 }
    215 
    216 /*
    217  * Level graph.  The levels go from 0 at the leaves to
    218  * N_LEVELS at the root.  The levels[] array points to the
    219  * first node of the level list, whose elements are linked
    220  * with the 'link' field of the struct block.
    221  */
    222 static void
    223 find_levels(struct block *root)
    224 {
    225 	memset((char *)levels, 0, n_blocks * sizeof(*levels));
    226 	unMarkAll();
    227 	find_levels_r(root);
    228 }
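
/*
 * Annotation (not part of the original source): after find_levels(),
 * the later passes all walk the graph the same way, from the root's
 * level down to the leaves, following each level's 'link' chain:
 *
 *	for (i = root->level; i >= 0; --i)
 *		for (b = levels[i]; b != 0; b = b->link)
 *			visit(b);	// 'visit' is a stand-in
 */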
    229 
    230 /*
    231  * Find dominator relationships.
    232  * Assumes graph has been leveled.
    233  */
    234 static void
    235 find_dom(struct block *root)
    236 {
    237 	int i;
    238 	struct block *b;
    239 	bpf_u_int32 *x;
    240 
    241 	/*
    242 	 * Initialize sets to contain all nodes.
    243 	 */
    244 	x = all_dom_sets;
    245 	i = n_blocks * nodewords;
    246 	while (--i >= 0)
    247 		*x++ = ~0;
    248 	/* Root starts off empty. */
    249 	for (i = nodewords; --i >= 0;)
    250 		root->dom[i] = 0;
    251 
     252 	/* root->level is the highest level number found. */
    253 	for (i = root->level; i >= 0; --i) {
    254 		for (b = levels[i]; b; b = b->link) {
    255 			SET_INSERT(b->dom, b->id);
    256 			if (JT(b) == 0)
    257 				continue;
    258 			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
    259 			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
    260 		}
    261 	}
    262 }
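
/*
 * Annotation (not part of the original source): find_dom() solves the
 * standard dominator equations, which the acyclic, leveled graph lets
 * us do in a single top-down pass:
 *
 *	dom(root) = { root }
 *	dom(n)    = { n } union ( intersection over preds p of dom(p) )
 *
 * Initializing every set to "all nodes" and intersecting each block's
 * set into both successors realizes the intersection as the levels
 * are visited from the root downward.
 */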
    263 
    264 static void
    265 propedom(struct edge *ep)
    266 {
    267 	SET_INSERT(ep->edom, ep->id);
    268 	if (ep->succ) {
    269 		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
    270 		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
    271 	}
    272 }
    273 
    274 /*
    275  * Compute edge dominators.
    276  * Assumes graph has been leveled and predecessors established.
    277  */
    278 static void
    279 find_edom(struct block *root)
    280 {
    281 	int i;
    282 	uset x;
    283 	struct block *b;
    284 
    285 	x = all_edge_sets;
    286 	for (i = n_edges * edgewords; --i >= 0; )
    287 		x[i] = ~0;
    288 
     289 	/* root->level is the highest level number found. */
    290 	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
    291 	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
    292 	for (i = root->level; i >= 0; --i) {
    293 		for (b = levels[i]; b != 0; b = b->link) {
    294 			propedom(&b->et);
    295 			propedom(&b->ef);
    296 		}
    297 	}
    298 }
    299 
    300 /*
    301  * Find the backwards transitive closure of the flow graph.  These sets
    302  * are backwards in the sense that we find the set of nodes that reach
    303  * a given node, not the set of nodes that can be reached by a node.
    304  *
    305  * Assumes graph has been leveled.
    306  */
    307 static void
    308 find_closure(struct block *root)
    309 {
    310 	int i;
    311 	struct block *b;
    312 
    313 	/*
    314 	 * Initialize sets to contain no nodes.
    315 	 */
    316 	memset((char *)all_closure_sets, 0,
    317 	      n_blocks * nodewords * sizeof(*all_closure_sets));
    318 
     319 	/* root->level is the highest level number found. */
    320 	for (i = root->level; i >= 0; --i) {
    321 		for (b = levels[i]; b; b = b->link) {
    322 			SET_INSERT(b->closure, b->id);
    323 			if (JT(b) == 0)
    324 				continue;
    325 			SET_UNION(JT(b)->closure, b->closure, nodewords);
    326 			SET_UNION(JF(b)->closure, b->closure, nodewords);
    327 		}
    328 	}
    329 }
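
/*
 * Annotation (not part of the original source): the closure sets obey
 * the dual of the dominator equations, with union in place of
 * intersection:
 *
 *	closure(root) = { root }
 *	closure(n)    = { n } union ( union over preds p of closure(p) )
 *
 * so closure(n) is the set of nodes through which some path from the
 * root to n passes.
 */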
    330 
    331 /*
    332  * Return the register number that is used by s.  If A and X are both
    333  * used, return AX_ATOM.  If no register is used, return -1.
    334  *
    335  * The implementation should probably change to an array access.
    336  */
    337 static int
    338 atomuse(struct stmt *s)
    339 {
    340 	register int c = s->code;
    341 
    342 	if (c == NOP)
    343 		return -1;
    344 
    345 	switch (BPF_CLASS(c)) {
    346 
    347 	case BPF_RET:
    348 		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
    349 			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
    350 
    351 	case BPF_LD:
    352 	case BPF_LDX:
    353 		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
    354 			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;
    355 
    356 	case BPF_ST:
    357 		return A_ATOM;
    358 
    359 	case BPF_STX:
    360 		return X_ATOM;
    361 
    362 	case BPF_JMP:
    363 	case BPF_ALU:
    364 		if (BPF_SRC(c) == BPF_X)
    365 			return AX_ATOM;
    366 		return A_ATOM;
    367 
    368 	case BPF_MISC:
    369 		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
    370 	}
    371 	abort();
    372 	/* NOTREACHED */
    373 }
    374 
    375 /*
    376  * Return the register number that is defined by 's'.  We assume that
    377  * a single stmt cannot define more than one register.  If no register
    378  * is defined, return -1.
    379  *
    380  * The implementation should probably change to an array access.
    381  */
    382 static int
    383 atomdef(struct stmt *s)
    384 {
    385 	if (s->code == NOP)
    386 		return -1;
    387 
    388 	switch (BPF_CLASS(s->code)) {
    389 
    390 	case BPF_LD:
    391 	case BPF_ALU:
    392 		return A_ATOM;
    393 
    394 	case BPF_LDX:
    395 		return X_ATOM;
    396 
    397 	case BPF_ST:
    398 	case BPF_STX:
    399 		return s->k;
    400 
    401 	case BPF_MISC:
    402 		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
    403 	}
    404 	return -1;
    405 }
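
/*
 * Annotation (not part of the original source), a concrete example of
 * the two functions above: for "ldx M[3]" (BPF_LDX|BPF_MEM, k = 3),
 * atomuse() returns 3 (scratch slot 3 is read) and atomdef() returns
 * X_ATOM (the index register is written); for "add x"
 * (BPF_ALU|BPF_ADD|BPF_X), atomuse() returns AX_ATOM and atomdef()
 * returns A_ATOM.
 */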
    406 
    407 /*
    408  * Compute the sets of registers used, defined, and killed by 'b'.
    409  *
    410  * "Used" means that a statement in 'b' uses the register before any
    411  * statement in 'b' defines it, i.e. it uses the value left in
    412  * that register by a predecessor block of this block.
    413  * "Defined" means that a statement in 'b' defines it.
    414  * "Killed" means that a statement in 'b' defines it before any
    415  * statement in 'b' uses it, i.e. it kills the value left in that
    416  * register by a predecessor block of this block.
    417  */
    418 static void
    419 compute_local_ud(struct block *b)
    420 {
    421 	struct slist *s;
    422 	atomset def = 0, use = 0, kill = 0;
    423 	int atom;
    424 
    425 	for (s = b->stmts; s; s = s->next) {
    426 		if (s->s.code == NOP)
    427 			continue;
    428 		atom = atomuse(&s->s);
    429 		if (atom >= 0) {
    430 			if (atom == AX_ATOM) {
    431 				if (!ATOMELEM(def, X_ATOM))
    432 					use |= ATOMMASK(X_ATOM);
    433 				if (!ATOMELEM(def, A_ATOM))
    434 					use |= ATOMMASK(A_ATOM);
    435 			}
    436 			else if (atom < N_ATOMS) {
    437 				if (!ATOMELEM(def, atom))
    438 					use |= ATOMMASK(atom);
    439 			}
    440 			else
    441 				abort();
    442 		}
    443 		atom = atomdef(&s->s);
    444 		if (atom >= 0) {
    445 			if (!ATOMELEM(use, atom))
    446 				kill |= ATOMMASK(atom);
    447 			def |= ATOMMASK(atom);
    448 		}
    449 	}
    450 	if (BPF_CLASS(b->s.code) == BPF_JMP) {
    451 		/*
    452 		 * XXX - what about RET?
    453 		 */
    454 		atom = atomuse(&b->s);
    455 		if (atom >= 0) {
    456 			if (atom == AX_ATOM) {
    457 				if (!ATOMELEM(def, X_ATOM))
    458 					use |= ATOMMASK(X_ATOM);
    459 				if (!ATOMELEM(def, A_ATOM))
    460 					use |= ATOMMASK(A_ATOM);
    461 			}
    462 			else if (atom < N_ATOMS) {
    463 				if (!ATOMELEM(def, atom))
    464 					use |= ATOMMASK(atom);
    465 			}
    466 			else
    467 				abort();
    468 		}
    469 	}
    470 
    471 	b->def = def;
    472 	b->kill = kill;
    473 	b->in_use = use;
    474 }
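
/*
 * Annotation (not part of the original source), a small worked
 * example: for a block containing "ldx M[0]; st M[1]", the ldx reads
 * slot 0 before anything defines it (use = {0}) and defines X; the st
 * reads A, which is not defined locally either (use = {0, A}), and
 * defines slot 1.  X and slot 1 are both defined before any local
 * use, so def = kill = {X, 1}.
 */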
    475 
    476 /*
    477  * Assume graph is already leveled.
    478  */
    479 static void
    480 find_ud(struct block *root)
    481 {
    482 	int i, maxlevel;
    483 	struct block *p;
    484 
    485 	/*
     486 	 * root->level is the highest level number found;
    487 	 * count down from there.
    488 	 */
    489 	maxlevel = root->level;
    490 	for (i = maxlevel; i >= 0; --i)
    491 		for (p = levels[i]; p; p = p->link) {
    492 			compute_local_ud(p);
    493 			p->out_use = 0;
    494 		}
    495 
    496 	for (i = 1; i <= maxlevel; ++i) {
    497 		for (p = levels[i]; p; p = p->link) {
    498 			p->out_use |= JT(p)->in_use | JF(p)->in_use;
    499 			p->in_use |= p->out_use &~ p->kill;
    500 		}
    501 	}
    502 }
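
/*
 * Annotation (not part of the original source): the bottom-up loop
 * above is the usual liveness data flow, which the acyclic graph lets
 * us solve in a single pass per level:
 *
 *	out_use(b) = in_use(JT(b)) union in_use(JF(b))
 *	in_use(b) |= out_use(b) - kill(b)
 */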
    503 
    504 /*
     505  * These data structures are used in a Cocke and Schwartz style
     506  * value numbering scheme.  Since the flowgraph is acyclic,
     507  * exit values can be propagated from a node's predecessors
     508  * provided the value is uniquely defined.
    509  */
    510 struct valnode {
    511 	int code;
    512 	int v0, v1;
    513 	int val;
    514 	struct valnode *next;
    515 };
    516 
    517 #define MODULUS 213
    518 static struct valnode *hashtbl[MODULUS];
    519 static int curval;
    520 static int maxval;
    521 
    522 /* Integer constants mapped with the load immediate opcode. */
    523 #define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
    524 
    525 struct vmapinfo {
    526 	int is_const;
    527 	bpf_int32 const_val;
    528 };
    529 
    530 struct vmapinfo *vmap;
    531 struct valnode *vnode_base;
    532 struct valnode *next_vnode;
    533 
    534 static void
    535 init_val(void)
    536 {
    537 	curval = 0;
    538 	next_vnode = vnode_base;
    539 	memset((char *)vmap, 0, maxval * sizeof(*vmap));
    540 	memset((char *)hashtbl, 0, sizeof hashtbl);
    541 }
    542 
    543 /* Because we really don't have an IR, this stuff is a little messy. */
    544 static int
    545 F(int code, int v0, int v1)
    546 {
    547 	u_int hash;
    548 	int val;
    549 	struct valnode *p;
    550 
    551 	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
    552 	hash %= MODULUS;
    553 
    554 	for (p = hashtbl[hash]; p; p = p->next)
    555 		if (p->code == code && p->v0 == v0 && p->v1 == v1)
    556 			return p->val;
    557 
    558 	val = ++curval;
    559 	if (BPF_MODE(code) == BPF_IMM &&
    560 	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
    561 		vmap[val].const_val = v0;
    562 		vmap[val].is_const = 1;
    563 	}
    564 	p = next_vnode++;
    565 	p->val = val;
    566 	p->code = code;
    567 	p->v0 = v0;
    568 	p->v1 = v1;
    569 	p->next = hashtbl[hash];
    570 	hashtbl[hash] = p;
    571 
    572 	return val;
    573 }
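
/*
 * Annotation (not part of the original source): F() is the entire
 * value numbering table.  Structurally identical computations hash to
 * the same valnode and therefore share a value number; a minimal
 * sketch using the K() macro above:
 *
 *	int v1 = K(2);	// first "ld #2": allocates a new value number
 *	int v2 = K(2);	// same (code, v0, v1) triple, so v2 == v1
 *	// and vmap[v1].is_const == 1, vmap[v1].const_val == 2
 */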
    574 
    575 static inline void
    576 vstore(struct stmt *s, int *valp, int newval, int alter)
    577 {
    578 	if (alter && *valp == newval)
    579 		s->code = NOP;
    580 	else
    581 		*valp = newval;
    582 }
    583 
    584 /*
    585  * Do constant-folding on binary operators.
    586  * (Unary operators are handled elsewhere.)
    587  */
    588 static void
    589 fold_op(struct stmt *s, int v0, int v1)
    590 {
    591 	bpf_u_int32 a, b;
    592 
    593 	a = vmap[v0].const_val;
    594 	b = vmap[v1].const_val;
    595 
    596 	switch (BPF_OP(s->code)) {
    597 	case BPF_ADD:
    598 		a += b;
    599 		break;
    600 
    601 	case BPF_SUB:
    602 		a -= b;
    603 		break;
    604 
    605 	case BPF_MUL:
    606 		a *= b;
    607 		break;
    608 
    609 	case BPF_DIV:
    610 		if (b == 0)
    611 			bpf_error("division by zero");
    612 		a /= b;
    613 		break;
    614 
    615 	case BPF_AND:
    616 		a &= b;
    617 		break;
    618 
    619 	case BPF_OR:
    620 		a |= b;
    621 		break;
    622 
    623 	case BPF_LSH:
    624 		a <<= b;
    625 		break;
    626 
    627 	case BPF_RSH:
    628 		a >>= b;
    629 		break;
    630 
    631 	default:
    632 		abort();
    633 	}
    634 	s->k = a;
    635 	s->code = BPF_LD|BPF_IMM;
    636 	done = 0;
    637 }
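
/*
 * Annotation (not part of the original source): for example, if the
 * accumulator's value number is the constant 6 and the statement is
 * "add #2", fold_op() rewrites the statement in place to "ld #8"
 * (BPF_LD|BPF_IMM with k = 8) and clears 'done' so that another
 * optimization pass runs.
 */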
    638 
    639 static inline struct slist *
    640 this_op(struct slist *s)
    641 {
    642 	while (s != 0 && s->s.code == NOP)
    643 		s = s->next;
    644 	return s;
    645 }
    646 
    647 static void
    648 opt_not(struct block *b)
    649 {
    650 	struct block *tmp = JT(b);
    651 
    652 	JT(b) = JF(b);
    653 	JF(b) = tmp;
    654 }
    655 
    656 static void
    657 opt_peep(struct block *b)
    658 {
    659 	struct slist *s;
    660 	struct slist *next, *last;
    661 	int val;
    662 
    663 	s = b->stmts;
    664 	if (s == 0)
    665 		return;
    666 
    667 	last = s;
    668 	for (/*empty*/; /*empty*/; s = next) {
    669 		/*
    670 		 * Skip over nops.
    671 		 */
    672 		s = this_op(s);
    673 		if (s == 0)
    674 			break;	/* nothing left in the block */
    675 
    676 		/*
    677 		 * Find the next real instruction after that one
    678 		 * (skipping nops).
    679 		 */
    680 		next = this_op(s->next);
    681 		if (next == 0)
    682 			break;	/* no next instruction */
    683 		last = next;
    684 
    685 		/*
    686 		 * st  M[k]	-->	st  M[k]
    687 		 * ldx M[k]		tax
    688 		 */
    689 		if (s->s.code == BPF_ST &&
    690 		    next->s.code == (BPF_LDX|BPF_MEM) &&
    691 		    s->s.k == next->s.k) {
    692 			done = 0;
    693 			next->s.code = BPF_MISC|BPF_TAX;
    694 		}
    695 		/*
    696 		 * ld  #k	-->	ldx  #k
    697 		 * tax			txa
    698 		 */
    699 		if (s->s.code == (BPF_LD|BPF_IMM) &&
    700 		    next->s.code == (BPF_MISC|BPF_TAX)) {
    701 			s->s.code = BPF_LDX|BPF_IMM;
    702 			next->s.code = BPF_MISC|BPF_TXA;
    703 			done = 0;
    704 		}
    705 		/*
    706 		 * This is an ugly special case, but it happens
    707 		 * when you say tcp[k] or udp[k] where k is a constant.
    708 		 */
    709 		if (s->s.code == (BPF_LD|BPF_IMM)) {
    710 			struct slist *add, *tax, *ild;
    711 
    712 			/*
    713 			 * Check that X isn't used on exit from this
    714 			 * block (which the optimizer might cause).
    715 			 * We know the code generator won't generate
    716 			 * any local dependencies.
    717 			 */
    718 			if (ATOMELEM(b->out_use, X_ATOM))
    719 				continue;
    720 
    721 			/*
    722 			 * Check that the instruction following the ldi
    723 			 * is an addx, or it's an ldxms with an addx
    724 			 * following it (with 0 or more nops between the
    725 			 * ldxms and addx).
    726 			 */
    727 			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
    728 				add = next;
    729 			else
    730 				add = this_op(next->next);
    731 			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
    732 				continue;
    733 
    734 			/*
    735 			 * Check that a tax follows that (with 0 or more
    736 			 * nops between them).
    737 			 */
    738 			tax = this_op(add->next);
    739 			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
    740 				continue;
    741 
    742 			/*
    743 			 * Check that an ild follows that (with 0 or more
    744 			 * nops between them).
    745 			 */
    746 			ild = this_op(tax->next);
    747 			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
    748 			    BPF_MODE(ild->s.code) != BPF_IND)
    749 				continue;
    750 			/*
    751 			 * We want to turn this sequence:
    752 			 *
    753 			 * (004) ldi     #0x2		{s}
    754 			 * (005) ldxms   [14]		{next}  -- optional
    755 			 * (006) addx			{add}
    756 			 * (007) tax			{tax}
    757 			 * (008) ild     [x+0]		{ild}
    758 			 *
    759 			 * into this sequence:
    760 			 *
    761 			 * (004) nop
    762 			 * (005) ldxms   [14]
    763 			 * (006) nop
    764 			 * (007) nop
    765 			 * (008) ild     [x+2]
    766 			 *
    767 			 * XXX We need to check that X is not
    768 			 * subsequently used, because we want to change
    769 			 * what'll be in it after this sequence.
    770 			 *
    771 			 * We know we can eliminate the accumulator
    772 			 * modifications earlier in the sequence since
    773 			 * it is defined by the last stmt of this sequence
    774 			 * (i.e., the last statement of the sequence loads
    775 			 * a value into the accumulator, so we can eliminate
    776 			 * earlier operations on the accumulator).
    777 			 */
    778 			ild->s.k += s->s.k;
    779 			s->s.code = NOP;
    780 			add->s.code = NOP;
    781 			tax->s.code = NOP;
    782 			done = 0;
    783 		}
    784 	}
    785 	/*
    786 	 * If the comparison at the end of a block is an equality
    787 	 * comparison against a constant, and nobody uses the value
    788 	 * we leave in the A register at the end of a block, and
    789 	 * the operation preceding the comparison is an arithmetic
     790 	 * operation, we can sometimes optimize it away.
    791 	 */
    792 	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
    793 	    !ATOMELEM(b->out_use, A_ATOM)) {
    794 	    	/*
    795 	    	 * We can optimize away certain subtractions of the
    796 	    	 * X register.
    797 	    	 */
    798 		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
    799 			val = b->val[X_ATOM];
    800 			if (vmap[val].is_const) {
    801 				/*
    802 				 * If we have a subtract to do a comparison,
    803 				 * and the X register is a known constant,
    804 				 * we can merge this value into the
    805 				 * comparison:
    806 				 *
    807 				 * sub x  ->	nop
    808 				 * jeq #y	jeq #(x+y)
    809 				 */
    810 				b->s.k += vmap[val].const_val;
    811 				last->s.code = NOP;
    812 				done = 0;
    813 			} else if (b->s.k == 0) {
    814 				/*
    815 				 * If the X register isn't a constant,
    816 				 * and the comparison in the test is
    817 				 * against 0, we can compare with the
    818 				 * X register, instead:
    819 				 *
    820 				 * sub x  ->	nop
    821 				 * jeq #0	jeq x
    822 				 */
    823 				last->s.code = NOP;
    824 				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
    825 				done = 0;
    826 			}
    827 		}
    828 		/*
    829 		 * Likewise, a constant subtract can be simplified:
    830 		 *
    831 		 * sub #x ->	nop
    832 		 * jeq #y ->	jeq #(x+y)
    833 		 */
    834 		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
    835 			last->s.code = NOP;
    836 			b->s.k += last->s.k;
    837 			done = 0;
    838 		}
    839 		/*
    840 		 * And, similarly, a constant AND can be simplified
    841 		 * if we're testing against 0, i.e.:
    842 		 *
    843 		 * and #k	nop
    844 		 * jeq #0  ->	jset #k
    845 		 */
    846 		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
    847 		    b->s.k == 0) {
    848 			b->s.k = last->s.k;
    849 			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
    850 			last->s.code = NOP;
    851 			done = 0;
    852 			opt_not(b);
    853 		}
    854 	}
    855 	/*
    856 	 * jset #0        ->   never
    857 	 * jset #ffffffff ->   always
    858 	 */
    859 	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
    860 		if (b->s.k == 0)
    861 			JT(b) = JF(b);
    862 		if (b->s.k == 0xffffffff)
    863 			JF(b) = JT(b);
    864 	}
    865 	/*
    866 	 * If we're comparing against the index register, and the index
    867 	 * register is a known constant, we can just compare against that
    868 	 * constant.
    869 	 */
    870 	val = b->val[X_ATOM];
    871 	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
    872 		bpf_int32 v = vmap[val].const_val;
    873 		b->s.code &= ~BPF_X;
    874 		b->s.k = v;
    875 	}
    876 	/*
    877 	 * If the accumulator is a known constant, we can compute the
    878 	 * comparison result.
    879 	 */
    880 	val = b->val[A_ATOM];
    881 	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
    882 		bpf_int32 v = vmap[val].const_val;
    883 		switch (BPF_OP(b->s.code)) {
    884 
    885 		case BPF_JEQ:
    886 			v = v == b->s.k;
    887 			break;
    888 
    889 		case BPF_JGT:
    890 			v = (unsigned)v > b->s.k;
    891 			break;
    892 
    893 		case BPF_JGE:
    894 			v = (unsigned)v >= b->s.k;
    895 			break;
    896 
    897 		case BPF_JSET:
    898 			v &= b->s.k;
    899 			break;
    900 
    901 		default:
    902 			abort();
    903 		}
    904 		if (JF(b) != JT(b))
    905 			done = 0;
    906 		if (v)
    907 			JF(b) = JT(b);
    908 		else
    909 			JT(b) = JF(b);
    910 	}
    911 }
    912 
    913 /*
    914  * Compute the symbolic value of expression of 's', and update
    915  * anything it defines in the value table 'val'.  If 'alter' is true,
    916  * do various optimizations.  This code would be cleaner if symbolic
    917  * evaluation and code transformations weren't folded together.
    918  */
    919 static void
    920 opt_stmt(struct stmt *s, int val[], int alter)
    921 {
    922 	int op;
    923 	int v;
    924 
    925 	switch (s->code) {
    926 
    927 	case BPF_LD|BPF_ABS|BPF_W:
    928 	case BPF_LD|BPF_ABS|BPF_H:
    929 	case BPF_LD|BPF_ABS|BPF_B:
    930 		v = F(s->code, s->k, 0L);
    931 		vstore(s, &val[A_ATOM], v, alter);
    932 		break;
    933 
    934 	case BPF_LD|BPF_IND|BPF_W:
    935 	case BPF_LD|BPF_IND|BPF_H:
    936 	case BPF_LD|BPF_IND|BPF_B:
    937 		v = val[X_ATOM];
    938 		if (alter && vmap[v].is_const) {
    939 			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
    940 			s->k += vmap[v].const_val;
    941 			v = F(s->code, s->k, 0L);
    942 			done = 0;
    943 		}
    944 		else
    945 			v = F(s->code, s->k, v);
    946 		vstore(s, &val[A_ATOM], v, alter);
    947 		break;
    948 
    949 	case BPF_LD|BPF_LEN:
    950 		v = F(s->code, 0L, 0L);
    951 		vstore(s, &val[A_ATOM], v, alter);
    952 		break;
    953 
    954 	case BPF_LD|BPF_IMM:
    955 		v = K(s->k);
    956 		vstore(s, &val[A_ATOM], v, alter);
    957 		break;
    958 
    959 	case BPF_LDX|BPF_IMM:
    960 		v = K(s->k);
    961 		vstore(s, &val[X_ATOM], v, alter);
    962 		break;
    963 
    964 	case BPF_LDX|BPF_MSH|BPF_B:
    965 		v = F(s->code, s->k, 0L);
    966 		vstore(s, &val[X_ATOM], v, alter);
    967 		break;
    968 
    969 	case BPF_ALU|BPF_NEG:
    970 		if (alter && vmap[val[A_ATOM]].is_const) {
    971 			s->code = BPF_LD|BPF_IMM;
    972 			s->k = -vmap[val[A_ATOM]].const_val;
    973 			val[A_ATOM] = K(s->k);
    974 		}
    975 		else
    976 			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
    977 		break;
    978 
    979 	case BPF_ALU|BPF_ADD|BPF_K:
    980 	case BPF_ALU|BPF_SUB|BPF_K:
    981 	case BPF_ALU|BPF_MUL|BPF_K:
    982 	case BPF_ALU|BPF_DIV|BPF_K:
    983 	case BPF_ALU|BPF_AND|BPF_K:
    984 	case BPF_ALU|BPF_OR|BPF_K:
    985 	case BPF_ALU|BPF_LSH|BPF_K:
    986 	case BPF_ALU|BPF_RSH|BPF_K:
    987 		op = BPF_OP(s->code);
    988 		if (alter) {
    989 			if (s->k == 0) {
    990 				/* don't optimize away "sub #0"
    991 				 * as it may be needed later to
    992 				 * fixup the generated math code */
    993 				if (op == BPF_ADD ||
    994 				    op == BPF_LSH || op == BPF_RSH ||
    995 				    op == BPF_OR) {
    996 					s->code = NOP;
    997 					break;
    998 				}
    999 				if (op == BPF_MUL || op == BPF_AND) {
   1000 					s->code = BPF_LD|BPF_IMM;
   1001 					val[A_ATOM] = K(s->k);
   1002 					break;
   1003 				}
   1004 			}
   1005 			if (vmap[val[A_ATOM]].is_const) {
   1006 				fold_op(s, val[A_ATOM], K(s->k));
   1007 				val[A_ATOM] = K(s->k);
   1008 				break;
   1009 			}
   1010 		}
   1011 		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
   1012 		break;
   1013 
   1014 	case BPF_ALU|BPF_ADD|BPF_X:
   1015 	case BPF_ALU|BPF_SUB|BPF_X:
   1016 	case BPF_ALU|BPF_MUL|BPF_X:
   1017 	case BPF_ALU|BPF_DIV|BPF_X:
   1018 	case BPF_ALU|BPF_AND|BPF_X:
   1019 	case BPF_ALU|BPF_OR|BPF_X:
   1020 	case BPF_ALU|BPF_LSH|BPF_X:
   1021 	case BPF_ALU|BPF_RSH|BPF_X:
   1022 		op = BPF_OP(s->code);
   1023 		if (alter && vmap[val[X_ATOM]].is_const) {
   1024 			if (vmap[val[A_ATOM]].is_const) {
   1025 				fold_op(s, val[A_ATOM], val[X_ATOM]);
   1026 				val[A_ATOM] = K(s->k);
   1027 			}
   1028 			else {
   1029 				s->code = BPF_ALU|BPF_K|op;
   1030 				s->k = vmap[val[X_ATOM]].const_val;
   1031 				done = 0;
   1032 				val[A_ATOM] =
   1033 					F(s->code, val[A_ATOM], K(s->k));
   1034 			}
   1035 			break;
   1036 		}
   1037 		/*
   1038 		 * Check if we're doing something to an accumulator
   1039 		 * that is 0, and simplify.  This may not seem like
   1040 		 * much of a simplification but it could open up further
   1041 		 * optimizations.
   1042 		 * XXX We could also check for mul by 1, etc.
   1043 		 */
   1044 		if (alter && vmap[val[A_ATOM]].is_const
   1045 		    && vmap[val[A_ATOM]].const_val == 0) {
   1046 			if (op == BPF_ADD || op == BPF_OR) {
   1047 				s->code = BPF_MISC|BPF_TXA;
   1048 				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
   1049 				break;
   1050 			}
   1051 			else if (op == BPF_MUL || op == BPF_DIV ||
   1052 				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
   1053 				s->code = BPF_LD|BPF_IMM;
   1054 				s->k = 0;
   1055 				vstore(s, &val[A_ATOM], K(s->k), alter);
   1056 				break;
   1057 			}
   1058 			else if (op == BPF_NEG) {
   1059 				s->code = NOP;
   1060 				break;
   1061 			}
   1062 		}
   1063 		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
   1064 		break;
   1065 
   1066 	case BPF_MISC|BPF_TXA:
   1067 		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
   1068 		break;
   1069 
   1070 	case BPF_LD|BPF_MEM:
   1071 		v = val[s->k];
   1072 		if (alter && vmap[v].is_const) {
   1073 			s->code = BPF_LD|BPF_IMM;
   1074 			s->k = vmap[v].const_val;
   1075 			done = 0;
   1076 		}
   1077 		vstore(s, &val[A_ATOM], v, alter);
   1078 		break;
   1079 
   1080 	case BPF_MISC|BPF_TAX:
   1081 		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
   1082 		break;
   1083 
   1084 	case BPF_LDX|BPF_MEM:
   1085 		v = val[s->k];
   1086 		if (alter && vmap[v].is_const) {
   1087 			s->code = BPF_LDX|BPF_IMM;
   1088 			s->k = vmap[v].const_val;
   1089 			done = 0;
   1090 		}
   1091 		vstore(s, &val[X_ATOM], v, alter);
   1092 		break;
   1093 
   1094 	case BPF_ST:
   1095 		vstore(s, &val[s->k], val[A_ATOM], alter);
   1096 		break;
   1097 
   1098 	case BPF_STX:
   1099 		vstore(s, &val[s->k], val[X_ATOM], alter);
   1100 		break;
   1101 	}
   1102 }
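
/*
 * Annotation (not part of the original source), one example of the
 * rewrites above: if the index register is known to hold the constant
 * 2, the indirect load "ld [x+14]" (BPF_LD|BPF_IND) becomes the
 * absolute load "ld [16]" (BPF_LD|BPF_ABS), removing the run-time
 * dependency on X.
 */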
   1103 
   1104 static void
   1105 deadstmt(register struct stmt *s, register struct stmt *last[])
   1106 {
   1107 	register int atom;
   1108 
   1109 	atom = atomuse(s);
   1110 	if (atom >= 0) {
   1111 		if (atom == AX_ATOM) {
   1112 			last[X_ATOM] = 0;
   1113 			last[A_ATOM] = 0;
   1114 		}
   1115 		else
   1116 			last[atom] = 0;
   1117 	}
   1118 	atom = atomdef(s);
   1119 	if (atom >= 0) {
   1120 		if (last[atom]) {
   1121 			done = 0;
   1122 			last[atom]->code = NOP;
   1123 		}
   1124 		last[atom] = s;
   1125 	}
   1126 }
   1127 
   1128 static void
   1129 opt_deadstores(register struct block *b)
   1130 {
   1131 	register struct slist *s;
   1132 	register int atom;
   1133 	struct stmt *last[N_ATOMS];
   1134 
   1135 	memset((char *)last, 0, sizeof last);
   1136 
   1137 	for (s = b->stmts; s != 0; s = s->next)
   1138 		deadstmt(&s->s, last);
   1139 	deadstmt(&b->s, last);
   1140 
   1141 	for (atom = 0; atom < N_ATOMS; ++atom)
   1142 		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
   1143 			last[atom]->code = NOP;
   1144 			done = 0;
   1145 		}
   1146 }
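
/*
 * Annotation (not part of the original source): together, deadstmt()
 * and opt_deadstores() NOP out definitions that are never consumed.
 * E.g. in "st M[2]; ld #1; st M[2]" the first store is overwritten
 * with no intervening "ld M[2]", so it becomes a NOP; the second
 * store survives only if slot 2 is in b->out_use.
 */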
   1147 
   1148 static void
   1149 opt_blk(struct block *b, int do_stmts)
   1150 {
   1151 	struct slist *s;
   1152 	struct edge *p;
   1153 	int i;
   1154 	bpf_int32 aval, xval;
   1155 
   1156 #if 0
   1157 	for (s = b->stmts; s && s->next; s = s->next)
   1158 		if (BPF_CLASS(s->s.code) == BPF_JMP) {
   1159 			do_stmts = 0;
   1160 			break;
   1161 		}
   1162 #endif
   1163 
   1164 	/*
   1165 	 * Initialize the atom values.
   1166 	 */
   1167 	p = b->in_edges;
   1168 	if (p == 0) {
   1169 		/*
   1170 		 * We have no predecessors, so everything is undefined
   1171 		 * upon entry to this block.
   1172 		 */
   1173 		memset((char *)b->val, 0, sizeof(b->val));
   1174 	} else {
   1175 		/*
   1176 		 * Inherit values from our predecessors.
   1177 		 *
   1178 		 * First, get the values from the predecessor along the
   1179 		 * first edge leading to this node.
   1180 		 */
   1181 		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
   1182 		/*
   1183 		 * Now look at all the other nodes leading to this node.
   1184 		 * If, for the predecessor along that edge, a register
   1185 		 * has a different value from the one we have (i.e.,
   1186 		 * control paths are merging, and the merging paths
   1187 		 * assign different values to that register), give the
   1188 		 * register the undefined value of 0.
   1189 		 */
   1190 		while ((p = p->next) != NULL) {
   1191 			for (i = 0; i < N_ATOMS; ++i)
   1192 				if (b->val[i] != p->pred->val[i])
   1193 					b->val[i] = 0;
   1194 		}
   1195 	}
   1196 	aval = b->val[A_ATOM];
   1197 	xval = b->val[X_ATOM];
   1198 	for (s = b->stmts; s; s = s->next)
   1199 		opt_stmt(&s->s, b->val, do_stmts);
   1200 
   1201 	/*
   1202 	 * This is a special case: if we don't use anything from this
   1203 	 * block, and we load the accumulator or index register with a
   1204 	 * value that is already there, or if this block is a return,
   1205 	 * eliminate all the statements.
   1206 	 *
   1207 	 * XXX - what if it does a store?
   1208 	 *
   1209 	 * XXX - why does it matter whether we use anything from this
   1210 	 * block?  If the accumulator or index register doesn't change
   1211 	 * its value, isn't that OK even if we use that value?
   1212 	 *
   1213 	 * XXX - if we load the accumulator with a different value,
   1214 	 * and the block ends with a conditional branch, we obviously
   1215 	 * can't eliminate it, as the branch depends on that value.
   1216 	 * For the index register, the conditional branch only depends
   1217 	 * on the index register value if the test is against the index
   1218 	 * register value rather than a constant; if nothing uses the
   1219 	 * value we put into the index register, and we're not testing
   1220 	 * against the index register's value, and there aren't any
   1221 	 * other problems that would keep us from eliminating this
   1222 	 * block, can we eliminate it?
   1223 	 */
   1224 	if (do_stmts &&
   1225 	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
   1226 	      xval != 0 && b->val[X_ATOM] == xval) ||
   1227 	     BPF_CLASS(b->s.code) == BPF_RET)) {
   1228 		if (b->stmts != 0) {
   1229 			b->stmts = 0;
   1230 			done = 0;
   1231 		}
   1232 	} else {
   1233 		opt_peep(b);
   1234 		opt_deadstores(b);
   1235 	}
   1236 	/*
   1237 	 * Set up values for branch optimizer.
   1238 	 */
   1239 	if (BPF_SRC(b->s.code) == BPF_K)
   1240 		b->oval = K(b->s.k);
   1241 	else
   1242 		b->oval = b->val[X_ATOM];
   1243 	b->et.code = b->s.code;
   1244 	b->ef.code = -b->s.code;
   1245 }
   1246 
   1247 /*
   1248  * Return true if any register that is used on exit from 'succ', has
   1249  * an exit value that is different from the corresponding exit value
   1250  * from 'b'.
   1251  */
   1252 static int
   1253 use_conflict(struct block *b, struct block *succ)
   1254 {
   1255 	int atom;
   1256 	atomset use = succ->out_use;
   1257 
   1258 	if (use == 0)
   1259 		return 0;
   1260 
   1261 	for (atom = 0; atom < N_ATOMS; ++atom)
   1262 		if (ATOMELEM(use, atom))
   1263 			if (b->val[atom] != succ->val[atom])
   1264 				return 1;
   1265 	return 0;
   1266 }
   1267 
   1268 static struct block *
   1269 fold_edge(struct block *child, struct edge *ep)
   1270 {
   1271 	int sense;
   1272 	int aval0, aval1, oval0, oval1;
   1273 	int code = ep->code;
   1274 
   1275 	if (code < 0) {
   1276 		code = -code;
   1277 		sense = 0;
   1278 	} else
   1279 		sense = 1;
   1280 
   1281 	if (child->s.code != code)
   1282 		return 0;
   1283 
   1284 	aval0 = child->val[A_ATOM];
   1285 	oval0 = child->oval;
   1286 	aval1 = ep->pred->val[A_ATOM];
   1287 	oval1 = ep->pred->oval;
   1288 
   1289 	if (aval0 != aval1)
   1290 		return 0;
   1291 
   1292 	if (oval0 == oval1)
   1293 		/*
   1294 		 * The operands of the branch instructions are
   1295 		 * identical, so the result is true if a true
   1296 		 * branch was taken to get here, otherwise false.
   1297 		 */
   1298 		return sense ? JT(child) : JF(child);
   1299 
   1300 	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
   1301 		/*
   1302 		 * At this point, we only know the comparison if we
   1303 		 * came down the true branch, and it was an equality
   1304 		 * comparison with a constant.
   1305 		 *
   1306 		 * I.e., if we came down the true branch, and the branch
   1307 		 * was an equality comparison with a constant, we know the
   1308 		 * accumulator contains that constant.  If we came down
   1309 		 * the false branch, or the comparison wasn't with a
   1310 		 * constant, we don't know what was in the accumulator.
   1311 		 *
   1312 		 * We rely on the fact that distinct constants have distinct
   1313 		 * value numbers.
   1314 		 */
   1315 		return JF(child);
   1316 
   1317 	return 0;
   1318 }
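
/*
 * Annotation (not part of the original source): concretely, if the
 * edge's predecessor ended with "jeq #0x800" and 'child' performs the
 * same "jeq #0x800" on the same value number in A, the child's
 * outcome is already decided: JT(child) if we arrived on the true
 * edge, JF(child) if we arrived on the false edge.
 */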
   1319 
   1320 static void
   1321 opt_j(struct edge *ep)
   1322 {
   1323 	register int i, k;
   1324 	register struct block *target;
   1325 
   1326 	if (JT(ep->succ) == 0)
   1327 		return;
   1328 
   1329 	if (JT(ep->succ) == JF(ep->succ)) {
   1330 		/*
   1331 		 * Common branch targets can be eliminated, provided
   1332 		 * there is no data dependency.
   1333 		 */
   1334 		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
   1335 			done = 0;
   1336 			ep->succ = JT(ep->succ);
   1337 		}
   1338 	}
   1339 	/*
   1340 	 * For each edge dominator that matches the successor of this
    1341 	 * edge, promote the edge successor to its grandchild.
   1342 	 *
    1343 	 * XXX We violate the set abstraction here in favor of a reasonably
   1344 	 * efficient loop.
   1345 	 */
   1346  top:
   1347 	for (i = 0; i < edgewords; ++i) {
   1348 		register bpf_u_int32 x = ep->edom[i];
   1349 
   1350 		while (x != 0) {
   1351 			k = ffs(x) - 1;
   1352 			x &=~ (1 << k);
   1353 			k += i * BITS_PER_WORD;
   1354 
   1355 			target = fold_edge(ep->succ, edges[k]);
   1356 			/*
   1357 			 * Check that there is no data dependency between
   1358 			 * nodes that will be violated if we move the edge.
   1359 			 */
   1360 			if (target != 0 && !use_conflict(ep->pred, target)) {
   1361 				done = 0;
   1362 				ep->succ = target;
   1363 				if (JT(target) != 0)
   1364 					/*
   1365 					 * Start over unless we hit a leaf.
   1366 					 */
   1367 					goto top;
   1368 				return;
   1369 			}
   1370 		}
   1371 	}
   1372 }
   1373 
   1374 
   1375 static void
   1376 or_pullup(struct block *b)
   1377 {
   1378 	int val, at_top;
   1379 	struct block *pull;
   1380 	struct block **diffp, **samep;
   1381 	struct edge *ep;
   1382 
   1383 	ep = b->in_edges;
   1384 	if (ep == 0)
   1385 		return;
   1386 
   1387 	/*
   1388 	 * Make sure each predecessor loads the same value.
   1389 	 * XXX why?
   1390 	 */
   1391 	val = ep->pred->val[A_ATOM];
   1392 	for (ep = ep->next; ep != 0; ep = ep->next)
   1393 		if (val != ep->pred->val[A_ATOM])
   1394 			return;
   1395 
   1396 	if (JT(b->in_edges->pred) == b)
   1397 		diffp = &JT(b->in_edges->pred);
   1398 	else
   1399 		diffp = &JF(b->in_edges->pred);
   1400 
   1401 	at_top = 1;
   1402 	while (1) {
   1403 		if (*diffp == 0)
   1404 			return;
   1405 
   1406 		if (JT(*diffp) != JT(b))
   1407 			return;
   1408 
   1409 		if (!SET_MEMBER((*diffp)->dom, b->id))
   1410 			return;
   1411 
   1412 		if ((*diffp)->val[A_ATOM] != val)
   1413 			break;
   1414 
   1415 		diffp = &JF(*diffp);
   1416 		at_top = 0;
   1417 	}
   1418 	samep = &JF(*diffp);
   1419 	while (1) {
   1420 		if (*samep == 0)
   1421 			return;
   1422 
   1423 		if (JT(*samep) != JT(b))
   1424 			return;
   1425 
   1426 		if (!SET_MEMBER((*samep)->dom, b->id))
   1427 			return;
   1428 
   1429 		if ((*samep)->val[A_ATOM] == val)
   1430 			break;
   1431 
   1432 		/* XXX Need to check that there are no data dependencies
   1433 		   between dp0 and dp1.  Currently, the code generator
   1434 		   will not produce such dependencies. */
   1435 		samep = &JF(*samep);
   1436 	}
   1437 #ifdef notdef
   1438 	/* XXX This doesn't cover everything. */
   1439 	for (i = 0; i < N_ATOMS; ++i)
   1440 		if ((*samep)->val[i] != pred->val[i])
   1441 			return;
   1442 #endif
   1443 	/* Pull up the node. */
   1444 	pull = *samep;
   1445 	*samep = JF(pull);
   1446 	JF(pull) = *diffp;
   1447 
   1448 	/*
   1449 	 * At the top of the chain, each predecessor needs to point at the
   1450 	 * pulled up node.  Inside the chain, there is only one predecessor
   1451 	 * to worry about.
   1452 	 */
   1453 	if (at_top) {
   1454 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
   1455 			if (JT(ep->pred) == b)
   1456 				JT(ep->pred) = pull;
   1457 			else
   1458 				JF(ep->pred) = pull;
   1459 		}
   1460 	}
   1461 	else
   1462 		*diffp = pull;
   1463 
   1464 	done = 0;
   1465 }
   1466 
   1467 static void
   1468 and_pullup(struct block *b)
   1469 {
   1470 	int val, at_top;
   1471 	struct block *pull;
   1472 	struct block **diffp, **samep;
   1473 	struct edge *ep;
   1474 
   1475 	ep = b->in_edges;
   1476 	if (ep == 0)
   1477 		return;
   1478 
   1479 	/*
   1480 	 * Make sure each predecessor loads the same value.
   1481 	 */
   1482 	val = ep->pred->val[A_ATOM];
   1483 	for (ep = ep->next; ep != 0; ep = ep->next)
   1484 		if (val != ep->pred->val[A_ATOM])
   1485 			return;
   1486 
   1487 	if (JT(b->in_edges->pred) == b)
   1488 		diffp = &JT(b->in_edges->pred);
   1489 	else
   1490 		diffp = &JF(b->in_edges->pred);
   1491 
   1492 	at_top = 1;
   1493 	while (1) {
   1494 		if (*diffp == 0)
   1495 			return;
   1496 
   1497 		if (JF(*diffp) != JF(b))
   1498 			return;
   1499 
   1500 		if (!SET_MEMBER((*diffp)->dom, b->id))
   1501 			return;
   1502 
   1503 		if ((*diffp)->val[A_ATOM] != val)
   1504 			break;
   1505 
   1506 		diffp = &JT(*diffp);
   1507 		at_top = 0;
   1508 	}
   1509 	samep = &JT(*diffp);
   1510 	while (1) {
   1511 		if (*samep == 0)
   1512 			return;
   1513 
   1514 		if (JF(*samep) != JF(b))
   1515 			return;
   1516 
   1517 		if (!SET_MEMBER((*samep)->dom, b->id))
   1518 			return;
   1519 
   1520 		if ((*samep)->val[A_ATOM] == val)
   1521 			break;
   1522 
   1523 		/* XXX Need to check that there are no data dependencies
   1524 		   between diffp and samep.  Currently, the code generator
   1525 		   will not produce such dependencies. */
   1526 		samep = &JT(*samep);
   1527 	}
   1528 #ifdef notdef
   1529 	/* XXX This doesn't cover everything. */
   1530 	for (i = 0; i < N_ATOMS; ++i)
   1531 		if ((*samep)->val[i] != pred->val[i])
   1532 			return;
   1533 #endif
   1534 	/* Pull up the node. */
   1535 	pull = *samep;
   1536 	*samep = JT(pull);
   1537 	JT(pull) = *diffp;
   1538 
   1539 	/*
   1540 	 * At the top of the chain, each predecessor needs to point at the
   1541 	 * pulled up node.  Inside the chain, there is only one predecessor
   1542 	 * to worry about.
   1543 	 */
   1544 	if (at_top) {
   1545 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
   1546 			if (JT(ep->pred) == b)
   1547 				JT(ep->pred) = pull;
   1548 			else
   1549 				JF(ep->pred) = pull;
   1550 		}
   1551 	}
   1552 	else
   1553 		*diffp = pull;
   1554 
   1555 	done = 0;
   1556 }
   1557 
   1558 static void
   1559 opt_blks(struct block *root, int do_stmts)
   1560 {
   1561 	int i, maxlevel;
   1562 	struct block *p;
   1563 
   1564 	init_val();
   1565 	maxlevel = root->level;
   1566 
   1567 	find_inedges(root);
   1568 	for (i = maxlevel; i >= 0; --i)
   1569 		for (p = levels[i]; p; p = p->link)
   1570 			opt_blk(p, do_stmts);
   1571 
   1572 	if (do_stmts)
   1573 		/*
   1574 		 * No point trying to move branches; it can't possibly
   1575 		 * make a difference at this point.
   1576 		 */
   1577 		return;
   1578 
   1579 	for (i = 1; i <= maxlevel; ++i) {
   1580 		for (p = levels[i]; p; p = p->link) {
   1581 			opt_j(&p->et);
   1582 			opt_j(&p->ef);
   1583 		}
   1584 	}
   1585 
   1586 	find_inedges(root);
   1587 	for (i = 1; i <= maxlevel; ++i) {
   1588 		for (p = levels[i]; p; p = p->link) {
   1589 			or_pullup(p);
   1590 			and_pullup(p);
   1591 		}
   1592 	}
   1593 }
   1594 
   1595 static inline void
   1596 link_inedge(struct edge *parent, struct block *child)
   1597 {
   1598 	parent->next = child->in_edges;
   1599 	child->in_edges = parent;
   1600 }
   1601 
   1602 static void
   1603 find_inedges(struct block *root)
   1604 {
   1605 	int i;
   1606 	struct block *b;
   1607 
   1608 	for (i = 0; i < n_blocks; ++i)
   1609 		blocks[i]->in_edges = 0;
   1610 
   1611 	/*
   1612 	 * Traverse the graph, adding each edge to the predecessor
   1613 	 * list of its successors.  Skip the leaves (i.e. level 0).
   1614 	 */
   1615 	for (i = root->level; i > 0; --i) {
   1616 		for (b = levels[i]; b != 0; b = b->link) {
   1617 			link_inedge(&b->et, JT(b));
   1618 			link_inedge(&b->ef, JF(b));
   1619 		}
   1620 	}
   1621 }
   1622 
   1623 static void
   1624 opt_root(struct block **b)
   1625 {
   1626 	struct slist *tmp, *s;
   1627 
   1628 	s = (*b)->stmts;
   1629 	(*b)->stmts = 0;
   1630 	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
   1631 		*b = JT(*b);
   1632 
   1633 	tmp = (*b)->stmts;
   1634 	if (tmp != 0)
   1635 		sappend(s, tmp);
   1636 	(*b)->stmts = s;
   1637 
   1638 	/*
   1639 	 * If the root node is a return, then there is no
   1640 	 * point executing any statements (since the bpf machine
   1641 	 * has no side effects).
   1642 	 */
   1643 	if (BPF_CLASS((*b)->s.code) == BPF_RET)
   1644 		(*b)->stmts = 0;
   1645 }
   1646 
   1647 static void
   1648 opt_loop(struct block *root, int do_stmts)
   1649 {
   1650 
   1651 #ifdef BDEBUG
   1652 	if (dflag > 1) {
   1653 		printf("opt_loop(root, %d) begin\n", do_stmts);
   1654 		opt_dump(root);
   1655 	}
   1656 #endif
   1657 	do {
   1658 		done = 1;
   1659 		find_levels(root);
   1660 		find_dom(root);
   1661 		find_closure(root);
   1662 		find_ud(root);
   1663 		find_edom(root);
   1664 		opt_blks(root, do_stmts);
   1665 #ifdef BDEBUG
   1666 		if (dflag > 1) {
   1667 			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
   1668 			opt_dump(root);
   1669 		}
   1670 #endif
   1671 	} while (!done);
   1672 }
   1673 
   1674 /*
   1675  * Optimize the filter code in its dag representation.
   1676  */
   1677 void
   1678 bpf_optimize(struct block **rootp)
   1679 {
   1680 	struct block *root;
   1681 
   1682 	root = *rootp;
   1683 
   1684 	opt_init(root);
   1685 	opt_loop(root, 0);
   1686 	opt_loop(root, 1);
   1687 	intern_blocks(root);
   1688 #ifdef BDEBUG
   1689 	if (dflag > 1) {
   1690 		printf("after intern_blocks()\n");
   1691 		opt_dump(root);
   1692 	}
   1693 #endif
   1694 	opt_root(rootp);
   1695 #ifdef BDEBUG
   1696 	if (dflag > 1) {
   1697 		printf("after opt_root()\n");
   1698 		opt_dump(root);
   1699 	}
   1700 #endif
   1701 	opt_cleanup();
   1702 }
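
/*
 * Annotation (not part of the original source): bpf_optimize() is
 * called on the flow graph built by the code generator in gencode.c,
 * roughly:
 *
 *	struct block *root = ...;	// from the code generator
 *	bpf_optimize(&root);
 *
 * The caller must pass the root by reference because opt_root() can
 * skip leading jumps whose two branches coincide, replacing the root
 * block itself.
 */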
   1703 
   1704 static void
   1705 make_marks(struct block *p)
   1706 {
   1707 	if (!isMarked(p)) {
   1708 		Mark(p);
   1709 		if (BPF_CLASS(p->s.code) != BPF_RET) {
   1710 			make_marks(JT(p));
   1711 			make_marks(JF(p));
   1712 		}
   1713 	}
   1714 }
   1715 
   1716 /*
   1717  * Mark code array such that isMarked(i) is true
   1718  * only for nodes that are alive.
   1719  */
   1720 static void
   1721 mark_code(struct block *p)
   1722 {
   1723 	cur_mark += 1;
   1724 	make_marks(p);
   1725 }
   1726 
   1727 /*
    1728  * True iff the two stmt lists are structurally identical (ignoring
    1729  * NOPs): same opcodes and operands, in the same order.
   1730  */
   1731 static int
   1732 eq_slist(struct slist *x, struct slist *y)
   1733 {
   1734 	while (1) {
   1735 		while (x && x->s.code == NOP)
   1736 			x = x->next;
   1737 		while (y && y->s.code == NOP)
   1738 			y = y->next;
   1739 		if (x == 0)
   1740 			return y == 0;
   1741 		if (y == 0)
   1742 			return x == 0;
   1743 		if (x->s.code != y->s.code || x->s.k != y->s.k)
   1744 			return 0;
   1745 		x = x->next;
   1746 		y = y->next;
   1747 	}
   1748 }
   1749 
   1750 static inline int
   1751 eq_blk(struct block *b0, struct block *b1)
   1752 {
   1753 	if (b0->s.code == b1->s.code &&
   1754 	    b0->s.k == b1->s.k &&
   1755 	    b0->et.succ == b1->et.succ &&
   1756 	    b0->ef.succ == b1->ef.succ)
   1757 		return eq_slist(b0->stmts, b1->stmts);
   1758 	return 0;
   1759 }
   1760 
   1761 static void
   1762 intern_blocks(struct block *root)
   1763 {
   1764 	struct block *p;
   1765 	int i, j;
   1766 	int done1; /* don't shadow global */
   1767  top:
   1768 	done1 = 1;
   1769 	for (i = 0; i < n_blocks; ++i)
   1770 		blocks[i]->link = 0;
   1771 
   1772 	mark_code(root);
   1773 
   1774 	for (i = n_blocks - 1; --i >= 0; ) {
   1775 		if (!isMarked(blocks[i]))
   1776 			continue;
   1777 		for (j = i + 1; j < n_blocks; ++j) {
   1778 			if (!isMarked(blocks[j]))
   1779 				continue;
   1780 			if (eq_blk(blocks[i], blocks[j])) {
   1781 				blocks[i]->link = blocks[j]->link ?
   1782 					blocks[j]->link : blocks[j];
   1783 				break;
   1784 			}
   1785 		}
   1786 	}
   1787 	for (i = 0; i < n_blocks; ++i) {
   1788 		p = blocks[i];
   1789 		if (JT(p) == 0)
   1790 			continue;
   1791 		if (JT(p)->link) {
   1792 			done1 = 0;
   1793 			JT(p) = JT(p)->link;
   1794 		}
   1795 		if (JF(p)->link) {
   1796 			done1 = 0;
   1797 			JF(p) = JF(p)->link;
   1798 		}
   1799 	}
   1800 	if (!done1)
   1801 		goto top;
   1802 }
   1803 
   1804 static void
   1805 opt_cleanup(void)
   1806 {
   1807 	free((void *)vnode_base);
   1808 	free((void *)vmap);
   1809 	free((void *)edges);
   1810 	free((void *)space);
   1811 	free((void *)levels);
   1812 	free((void *)blocks);
   1813 }
   1814 
   1815 /*
   1816  * Return the number of stmts in 's'.
   1817  */
   1818 static u_int
   1819 slength(struct slist *s)
   1820 {
   1821 	u_int n = 0;
   1822 
   1823 	for (; s; s = s->next)
   1824 		if (s->s.code != NOP)
   1825 			++n;
   1826 	return n;
   1827 }
   1828 
   1829 /*
   1830  * Return the number of nodes reachable by 'p'.
   1831  * All nodes should be initially unmarked.
   1832  */
   1833 static int
   1834 count_blocks(struct block *p)
   1835 {
   1836 	if (p == 0 || isMarked(p))
   1837 		return 0;
   1838 	Mark(p);
   1839 	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
   1840 }
   1841 
   1842 /*
    1843  * Do a depth first search on the flow graph, numbering the
    1844  * basic blocks, and entering them into the 'blocks' array.
   1845  */
   1846 static void
   1847 number_blks_r(struct block *p)
   1848 {
   1849 	int n;
   1850 
   1851 	if (p == 0 || isMarked(p))
   1852 		return;
   1853 
   1854 	Mark(p);
   1855 	n = n_blocks++;
   1856 	p->id = n;
   1857 	blocks[n] = p;
   1858 
   1859 	number_blks_r(JT(p));
   1860 	number_blks_r(JF(p));
   1861 }
   1862 
   1863 /*
   1864  * Return the number of stmts in the flowgraph reachable by 'p'.
   1865  * The nodes should be unmarked before calling.
   1866  *
   1867  * Note that "stmts" means "instructions", and that this includes
   1868  *
   1869  *	side-effect statements in 'p' (slength(p->stmts));
   1870  *
   1871  *	statements in the true branch from 'p' (count_stmts(JT(p)));
   1872  *
   1873  *	statements in the false branch from 'p' (count_stmts(JF(p)));
   1874  *
   1875  *	the conditional jump itself (1);
   1876  *
   1877  *	an extra long jump if the true branch requires it (p->longjt);
   1878  *
   1879  *	an extra long jump if the false branch requires it (p->longjf).
   1880  */
   1881 static u_int
   1882 count_stmts(struct block *p)
   1883 {
   1884 	u_int n;
   1885 
   1886 	if (p == 0 || isMarked(p))
   1887 		return 0;
   1888 	Mark(p);
   1889 	n = count_stmts(JT(p)) + count_stmts(JF(p));
   1890 	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
   1891 }
   1892 
   1893 /*
   1894  * Allocate memory.  All allocation is done before optimization
   1895  * is begun.  A linear bound on the size of all data structures is computed
   1896  * from the total number of blocks and/or statements.
   1897  */
   1898 static void
   1899 opt_init(struct block *root)
   1900 {
   1901 	bpf_u_int32 *p;
   1902 	int i, n, max_stmts;
   1903 
   1904 	/*
   1905 	 * First, count the blocks, so we can malloc an array to map
   1906 	 * block number to block.  Then, put the blocks into the array.
   1907 	 */
   1908 	unMarkAll();
   1909 	n = count_blocks(root);
   1910 	blocks = (struct block **)calloc(n, sizeof(*blocks));
   1911 	if (blocks == NULL)
   1912 		bpf_error("malloc");
   1913 	unMarkAll();
   1914 	n_blocks = 0;
   1915 	number_blks_r(root);
   1916 
   1917 	n_edges = 2 * n_blocks;
   1918 	edges = (struct edge **)calloc(n_edges, sizeof(*edges));
   1919 	if (edges == NULL)
   1920 		bpf_error("malloc");
   1921 
   1922 	/*
   1923 	 * The number of levels is bounded by the number of nodes.
   1924 	 */
   1925 	levels = (struct block **)calloc(n_blocks, sizeof(*levels));
   1926 	if (levels == NULL)
   1927 		bpf_error("malloc");
   1928 
   1929 	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
   1930 	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;
   1931 
   1932 	/* XXX */
   1933 	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
   1934 				 + n_edges * edgewords * sizeof(*space));
   1935 	if (space == NULL)
   1936 		bpf_error("malloc");
   1937 	p = space;
   1938 	all_dom_sets = p;
   1939 	for (i = 0; i < n; ++i) {
   1940 		blocks[i]->dom = p;
   1941 		p += nodewords;
   1942 	}
   1943 	all_closure_sets = p;
   1944 	for (i = 0; i < n; ++i) {
   1945 		blocks[i]->closure = p;
   1946 		p += nodewords;
   1947 	}
   1948 	all_edge_sets = p;
   1949 	for (i = 0; i < n; ++i) {
   1950 		register struct block *b = blocks[i];
   1951 
   1952 		b->et.edom = p;
   1953 		p += edgewords;
   1954 		b->ef.edom = p;
   1955 		p += edgewords;
   1956 		b->et.id = i;
   1957 		edges[i] = &b->et;
   1958 		b->ef.id = n_blocks + i;
   1959 		edges[n_blocks + i] = &b->ef;
   1960 		b->et.pred = b;
   1961 		b->ef.pred = b;
   1962 	}
   1963 	max_stmts = 0;
   1964 	for (i = 0; i < n; ++i)
   1965 		max_stmts += slength(blocks[i]->stmts) + 1;
   1966 	/*
   1967 	 * We allocate at most 3 value numbers per statement,
   1968 	 * so this is an upper bound on the number of valnodes
   1969 	 * we'll need.
   1970 	 */
   1971 	maxval = 3 * max_stmts;
   1972 	vmap = (struct vmapinfo *)calloc(maxval, sizeof(*vmap));
   1973 	vnode_base = (struct valnode *)calloc(maxval, sizeof(*vnode_base));
   1974 	if (vmap == NULL || vnode_base == NULL)
   1975 		bpf_error("malloc");
   1976 }
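
/*
 * A sizing sketch with assumed numbers: for n_blocks = 100 (hence
 * n_edges = 200) and a 32-bit bpf_u_int32, nodewords = 100/32 + 1 = 4
 * and edgewords = 200/32 + 1 = 7, so 'space' holds 2*100*4 = 800 words
 * of dominator and closure sets plus 200*7 = 1400 words of
 * edge-dominator sets, all carved from the single malloc above.
 */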
   1977 
   1978 /*
   1979  * Some pointers used to convert the basic block form of the code,
   1980  * into the array form that BPF requires.  'fstart' will point to
   1981  * the malloc'd array while 'ftail' is used during the recursive traversal.
   1982  */
   1983 static struct bpf_insn *fstart;
   1984 static struct bpf_insn *ftail;
   1985 
   1986 #ifdef BDEBUG
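/* bids[i]: id + 1 of the block whose branch is emitted at instruction i;
   0 = none (set in convert_code_r()) */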
   1987 int bids[1000];
   1988 #endif
   1989 
   1990 /*
   1991  * Returns true if successful.  Returns false if a branch has
   1992  * an offset that is too large.  If so, we have marked that
   1993  * branch so that on a subsequent iteration, it will be treated
   1994  * properly.
   1995  */
   1996 static int
   1997 convert_code_r(struct block *p)
   1998 {
   1999 	struct bpf_insn *dst;
   2000 	struct slist *src;
   2001 	int slen;
   2002 	u_int off;
   2003 	int extrajmps;		/* number of extra jumps inserted */
   2004 	struct slist **offset = NULL;
   2005 
   2006 	if (p == 0 || isMarked(p))
   2007 		return (1);
   2008 	Mark(p);
   2009 
   2010 	if (convert_code_r(JF(p)) == 0)
   2011 		return (0);
   2012 	if (convert_code_r(JT(p)) == 0)
   2013 		return (0);
   2014 
   2015 	slen = slength(p->stmts);
   2016 	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
   2017 		/* inflate length by any extra jumps */
   2018 
   2019 	p->offset = dst - fstart;
   2020 
   2021 	/* generate offset[] for convenience  */
   2022 	if (slen) {
   2023 		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
   2024 		if (!offset) {
   2025 			bpf_error("not enough core");
   2026 			/*NOTREACHED*/
   2027 		}
   2028 	}
   2029 	src = p->stmts;
   2030 	for (off = 0; off < slen && src; off++) {
   2031 #if 0
   2032 		printf("off=%d src=%x\n", off, src);
   2033 #endif
   2034 		offset[off] = src;
   2035 		src = src->next;
   2036 	}
   2037 
   2038 	off = 0;
   2039 	for (src = p->stmts; src; src = src->next) {
   2040 		if (src->s.code == NOP)
   2041 			continue;
   2042 		dst->code = (u_short)src->s.code;
   2043 		dst->k = src->s.k;
   2044 
   2045 		/* fill block-local relative jump */
   2046 		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
   2047 #if 0
   2048 			if (src->s.jt || src->s.jf) {
   2049 				bpf_error("illegal jmp destination");
   2050 				/*NOTREACHED*/
   2051 			}
   2052 #endif
   2053 			goto filled;
   2054 		}
   2055 		if (off == slen - 2)	/*???*/
   2056 			goto filled;
   2057 
   2058 	    {
   2059 		int i;
   2060 		int jt, jf;
   2061 		const char *ljerr = "%s for block-local relative jump: off=%d";
   2062 
   2063 #if 0
   2064 		printf("code=%x off=%d %x %x\n", src->s.code,
   2065 			off, src->s.jt, src->s.jf);
   2066 #endif
   2067 
   2068 		if (!src->s.jt || !src->s.jf) {
   2069 			bpf_error(ljerr, "no jmp destination", off);
   2070 			/*NOTREACHED*/
   2071 		}
   2072 
   2073 		jt = jf = 0;
   2074 		for (i = 0; i < slen; i++) {
   2075 			if (offset[i] == src->s.jt) {
   2076 				if (jt) {
   2077 					bpf_error(ljerr, "multiple matches", off);
   2078 					/*NOTREACHED*/
   2079 				}
   2080 
   2081 				dst->jt = i - off - 1;
   2082 				jt++;
   2083 			}
   2084 			if (offset[i] == src->s.jf) {
   2085 				if (jf) {
   2086 					bpf_error(ljerr, "multiple matches", off);
   2087 					/*NOTREACHED*/
   2088 				}
   2089 				dst->jf = i - off - 1;
   2090 				jf++;
   2091 			}
   2092 		}
   2093 		if (!jt || !jf) {
   2094 			bpf_error(ljerr, "no destination found", off);
   2095 			/*NOTREACHED*/
   2096 		}
   2097 	    }
   2098 filled:
   2099 		++dst;
   2100 		++off;
   2101 	}
   2102 	if (offset)
   2103 		free(offset);
   2104 
   2105 #ifdef BDEBUG
   2106 	bids[dst - fstart] = p->id + 1;
   2107 #endif
   2108 	dst->code = (u_short)p->s.code;
   2109 	dst->k = p->s.k;
   2110 	if (JT(p)) {
   2111 		extrajmps = 0;
   2112 		off = JT(p)->offset - (p->offset + slen) - 1;
   2113 		if (off >= 256) {
   2114 		    /* offset too large for branch, must add a jump */
   2115 		    if (p->longjt == 0) {
   2116 		    	/* mark this instruction and retry */
   2117 			p->longjt++;
   2118 			return(0);
   2119 		    }
   2120 		    /* branch if T to following jump */
   2121 		    dst->jt = extrajmps;
   2122 		    extrajmps++;
   2123 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
   2124 		    dst[extrajmps].k = off - extrajmps;
   2125 		}
   2126 		else
   2127 		    dst->jt = off;
   2128 		off = JF(p)->offset - (p->offset + slen) - 1;
   2129 		if (off >= 256) {
   2130 		    /* offset too large for branch, must add a jump */
   2131 		    if (p->longjf == 0) {
   2132 		    	/* mark this instruction and retry */
   2133 			p->longjf++;
   2134 			return(0);
   2135 		    }
   2136 		    /* branch if F to following jump */
   2137 		    /* if two jumps are inserted, F goes to second one */
   2138 		    dst->jf = extrajmps;
   2139 		    extrajmps++;
   2140 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
   2141 		    dst[extrajmps].k = off - extrajmps;
   2142 		}
   2143 		else
   2144 		    dst->jf = off;
   2145 	}
   2146 	return (1);
   2147 }
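
/*
 * Sketch of the long-jump rewrite above, with assumed numbers: a
 * conditional whose true target lies 300 instructions ahead cannot be
 * encoded in the 8-bit jt field, so once p->longjt is set the retry
 * emits
 *
 *	jxx ..., jt 0, jf ...	; branch to the trampoline that follows
 *	ja 299			; BPF_JMP|BPF_JA, k = off - extrajmps
 *
 * which is why each block's emitted length is inflated by p->longjt
 * and p->longjf.
 */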
   2148 
   2149 
   2150 /*
   2151  * Convert flowgraph intermediate representation to the
   2152  * BPF array representation.  Set *lenp to the number of instructions.
   2153  *
   2154  * This routine does *NOT* leak the memory pointed to by fp.  It *must
   2155  * not* do free(fp) before returning fp; doing so would make no sense,
   2156  * as the BPF array pointed to by the return value of icode_to_fcode()
   2157  * must be valid - it's being returned for use in a bpf_program structure.
   2158  *
   2159  * If it appears that icode_to_fcode() is leaking, the problem is that
   2160  * the program using pcap_compile() is failing to free the memory in
   2161  * the BPF program when it's done - the leak is in the program, not in
   2162  * the routine that happens to be allocating the memory.  (By analogy, if
   2163  * a program calls fopen() without ever calling fclose() on the FILE *,
   2164  * it will leak the FILE structure; the leak is not in fopen(), it's in
   2165  * the program.)  Change the program to use pcap_freecode() when it's
   2166  * done with the filter program.  See the pcap man page.
   2167  */
   2168 struct bpf_insn *
   2169 icode_to_fcode(struct block *root, u_int *lenp)
   2170 {
   2171 	u_int n;
   2172 	struct bpf_insn *fp;
   2173 
   2174 	/*
   2175 	 * Loop doing convert_code_r() until no branches remain
   2176 	 * with too-large offsets.
   2177 	 */
   2178 	while (1) {
   2179 	    unMarkAll();
   2180 	    n = *lenp = count_stmts(root);
   2181 
   2182 	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
   2183 	    if (fp == NULL)
   2184 		    bpf_error("malloc");
   2185 	    memset((char *)fp, 0, sizeof(*fp) * n);
   2186 	    fstart = fp;
   2187 	    ftail = fp + n;
   2188 
   2189 	    unMarkAll();
   2190 	    if (convert_code_r(root))
   2191 		break;
   2192 	    free(fp);
   2193 	}
   2194 
   2195 	return fp;
   2196 }
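
/*
 * A minimal caller sketch (not part of this file; 'root' and all error
 * handling are assumed): the returned array belongs to the caller and
 * is what pcap_freecode() later releases.
 */
#if 0
	struct bpf_program prog;

	prog.bf_insns = icode_to_fcode(root, &prog.bf_len);
	/* ... hand 'prog' to a kernel or userland filter ... */
	pcap_freecode(&prog);	/* frees bf_insns; icode_to_fcode() does not */
#endif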
   2197 
   2198 /*
   2199  * Make a copy of a BPF program and put it in the "fcode" member of
   2200  * a "pcap_t".
   2201  *
   2202  * If we fail to allocate memory for the copy, fill in the "errbuf"
   2203  * member of the "pcap_t" with an error message, and return -1;
   2204  * otherwise, return 0.
   2205  */
   2206 int
   2207 install_bpf_program(pcap_t *p, struct bpf_program *fp)
   2208 {
   2209 	size_t prog_size;
   2210 
   2211 	/*
   2212 	 * Validate the program.
   2213 	 */
   2214 	if (!bpf_validate(fp->bf_insns, fp->bf_len)) {
   2215 		snprintf(p->errbuf, sizeof(p->errbuf),
   2216 			"BPF program is not valid");
   2217 		return (-1);
   2218 	}
   2219 
   2220 	/*
   2221 	 * Free up any already installed program.
   2222 	 */
   2223 	pcap_freecode(&p->fcode);
   2224 
   2225 	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
   2226 	p->fcode.bf_len = fp->bf_len;
   2227 	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
   2228 	if (p->fcode.bf_insns == NULL) {
   2229 		snprintf(p->errbuf, sizeof(p->errbuf),
   2230 			 "malloc: %s", pcap_strerror(errno));
   2231 		return (-1);
   2232 	}
   2233 	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
   2234 	return (0);
   2235 }
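
/*
 * Typical use, sketched with an assumed filter string; pcap_setfilter()
 * is the usual public entry point that leads here.
 */
#if 0
	struct bpf_program fp;

	if (pcap_compile(p, &fp, "ip and tcp", 1, PCAP_NETMASK_UNKNOWN) == 0) {
		if (install_bpf_program(p, &fp) == -1)
			fprintf(stderr, "%s\n", pcap_geterr(p));
		pcap_freecode(&fp);	/* install_bpf_program() keeps its own copy */
	}
#endif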
   2236 
   2237 #ifdef BDEBUG
   2238 static void
   2239 opt_dump(struct block *root)
   2240 {
   2241 	struct bpf_program f;
   2242 
   2243 	memset(bids, 0, sizeof bids);
   2244 	f.bf_insns = icode_to_fcode(root, &f.bf_len);
   2245 	bpf_dump(&f, 1);
   2246 	putchar('\n');
   2247 	free((char *)f.bf_insns);
   2248 }
   2249 #endif
   2250