/*
 * Copyright (c) 1988, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)radix.c	7.9 (Berkeley) 2/4/91
 *	$Id: radix.c,v 1.2 1993/05/20 03:06:06 cgd Exp $
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include "param.h"
#include "radix.h"
#include "malloc.h"
#define	M_DONTWAIT M_NOWAIT
#endif
struct radix_node_head *mask_rnhead;
#define rn_maskhead mask_rnhead->rnh_treetop
struct radix_mask *rn_mkfreelist;
struct radix_node_head *radix_node_head;
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bitwise logical AND of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */

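/*
 * For example, with the bit numbering above, an 8-bit mask of 0xc0 has
 * index 2: bits 0 and 1 are 1 and bit 2 is the first 0.  The mask is
 * normal, since every bit past bit 2 is also 0.  A key of 0x80 paired
 * with that mask satisfies the requirement above, as 0x80 & 0xc0 == 0x80.
 */

/*
 * rn_search: walk the tree from head, branching on the bit each
 * internal node tests in the key v, and return the leaf reached.
 * No masks are consulted; the caller compares the leaf's key itself.
 */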
struct radix_node *
rn_search(v, head)
	struct radix_node *head;
	register caddr_t v;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

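/*
 * rn_search_m: like rn_search, but only branch right on bits that are
 * also set in the mask m; return the leaf reached.
 */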
struct radix_node *
rn_search_m(v, head, m)
	struct radix_node *head;
	register caddr_t v, m;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}


static int gotOddMasks;
static char maskedKey[MAXKEYLEN];

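/*
 * rn_match: longest matching lookup of v.  Descend to a leaf and try
 * for an exact (host) match; failing that, try the leaf's duplicate-key
 * chain under each entry's own mask, and then back up the tree applying
 * the masks hung on each ancestor's rn_mklist to v and re-searching.
 * Returns the matching leaf, or 0 if no route applies.
 */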
struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net.
			 */
			cp3 = matched_off + t->rn_mask;
			cp2 = matched_off + t->rn_key;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
			cp = matched_off + v;
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
}

#ifdef RN_DEBUG
int rn_nodenum;
struct radix_node *rn_clist;
int rn_saveinfo;
#endif

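/*
 * rn_newpair: initialize the caller-supplied pair "nodes" as a leaf
 * holding key v (nodes[0]) and an internal node testing bit b
 * (nodes[1], which is returned), with the leaf as its left child.
 */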
struct radix_node *
rn_newpair(v, b, nodes)
	caddr_t v;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

int rn_debug = 1;
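/*
 * rn_insert: insert key v, using the caller-supplied node pair.  If an
 * equal key already exists, set *dupentry and return the existing leaf;
 * otherwise splice a new leaf/internal pair in at the depth of the first
 * differing bit and return the new leaf.
 */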
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = head;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}

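/*
 * rn_addmask: look up (if "search" is set) and, if necessary, enter
 * netmask in the mask tree.  The mask's index -- the position of its
 * first 0 bit, scanning from "skip" bytes in -- is recorded as
 * rn_b = -1 - index; non-contiguous masks also set gotOddMasks.
 * Returns the mask node, or 0 if memory cannot be allocated.
 */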
struct radix_node *
rn_addmask(netmask, search, skip)
	caddr_t netmask;
{
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b, mlen, j;
	int maskduplicated;

	mlen = *(u_char *)netmask;
	if (search) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0)
			return (x);
	}
	R_Malloc(x, struct radix_node *, MAXKEYLEN + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	Bzero(x, MAXKEYLEN + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);
	Bcopy(netmask, cp, mlen);
	netmask = cp;
	x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
	/*
	 * Calculate index of mask.
	 */
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim; cp++)
		if (*(u_char *)cp != 0xff)
			break;
	b = (cp - netmask) << 3;
	if (cp != cplim) {
		if (*cp != 0) {
			gotOddMasks = 1;
			for (j = 0x80; j; b++, j >>= 1)
				if ((j & *cp) == 0)
					break;
		}
	}
	x->rn_b = -1 - b;
	return (x);
}

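/*
 * rn_addroute: add a route with key v and (optional) netmask, using the
 * caller-supplied node pair.  The mask is first canonicalized through
 * the mask tree so equal masks share one pointer; duplicated keys are
 * chained through rn_dupedkey; and the mask is hung, as a radix_mask,
 * on the highest ancestor it can apply to.  Returns the new leaf, or 0
 * on a duplicate route or allocation failure.
 */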
struct radix_node *
rn_addroute(v, netmask, head, treenodes)
	struct radix_node *head;
	caddr_t netmask, v;
	struct radix_node treenodes[2];
{
	register int j;
	register caddr_t cp;
	register struct radix_node *t, *x, *tt;
	short b = 0, b_leaf;
	int vlen = *(u_char *)v, mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask, to speed up avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, head->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;
		b = -1 - x->rn_b;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);
			t = tt;
		} while (tt = tt->rn_dupedkey);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * XXX: we really ought to sort the masks
		 * for a duplicated key the same way as in a masklist.
		 * It is an unfortunate pain having to relocate
		 * the head of the list.
		 */
		t->rn_dupedkey = tt = treenodes;
#ifdef RN_DEBUG
		t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		maskp = (u_char *)m->rm_mask;
		for (cp = netmask; cp < cplim; cp++)
			if (*(u_char *)cp > *maskp++)
				goto on2;
	}
on2:
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}

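/*
 * rn_delete: remove the route with key v and netmask.  Finds the entry
 * (following the duplicate-key chain if need be), strips its mask
 * annotation from the ancestor it was lifted to, and unsplices the leaf
 * and its companion internal node from the tree, demoting any masks
 * that were attached to the removed internal node.  Returns the node
 * pair, marked inactive, for the caller to dispose of, or 0 if no such
 * route exists.
 */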
struct radix_node *
rn_delete(v, netmask, head)
	caddr_t v, netmask;
	struct radix_node *head;
{
	register struct radix_node *t, *p, *x = head;
	register struct radix_node *tt = rn_search(v, x);
	int b, head_off = x->rn_off, vlen = * (u_char *) v;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt = tt;

	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		if (netmask)
			netmask = rn_search(netmask, rn_maskhead)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif /* RN_DEBUG */
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
#ifndef RN_DEBUG
			x++; t = tt + 1; *x = *t; p = t->rn_p;
#else
			x++; b = x->rn_info; t = tt + 1; *x = *t; p = t->rn_p;
			x->rn_info = b;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		} else {
			for (p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %x at %x\n",
					    "rn_delete: Orphaned Mask", m, x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

char rn_zeros[MAXKEYLEN], rn_ones[MAXKEYLEN];

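/*
 * rn_inithead: allocate and set up a radix_node_head for address family
 * af, whose keys begin at bit offset "off": a left end with an all-zeros
 * key, the top internal node, and a right end with an all-ones key.
 * The first call also bootstraps the shared mask tree (mask_rnhead).
 * Returns 1 on success (or if *head is already set up), 0 on
 * allocation failure.
 */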
rn_inithead(head, off, af)
	struct radix_node_head **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_af = af;
	rnh->rnh_treetop = t;
	if (radix_node_head == 0) {
		caddr_t cp = rn_ones, cplim = rn_ones + MAXKEYLEN;
		while (cp < cplim)
			*cp++ = -1;
		if (rn_inithead(&radix_node_head, 0, 0) == 0) {
			Free(rnh);
			*head = 0;
			return (0);
		}
		mask_rnhead = radix_node_head;
	}
	rnh->rnh_next = radix_node_head->rnh_next;
	if (radix_node_head != rnh)
		radix_node_head->rnh_next = rnh;
	return (1);
}
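
/*
 * A minimal usage sketch, not part of the original file, showing how a
 * caller might drive these routines.  It assumes length-prefixed keys
 * and masks (as with sockaddrs); "example_head", "example_add",
 * "example_lookup", "dst", "mask", "nodes" and the bit offset 32 are
 * hypothetical names and values chosen for illustration only.
 */
#if 0
static struct radix_node_head *example_head;

struct radix_node *
example_add(dst, mask, nodes)
	caddr_t dst, mask;
	struct radix_node nodes[2];
{
	if (example_head == 0 &&
	    rn_inithead(&example_head, 32, 0) == 0)
		return (0);			/* no memory for head */
	return (rn_addroute(dst, mask, example_head->rnh_treetop, nodes));
}

struct radix_node *
example_lookup(dst)
	caddr_t dst;
{
	return (rn_match(dst, example_head->rnh_treetop));
}
#endif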