/*
 * Copyright (c) 1988, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	7.9 (Berkeley) 2/4/91
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include "param.h"
#include "radix.h"
#include "malloc.h"
#define	M_DONTWAIT M_NOWAIT
#endif
struct radix_node_head *mask_rnhead;
#define rn_maskhead mask_rnhead->rnh_treetop
struct radix_mask *rn_mkfreelist;
struct radix_node_head *radix_node_head;
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask be the key.
 * We define the index of the route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */

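/*
 * For example (illustration only): with bit 0 as the highest order bit,
 * a mask whose first three bytes are 0xff and whose remaining bytes are
 * zero has index 24, since bit 24 is the first 0 bit in the mask.
 */

/*
 * rn_search: descend from head, testing the bit named by each internal
 * node against the search key v, until a leaf (rn_b < 0) is reached.
 * The leaf returned is the candidate against which callers compare keys.
 */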
struct radix_node *
rn_search(v, head)
	struct radix_node *head;
	register caddr_t v;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

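/*
 * rn_search_m: like rn_search, but only follow the right (one) branch
 * when the corresponding bit of the mask m is also set.
 */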
struct radix_node *
rn_search_m(v, head, m)
	struct radix_node *head;
	register caddr_t v, m;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}


static int gotOddMasks;
static char maskedKey[MAXKEYLEN];

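/*
 * rn_match: longest-prefix lookup of v.  First try for an exact (host)
 * match at the leaf rn_search would reach; failing that, try the masks
 * of any duplicated-key entries at that leaf, then walk back up the
 * tree applying the masks hung on each ancestor's rn_mklist.
 */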
struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net.
			 */
			cp3 = matched_off + t->rn_mask;
			cp2 = matched_off + t->rn_key;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
			cp = matched_off + v;
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
}

#ifdef RN_DEBUG
int	rn_nodenum;
struct	radix_node *rn_clist;
int	rn_saveinfo;
#endif

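/*
 * rn_newpair: initialize the caller-supplied pair of nodes as a leaf
 * holding key v and an internal node testing bit b, with the leaf as
 * the internal node's left child.  Returns the internal node.
 */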
struct radix_node *
rn_newpair(v, b, nodes)
	caddr_t v;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

int rn_debug = 1;
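/*
 * rn_insert: find the first bit position at which v differs from the key
 * of the leaf rn_search reaches.  If the keys are identical, set *dupentry
 * and return the existing leaf; otherwise splice the new leaf/internal
 * pair into the tree at that bit position and return the new leaf.
 */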
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * Find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = head;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}

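/*
 * rn_addmask: enter a netmask in the mask tree.  If "search" is set and
 * the mask is already present, return the existing node; otherwise copy
 * the mask into fresh storage, insert it, and record its index in rn_b
 * (encoded as -1 - index).  "skip" gives the number of leading bytes to
 * ignore when computing the index.
 */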
struct radix_node *
rn_addmask(netmask, search, skip)
	caddr_t netmask;
{
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b, mlen, j;
	int maskduplicated;

	mlen = *(u_char *)netmask;
	if (search) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0)
			return (x);
	}
	R_Malloc(x, struct radix_node *, MAXKEYLEN + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	Bzero(x, MAXKEYLEN + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);
	Bcopy(netmask, cp, mlen);
	netmask = cp;
	x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
	/*
	 * Calculate index of mask.
	 */
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim; cp++)
		if (*(u_char *)cp != 0xff)
			break;
	b = (cp - netmask) << 3;
	if (cp != cplim) {
		if (*cp != 0) {
			gotOddMasks = 1;
			for (j = 0x80; j; b++, j >>= 1)
				if ((j & *cp) == 0)
					break;
		}
	}
	x->rn_b = -1 - b;
	return (x);
}

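/*
 * rn_addroute: add the route (v, netmask) to the tree, using the two
 * caller-supplied nodes in treenodes.  Handles duplicated keys, enters
 * the netmask in the mask tree, and hangs the mask on the highest
 * ancestor whose subtree it can cover.  Returns the new leaf, or 0 on
 * failure or if an identical route already exists.
 */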
struct radix_node *
rn_addroute(v, netmask, head, treenodes)
	struct radix_node *head;
	caddr_t netmask, v;
	struct radix_node treenodes[2];
{
	register int j;
	register caddr_t cp;
	register struct radix_node *t, *x, *tt;
	short b = 0, b_leaf;
	int vlen = *(u_char *)v, mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask, both to quickly avoid duplicate references at
	 * nodes and possibly to save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, head->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;
		b = -1 - x->rn_b;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);
			t = tt;
		} while (tt = tt->rn_dupedkey);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * XXX: we really ought to sort the masks
		 * for a duplicated key the same way as in a masklist.
		 * It is an unfortunate pain having to relocate
		 * the head of the list.
		 */
		t->rn_dupedkey = tt = treenodes;
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		maskp = (u_char *)m->rm_mask;
		for (cp = netmask; cp < cplim; cp++)
			if (*(u_char *)cp > *maskp++)
				goto on2;
	}
on2:
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}

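/*
 * rn_delete: remove the route with the given key and netmask from the
 * tree.  Detaches the route's mask annotation from its ancestor's list,
 * unlinks the leaf (handling duplicated keys), and relocates the paired
 * internal node if it is still active elsewhere in the tree.  Returns
 * the deleted leaf, or 0 if no matching route was found.
 */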
struct radix_node *
rn_delete(v, netmask, head)
	caddr_t v, netmask;
	struct radix_node *head;
{
	register struct radix_node *t, *p, *x = head;
	register struct radix_node *tt = rn_search(v, x);
	int b, head_off = x->rn_off, vlen = * (u_char *) v;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt = tt;

	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		if (netmask)
			netmask = rn_search(netmask, rn_maskhead)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif /* RN_DEBUG */
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
#ifndef RN_DEBUG
			x++; t = tt + 1; *x = *t; p = t->rn_p;
#else
			x++; b = x->rn_info; t = tt + 1; *x = *t; p = t->rn_p;
			x->rn_info = b;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		} else {
			for (p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %x at %x\n",
					    "rn_delete: Orphaned Mask", m, x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}
char rn_zeros[MAXKEYLEN], rn_ones[MAXKEYLEN];

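/*
 * rn_inithead: allocate and initialize a radix tree head whose root
 * leaves are keyed by all-zero and all-one keys starting at byte offset
 * "off".  The first call also builds the tree used to store masks.
 * Returns 1 on success (or if the head already exists), 0 on failure.
 */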
rn_inithead(head, off, af)
	struct radix_node_head **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_af = af;
	rnh->rnh_treetop = t;
	if (radix_node_head == 0) {
		caddr_t cp = rn_ones, cplim = rn_ones + MAXKEYLEN;
		while (cp < cplim)
			*cp++ = -1;
		if (rn_inithead(&radix_node_head, 0, 0) == 0) {
			Free(rnh);
			*head = 0;
			return (0);
		}
		mask_rnhead = radix_node_head;
	}
	rnh->rnh_next = radix_node_head->rnh_next;
	if (radix_node_head != rnh)
		radix_node_head->rnh_next = rnh;
	return (1);
}