/*
 * Copyright (c) 1988, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)radix.c	7.9 (Berkeley) 2/4/91
 *	$Id: radix.c,v 1.3 1993/09/04 00:00:19 jtc Exp $
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include "param.h"
#include "systm.h"
#include "radix.h"
#include "malloc.h"
#define	M_DONTWAIT M_NOWAIT
#endif
struct radix_node_head *mask_rnhead;
#define rn_maskhead mask_rnhead->rnh_treetop
struct radix_mask *rn_mkfreelist;
struct radix_node_head *radix_node_head;
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the trailing bits of k
 * before bit rn_b are all 0 (and hence the same holds for every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */
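
/*
 * As a concrete illustration of the definitions above (example values
 * only, with any leading header bytes of the key ignored): the mask
 * 0xff 0xff 0x00 0x00 has index 16, since bit 16 is the first 0 bit when
 * the high order bit of the first byte is counted as bit 0.  The key
 * 0x80 0x20 0x00 0x00 may be paired with that mask, because the bit-wise
 * and of key and mask reproduces the key, while 0x80 0x20 0x01 0x00 may
 * not.  The mask is also normal, since every bit past bit 16 is 0.
 */
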
/*
 * Walk down from head, branching on the bit each internal node tests
 * against the search key v, until a leaf is reached.
 */
struct radix_node *
rn_search(v, head)
	struct radix_node *head;
	register caddr_t v;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
};

/*
 * Like rn_search, but a bit of v is only considered set when the
 * corresponding bit of the mask m is also set.
 */
struct radix_node *
rn_search_m(v, head, m)
	struct radix_node *head;
	register caddr_t v, m;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
};

static int gotOddMasks;
static char maskedKey[MAXKEYLEN];

struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net.
			 */
			cp3 = matched_off + t->rn_mask;
			cp2 = matched_off + t->rn_key;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
			cp = matched_off + v;
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
};
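
/*
 * To summarize the lookup above (an informal sketch): rn_match descends
 * to a leaf and first tries an exact host match against the search key;
 * failing that, it tries each route chained on rn_dupedkey at that leaf,
 * comparing only under that route's mask; failing that, it climbs back
 * toward the root and, at each ancestor carrying an rn_mklist, repeats
 * the descent on a copy of the key masked with every listed mask.  That
 * last pass is what lets a lookup for a host inside a network find the
 * network route even though no leaf carries the full host key.
 */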

#ifdef RN_DEBUG
int	rn_nodenum;
struct	radix_node *rn_clist;
int	rn_saveinfo;
#endif

struct radix_node *
rn_newpair(v, b, nodes)
	caddr_t v;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

int rn_debug = 1;
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * Find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = head;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}

struct radix_node *
rn_addmask(netmask, search, skip)
	caddr_t netmask;
{
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b, mlen, j;
	int maskduplicated;

	mlen = *(u_char *)netmask;
	if (search) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0)
			return (x);
	}
	R_Malloc(x, struct radix_node *, MAXKEYLEN + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	Bzero(x, MAXKEYLEN + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);
	Bcopy(netmask, cp, mlen);
	netmask = cp;
	x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
	/*
	 * Calculate index of mask.
	 */
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim; cp++)
		if (*(u_char *)cp != 0xff)
			break;
	b = (cp - netmask) << 3;
	if (cp != cplim) {
		if (*cp != 0) {
			gotOddMasks = 1;
			for (j = 0x80; j; b++, j >>= 1)
				if ((j & *cp) == 0)
					break;
		}
	}
	x->rn_b = -1 - b;
	return (x);
}
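
/*
 * Worked example for the index calculation above (illustrative values,
 * with skip and any header bytes ignored): for the mask
 * 0xff 0xff 0xe0 0x00 the byte scan stops at 0xe0, giving b = 16, and
 * the bit scan within that byte advances b to 19, the first 0 bit; the
 * node is then marked as a leaf with that index by storing
 * rn_b = -1 - 19.
 */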

struct radix_node *
rn_addroute(v, netmask, head, treenodes)
	struct radix_node *head;
	caddr_t netmask, v;
	struct radix_node treenodes[2];
{
	register int j;
	register caddr_t cp;
	register struct radix_node *t, *x, *tt;
	short b = 0, b_leaf;
	int vlen = *(u_char *)v, mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, head->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;
		b = -1 - x->rn_b;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);
			t = tt;
		} while (tt = tt->rn_dupedkey);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * XXX: we really ought to sort the masks
		 * for a duplicated key the same way as in a masklist.
		 * It is an unfortunate pain having to relocate
		 * the head of the list.
		 */
		t->rn_dupedkey = tt = treenodes;
#ifdef RN_DEBUG
		t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		maskp = (u_char *)m->rm_mask;
		for (cp = netmask; cp < cplim; cp++)
			if (*(u_char *)cp > *maskp++)
				goto on2;
	}
on2:
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}
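
/*
 * Informal sketch of the bookkeeping above: a host route and a net route
 * entered for the same key end up as a single leaf plus an rn_dupedkey
 * chain, and a net route's mask is additionally recorded on the rm_mklist
 * of an ancestor as high in the tree as the mask's index allows, which is
 * what rn_match consults while backtracking.
 */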

struct radix_node *
rn_delete(v, netmask, head)
	caddr_t v, netmask;
	struct radix_node *head;
{
	register struct radix_node *t, *p, *x = head;
	register struct radix_node *tt = rn_search(v, x);
	int b, head_off = x->rn_off, vlen = *(u_char *)v;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt = tt;

	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		if (netmask)
			netmask = rn_search(netmask, rn_maskhead)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif /* RN_DEBUG */
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
#ifndef RN_DEBUG
			x++; t = tt + 1; *x = *t; p = t->rn_p;
#else
			x++; b = x->rn_info; t = tt + 1; *x = *t; p = t->rn_p;
			x->rn_info = b;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		} else {
			for (p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %x at %x\n",
					    "rn_delete: Orphaned Mask", m, x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}
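
/*
 * Note on the node juggling above: each route supplies two radix_node
 * structures (a leaf and an interior node), and the interior node of the
 * pair being deleted may still be wired into the tree on behalf of some
 * other key.  The final copy in rn_delete moves that still-active node
 * into the interior node just unlinked from the tree, so that both halves
 * of the deleted pair can be handed back to the caller.
 */
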
char rn_zeros[MAXKEYLEN], rn_ones[MAXKEYLEN];

rn_inithead(head, off, af)
	struct radix_node_head **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_af = af;
	rnh->rnh_treetop = t;
	if (radix_node_head == 0) {
		caddr_t cp = rn_ones, cplim = rn_ones + MAXKEYLEN;
		while (cp < cplim)
			*cp++ = -1;
		if (rn_inithead(&radix_node_head, 0, 0) == 0) {
			Free(rnh);
			*head = 0;
			return (0);
		}
		mask_rnhead = radix_node_head;
	}
	rnh->rnh_next = radix_node_head->rnh_next;
	if (radix_node_head != rnh)
		radix_node_head->rnh_next = rnh;
	return (1);
}
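
/*
 * Illustrative calling sequence (a sketch only; the names, the 8-byte key
 * layout and the 32-bit offset below are hypothetical simplifications of
 * what the routing code actually passes in, where keys are sockaddrs and
 * the first key byte is the overall length):
 *
 *	static struct radix_node_head *example_head;
 *	static struct radix_node example_nodes[2];
 *	static char dst[8]  = { 8, 0, 0, 0, 0x80, 0x20, 0x00, 0x00 };
 *	static char mask[8] = { 8, 0, 0, 0, 0xff, 0xff, 0x00, 0x00 };
 *
 *	if (rn_inithead(&example_head, 32, 0)) {
 *		rn_addroute(dst, mask, example_head->rnh_treetop,
 *		    example_nodes);
 *		(void) rn_match(dst, example_head->rnh_treetop);
 *		rn_delete(dst, mask, example_head->rnh_treetop);
 *	}
 */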