/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)radix.c	8.2.1.1 (Berkeley) 09/23/94
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#define	M_DONTWAIT M_NOWAIT
#ifdef	KERNEL
#include <sys/domain.h>
#endif
#endif

#include "radix.h"

int	max_keylen;
struct radix_node_head *mask_rnhead;
static char *rn_zeros, *rn_ones;

#define rn_masktop (mask_rnhead->rnh_treetop)
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))

/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bitwise logical AND of the key and mask be the key.
 * We define the index of the route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, then the last few bits of k before bit rn_b are all 0
 * (and hence so are those of every descendant of n), so the route applies
 * to all descendants of the node as well.
 *
 * This version of the code assumes that all masks are normal,
 * and consequently the only thing that needs to be stored about a mask
 * is its length.  This version also permits mapped and fixed-length
 * elements to be entered into the tree.
 */

struct radix_node *
rn_search(v_arg, top)
	void *v_arg;
	struct radix_node *top;
{
	register struct radix_node *x;
	register caddr_t v;

	for (x = top, v = v_arg; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}

static char search_bits[] = {0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1};

struct radix_node *
rn_search_unmapped(v_arg, head)
	void *v_arg;
	struct radix_node_head *head;
{
	register struct radix_node *x = head->rnh_treetop;
	register char *v;
	register int index;

	for (v = v_arg; x->rn_b >= 0;) {
		index = x->rn_b + head->rnh_offset;
		if (search_bits[index & 7] & v[index >> 3])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}
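
/*
 * Illustrative sketch (not part of the original code): rn_search() and
 * rn_search_unmapped() both number key bits from the high-order bit of
 * the first key byte, so bit i of a key v is tested as shown below.
 * The helper name rn_bit_is_set() is hypothetical and exists only to
 * make the convention concrete.
 */
#ifdef notdef
static int
rn_bit_is_set(v, i)
	register u_char *v;
	register int i;
{
	/* search_bits[0] == 0x80, so i == 0 selects the high-order bit */
	return ((search_bits[i & 7] & v[i >> 3]) != 0);
}
#endif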

/*
 * This routine is used elsewhere in the kernel, to find best matches
 * for interfaces; it is no longer used by the radix code itself.
 */
int
rn_refines(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	register caddr_t m = m_arg, n = n_arg;
	register caddr_t lim, lim2 = lim = n + *(u_char *)n;
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2)
		if (*n++)
			return 0;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}

/* Begin bits.c */
static int low_bits[] = {1, 3, 7, 15, 31, 63, 127, 255};
static int mask_bits[] = {1, 2, 4, 8, 16, 32, 64, 128};

#define x1(a,n) ( a > ((1 << (n + 1)) - 1) ? n+1 : n)
#define x2(a,n) (( a > ((1 << (2 + n)) - 1)) ? x1(a,n+2) : x1(a,n))
#define x4(a,n) (( a > ((1 << (4 + n)) - 1)) ? x2(a,n+4) : x2(a,n))
#define x8(a,n) (( a > ((1 << (8 + n)) - 1)) ? x4(a,n+8) : x4(a,n))
#define x16(a,n) ((a > (((1 << n) - 1)+(65535 << n))) ? x8(a,n+16) : x8(a,n))

int
rn_mapped_bits_matched(t_a, r_a, rnh, masklen)
	void *t_a;			/* Under scrutiny, assumed mapped */
	void *r_a;			/* being compared to, not mapped */
	struct radix_node_head *rnh;	/* has offset for ref, map for trial */
	int masklen;			/* only need this many bits to match */
{
	register struct radix_index_table *table = rnh->rnh_table;
	int matched = 0, k, l;
	u_char *trial = t_a;		/* Under scrutiny, assumed mapped */
	u_char *ref = r_a;		/* being compared to, not mapped */

#ifdef utterly_straightforward_way_of_doing_this
	for (; table->limit; table++) {
		for (; matched < table->limit; matched++) {
			if (matched >= masklen - 1)
				return (matched);
			k = matched + table->offset;
			l = matched + rnh->rnh_offset;
			/* is bit l of ref == bit k of trial */
			if (((ref[l >> 3] >> (7 - (l & 7))) ^
			     (trial[k >> 3] >> (7 - (k & 7)))) & 1)
				return (matched);
		}
	}
#else
	int test_info, trial_bits, ref_bits, limit, sum_bits, delta;

	for (; table->limit; table++) {
		limit = MIN(masklen, table->limit);
		while (matched < limit) {
			k = matched + table->offset;
			l = matched + rnh->rnh_offset;
			trial_bits = 7 - (k & 7);
			ref_bits = 7 - (l & 7);
			delta = MIN(MIN(trial_bits, ref_bits), limit - matched);
			sum_bits = trial_bits + ref_bits;

			test_info = ((int)ref[l >> 3]) << ref_bits;
			test_info ^= ((int)trial[k >> 3]) << trial_bits;
			test_info &= low_bits[sum_bits];
			test_info &= ~low_bits[sum_bits - delta];
			if (test_info != 0) {
				int count, mask = mask_bits[sum_bits];
				for (count = delta; count >= 0; count--) {
					if (mask & test_info)
						return (matched);
					matched++; mask >>= 1;
				}
				printf("Bits_matched: should never get here!\n");
			}
			matched += delta;
			if (matched >= masklen)
				return (matched);
		}
	}
#endif
	return (matched);
}

#if defined(IPPROTO_IP) && defined(IPVERSION) /* #include <netinet/in.h>, <netinet/ip.h> */
int
ip_mapped_bits_matched(void *t_a, void *r_a, struct radix_node_head *rnh,
    int masklen)
{
	struct ip *trial = t_a;
	struct sockaddr_in *ref = r_a;
	u_long bits = ref->sin_addr.s_addr ^ trial->ip_dst.s_addr;

	if (bits == 0)
		return (32);		/* expected case! */
	bits = ntohl(bits);
	return (31 - x16(bits, 0));	/* leading bits in common */
}
#endif
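
/*
 * Illustrative sketch (not part of the original code): x16(a, 0) expands
 * to the index of the highest-order set bit of a, so for two 32-bit
 * host-order words the number of leading bits on which they agree is 31
 * minus that index.  The helper below is hypothetical and exists only to
 * make the arithmetic concrete.
 */
#ifdef notdef
static int
rn_leading_bits_matched(a, b)
	register u_long a, b;
{
	register u_long diff = a ^ b;

	if (diff == 0)
		return (32);		/* identical words */
	return (31 - x16(diff, 0));	/* e.g. a ^ b == 0x00010000 -> 15 */
}
#endif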

rn_mapped_set_mask(index, rn, rnh)
	int index;
	register struct radix_node *rn;
	register struct radix_node_head *rnh;
{
	register struct radix_index_table *table;

	for (table = rnh->rnh_table; table->limit && index >= table->limit;)
		table++;
	if (table->limit) {
		index += table->offset;
		rn->rn_bmask = mask_bits[7 - (index & 7)];
		rn->rn_off = (index >> 3);
	}
}

rn_unmapped_bits_matched(t_a, r_a, rnh, masklen)
	void *t_a;			/* Under scrutiny */
	void *r_a;			/* being compared to */
	struct radix_node_head *rnh;	/* has offset for both keys */
	int masklen;			/* only need this many bits to match */
{
	register u_char *cp1, *cp2, *limit;
	int offset = rnh->rnh_offset >> 3;	/* XXX: off must be n * 8 */
	int matched, test_info;
	u_char *trial = t_a;		/* Under scrutiny */
	u_char *ref = r_a;		/* being compared to */

	cp1 = trial + offset;
	limit = cp1 + ((masklen + 7) >> 3);
	for (cp2 = ref + offset; cp1 < limit;)
		if (*cp1++ != *cp2++) {
			test_info = cp1[-1] ^ cp2[-1];
			matched = (cp1 - trial - offset) << 3;
			for (; test_info; matched--)
				test_info >>= 1;
			if (matched > masklen)
				matched = masklen;
			return (matched);
		}
	return (masklen);
}

rn_unmapped_set_mask(index, rn, rnh)
	int index;
	register struct radix_node *rn;
	register struct radix_node_head *rnh;
{
	index += rnh->rnh_offset;
	rn->rn_bmask = mask_bits[7 - (index & 7)];
	rn->rn_off = (index >> 3);
}
/* End bits.c */
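
/*
 * Illustrative example (not part of the original code): with an
 * rnh_offset of 0, rn_unmapped_bits_matched() stops at the third byte of
 * the hypothetical keys below, whose exclusive-or is 0x20, so it reports
 * 18 matching bits (two full bytes plus the two high-order bits of the
 * third).
 */
#ifdef notdef
static u_char rn_example_trial[] = { 0xc0, 0xa8, 0x21, 0x00 };	/* hypothetical */
static u_char rn_example_ref[]   = { 0xc0, 0xa8, 0x01, 0x00 };	/* hypothetical */
/* rn_unmapped_bits_matched(rn_example_trial, rn_example_ref, head, 32) == 18 */
#endif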

struct radix_node *
rn_match(v_arg, head)
	void *v_arg;
	struct radix_node_head *head;
{
	register char *cp = (char *)(v_arg);
	register struct radix_node *m, *t = head->rnh_treetop;
	struct radix_node *saved_t, *top = t;
	int bits_matched, rn_b;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	while (t->rn_b >= 0)
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	bits_matched = (*head->rnh_bits_matched)
	    (v_arg, t->rn_key, head, -1 - t->rn_b);
	rn_b = -1 - bits_matched;	/* XXX: compatibility */
	/*
	 * Check this node, and any other duped keys.
	 * We want to match the most specific possible mask, so
	 * duplicates are sorted longest mask to shortest.
	 */
	for (saved_t = t; t; t = t->rn_dupedkey)
		/* if (bits_matched >= mask_index) */
		if (rn_b <= t->rn_b) {
			/*
			 * This extra grot is in case we are explicitly asked
			 * to look up the default.  Ugh!
			 */
			if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
				t = t->rn_dupedkey;
			return (t);
		}
	/* start searching up the tree */
	t = saved_t;
	do {
		t = t->rn_p;
		for (m = t->rn_mklist; m; m = m->rn_mklist)
			/* if (bits_matched >= mask_index) */
			if (rn_b <= m->rn_b)
				return (m);
	} while (t != top);
	return 0;
}

#ifdef RN_DEBUG
int	rn_nodenum;
struct	radix_node *rn_clist;
int	rn_saveinfo;
int	rn_debug = 1;
#endif

struct radix_node *
rn_newpair1(v, b, nodes, rnh)
	void *v;
	int b;
	struct radix_node nodes[2];
	struct radix_node_head *rnh;
{
	register struct radix_node *tt = nodes, *t = tt + 1;

	if (rnh == 0)
		panic("rn_newpair1");
	t->rn_b = b; t->rn_l = tt;
	(*rnh->rnh_set_mask)(b, t, rnh);
	tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

#define DEFAULT(a, b) (a > 0 ? a : b)
#define VLEN(v, h) ((DEFAULT(h->rnh_addrsize, *(u_char *)v) << 3) - h->rnh_offset)

struct radix_node *
rn_insert(v_arg, head, dupentry, nodes)
	void *v_arg;
	register struct radix_node_head *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	caddr_t v = (caddr_t)v_arg, cp = (head->rnh_offset >> 3) + v;
	register struct radix_node *t = rn_search_unmapped(v, head);
	register struct radix_node *p, *x;
	int b, vlen = VLEN(v, head);

	/*
	 * Find first bit at which v and t->rn_key differ
	 */
	b = rn_unmapped_bits_matched(v, t->rn_key, head, vlen);
	if (b == vlen) {
		*dupentry = 1;
		return t;
	}
	/*
	 * Find appropriate node after which to insert new key
	 */
	p = t;
	do {
		x = p;
		p = x->rn_p;
	} while (b <= p->rn_b);

#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair1(v, b, nodes, head);
	/*
	 * If we went to the left during the matching process,
	 * the spliced-in node will still be on the left.
	 */
	if (p->rn_l == x)
		p->rn_l = t;
	else
		p->rn_r = t;
	t->rn_p = p;
	/*
	 * If the first bit of the input string that didn't match
	 * was set, put the new leaf to the right of the new node.
	 */
	if (cp[b >> 3] & search_bits[b & 7]) {
		t->rn_r = nodes; t->rn_l = x;
	} else
		t->rn_r = x;
	x->rn_p = t;
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
	return (nodes);
}
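
/*
 * Illustrative sketch (not part of the original code): rn_insert()
 * expects two contiguous radix_node structures from its caller, as
 * rn_addroute() and rn_masksubr() provide; nodes[0] becomes the leaf
 * carrying the key and nodes[1] the internal node spliced into the
 * tree.  The wrapper below is hypothetical.
 */
#ifdef notdef
static struct radix_node *
rn_insert_example(head, key, nodes)
	struct radix_node_head *head;
	void *key;			/* hypothetical caller-supplied key */
	struct radix_node nodes[2];
{
	int duplicate = 0;
	struct radix_node *leaf;

	/* nodes[0] will be the leaf for key, nodes[1] the interior node */
	leaf = rn_insert(key, head, &duplicate, nodes);
	return (duplicate ? (struct radix_node *)0 : leaf);
}
#endif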

/*
 * Here mostly for compatibility's sake with
 * the previous networking code, which expects to find masks.
 */
struct radix_node *
rn_masksubr(n_arg, v_arg, skip, head, len_p)
	void *n_arg, *v_arg;
	int skip, *len_p;
	struct radix_node_head *head;
{
	caddr_t netmask = (caddr_t)n_arg, v = v_arg;
	register struct radix_node *x = rn_masktop;
	register caddr_t cp, cplim;
	register int b, j, mlen, vlen;
	int maskduplicated;
	struct radix_node *saved_x;

	if (head == 0)
		head = mask_rnhead;
	if (netmask == 0) {
		if (len_p)
			*len_p = VLEN(v, head);
		return 0;
	}
	if (skip > 0)
		for (j = skip << 3; j > ((unsigned)x->rn_b);)
			x = x->rn_r;
	x = rn_search(netmask, x);
	mlen = *(u_char *)netmask;
	if ((skip > mlen) ||
	    Bcmp(netmask + skip, x->rn_key + skip, mlen - skip) == 0)
		goto found;
	R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	saved_x = x;
	Bzero(x, max_keylen + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);
	Bcopy(netmask, cp, mlen);
	if (skip > 1)
		Bcopy(rn_ones, cp + 1, skip - 1);
	maskduplicated = 0;
	x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
	mlen = rn_unmapped_bits_matched(cp, rn_ones, head, mlen << 3);
	if (maskduplicated) {
		printf("rn_masksubr: impossible dup\n");
		Free(saved_x);
	} else {
		x->rn_b = -1 - mlen;
	}
found:
	if (len_p)
		*len_p = (-1 - x->rn_b) - head->rnh_offset;
	return (x);
}

struct radix_node *
rn_addroute(v_arg, n_arg, head, treenodes)
	void *v_arg, *n_arg;
	struct radix_node_head *head;
	struct radix_node treenodes[2];
{
	caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
	register struct radix_node *m, *us = treenodes;
	struct radix_node *t, *tt, *x, *base, *top = head->rnh_treetop;
	struct radix_node *s /*sibling*/, *p /*parent*/, **mp;
	short b = 0, b_leaf;
	int masklen, masklen_leaf, mlen, keyduplicated = 0;

	/*
	 * This version assumes contiguous masks and only cares about
	 * the index of the mask, if present.
	 */
	(void) rn_masksubr(n_arg, v_arg, (head->rnh_offset >> 3), head, &masklen);
	masklen_leaf = -1 - masklen;
	base = tt = rn_insert(v, head, &keyduplicated, treenodes);
	t = p = tt->rn_p;
	/*
	 * Deal with duplicated keys: attach node to previous instance.
	 * We sort the nodes so that the longest mask comes first.
	 */
	if (keyduplicated) {
		/*
		 * Examine each node and continue past any where the
		 * mask length is greater than the new one;
		 * since we are storing -1 - masklength, the sense
		 * of the test is reversed.
		 */
		for (; tt && (tt->rn_b <= masklen_leaf);
		     x = tt, tt = tt->rn_dupedkey)
			if (tt->rn_mask == netmask)
				return (0);	/* mask is also duplicated */
		if (tt == base) {
			/* link in at head of list */
			us->rn_dupedkey = tt;
			us->rn_p = p;
			if (p->rn_l == tt) p->rn_l = us; else p->rn_r = us;
			base = us;
		} else {
			us->rn_dupedkey = x->rn_dupedkey;
			x->rn_dupedkey = us;
		}
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		us->rn_key = (caddr_t) v;
		us->rn_flags = RNF_ACTIVE;
	}
	us->rn_b = masklen_leaf;
	us->rn_mask = netmask;
	/*
	 * Annotate tree about masks.
	 */
	b = p->rn_b;
	b_leaf = -1 - b;
	if (p->rn_r == base) s = p->rn_l; else s = p->rn_r;
	if (s->rn_b < 0) {
		if (s->rn_mask || s->rn_dupedkey) {
			/*
			 * There may be network routes among our sibling's
			 * dupedkey chain that previously couldn't be lifted
			 * which should now be added to the new parent.
			 */
			int previous_index = p->rn_p->rn_b;

			mp = &(p->rn_mklist);
			for (m = s; m; m = m->rn_dupedkey) {
				if (m->rn_mask) {
					int m_index = -1 - m->rn_b;
					if (previous_index < m_index && b >= m_index) {
						*mp = m;
						mp = &(m->rn_mklist);
					}
				}
			}
		}
	} else if (s->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &(s->rn_mklist); m = *mp; mp = &m->rn_mklist)
			if (m->rn_b >= b_leaf)
				break;
		p->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (masklen > p->rn_b))
		return us;		/* can't lift at all */
	do {
		x = t;
		t = t->rn_p;
	} while (masklen <= t->rn_b && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rn_mklist) {
		if (m->rn_b > masklen_leaf)
			continue;
		if (m->rn_b < masklen_leaf)
			break;
		if (m->rn_b == masklen_leaf) {
			printf("rn_addroute: impossible duplicate mask\n");
			return us;
		}
	}
	*mp = us;
	us->rn_mklist = m;
	return us;
}

struct radix_node *
rn_delete(v_arg, n_arg, head)
	void *v_arg, *n_arg;
	struct radix_node_head *head;
{
	register struct radix_node *t, *p, *x, *tt;
	struct radix_node *m, *saved_m, **mp;
	struct radix_node *dupedkey, *base, *top = head->rnh_treetop;
	int b, head_off = head->rnh_offset >> 3, masklen, masklen_leaf;
	int vlen = VLEN(v_arg, head) >> 3;
	caddr_t v = v_arg;

	base = tt = rn_search_unmapped(v_arg, head);
	if (tt == 0 || Bcmp(v + head_off, tt->rn_key + head_off, vlen))
		return (0);
	p = t = tt->rn_p;
	(void) rn_masksubr(n_arg, v_arg, head_off, head, &masklen);
	masklen_leaf = -1 - masklen;
	if (dupedkey = tt->rn_dupedkey) {
		while (tt->rn_b != masklen_leaf)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	/*
	 * Delete our route from mask lists.
	 */
	if (tt->rn_mask == 0 || masklen > t->rn_b)
		goto on1;		/* Wasn't lifted at all */
	do {
		x = p;
		p = p->rn_p;
	} while (masklen <= p->rn_b && x != top);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rn_mklist)
		if (m == tt) {
			*mp = tt->rn_mklist;
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif
	if (dupedkey) {
		if (tt == base) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
		} else {
			for (x = base; x && x->rn_dupedkey != tt;)
				x = x->rn_dupedkey;
			if (x) x->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		x = tt + 1;
		if (x->rn_flags & RNF_ACTIVE) {
			/* Find inactive node to clobber among this chain. */
			for (t = base; t; t = t->rn_dupedkey)
				if ((t[1].rn_flags & RNF_ACTIVE) == 0)
					break;
			if (t == 0) {
				printf("rn_delete: lost inactive node\n");
				return (0);
			}
			t++;
			goto clobber;
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rn_mklist;
			*mp = t->rn_mklist;
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
clobber:
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

int
rn_walktree(h, f, w)
	struct radix_node_head *h;
	register int (*f)();
	void *w;
{
	int error;
	struct radix_node *base, *next;
	register struct radix_node *rn = h->rnh_treetop;
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while (rn = base) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}

int
rn_inithead(head, off)
	void **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;

	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	rnh->rnh_offset = off;
	/* The hooks must be in place before rn_newpair1() uses them. */
	rnh->rnh_bits_matched = rn_unmapped_bits_matched;
	rnh->rnh_set_mask = rn_unmapped_set_mask;
	t = rn_newpair1(rn_zeros, 0, rnh->rnh_nodes, rnh);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_treetop = t;
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_walktree = rn_walktree;
	return (1);
}

void
rn_init()
{
	char *cp, *cplim;
#ifdef KERNEL
	struct domain *dom;

	for (dom = domains; dom; dom = dom->dom_next)
		if (dom->dom_maxrtkey > max_keylen)
			max_keylen = dom->dom_maxrtkey;
#endif
	if (max_keylen == 0) {
		printf("rn_init: radix functions require max_keylen be set\n");
		return;
	}
	R_Malloc(rn_zeros, char *, 3 * max_keylen);
	if (rn_zeros == NULL)
		panic("rn_init");
	Bzero(rn_zeros, 2 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
	if (rn_inithead((void **)&mask_rnhead, 0) == 0)
		panic("rn_init 2");
}
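
/*
 * Illustrative usage sketch (not part of the original code): a protocol
 * family would typically ensure max_keylen covers its keys, have rn_init()
 * called once, create its own head with rn_inithead() using its key offset
 * in bits, and then add and look up routes through the rnh_addaddr and
 * rnh_matchaddr hooks.  The example_* identifiers are hypothetical.
 */
#ifdef notdef
static struct radix_node_head *example_rnh;

static int
example_attach(off_in_bits)
	int off_in_bits;
{
	return (rn_inithead((void **)&example_rnh, off_in_bits));
}

static struct radix_node *
example_lookup(key)
	void *key;
{
	return ((*example_rnh->rnh_matchaddr)(key, example_rnh));
}
#endif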