/*	$OpenBSD: radix.c,v 1.52 2015/11/06 18:07:57 mpi Exp $	*/
/*	$NetBSD: radix.c,v 1.20 2003/08/07 16:32:56 agc Exp $	*/

/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	8.6 (Berkeley) 10/17/95
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */

#ifndef _KERNEL
#include "kern_compat.h"
#else
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/pool.h>
#endif

#include <net/radix.h>

#if defined(ART) && !defined(SMALL_KERNEL)
#define SMALL_KERNEL
#endif

#ifndef SMALL_KERNEL
#include <sys/socket.h>
#include <net/route.h>
#include <net/radix_mpath.h>
#endif

static unsigned int max_keylen;
struct radix_node_head *mask_rnhead;
static char *addmask_key;
static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
static char *rn_zeros, *rn_ones;

struct pool rtmask_pool;	/* pool for radix_mask structures */

#define rn_masktop (mask_rnhead->rnh_treetop)

static inline int rn_satisfies_leaf(char *, struct radix_node *, int);
static inline int rn_lexobetter(void *, void *);
static inline struct radix_mask *rn_new_radix_mask(struct radix_node *,
    struct radix_mask *);

struct radix_node *rn_insert(void *, struct radix_node_head *, int *,
    struct radix_node [2]);
struct radix_node *rn_newpair(void *, int, struct radix_node[2]);

static inline struct radix_node *rn_search(void *, struct radix_node *);
struct radix_node *rn_search_m(void *, struct radix_node *, void *);
int rn_add_dupedkey(struct radix_node *, struct radix_node_head *,
    struct radix_node [2], u_int8_t);
void rn_fixup_nodes(struct radix_node *);
static inline struct radix_node *rn_lift_node(struct radix_node *);
void rn_add_radix_mask(struct radix_node *, int);
int rn_del_radix_mask(struct radix_node *);
static inline void rn_swap_nodes(struct radix_node *, struct radix_node *);

/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bitwise logical AND of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k before
 * bit b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */
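
/*
 * A minimal sketch (added for clarity, not part of the original file;
 * the names `b' and `v' are illustrative) of the bit test the whole
 * tree is built around.  Bit b of a key v is examined as:
 *
 *	off    = b >> 3;		byte containing bit b
 *	bmask  = 0x80 >> (b & 7);	mask selecting bit b in that byte
 *	is_set = (v[off] & bmask) != 0;
 *
 * which is exactly how rn_off and rn_bmask are derived from rn_b in
 * rn_newpair() below.  Index 0 is thus the high-order bit of the first
 * key byte, matching the bit numbering used in the comment above.
 */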

static inline struct radix_node *
rn_search(void *v_arg, struct radix_node *head)
{
	struct radix_node *x = head;
	caddr_t v = v_arg;

	while (x->rn_b >= 0) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}

struct radix_node *
rn_search_m(void *v_arg, struct radix_node *head, void *m_arg)
{
	struct radix_node *x = head;
	caddr_t v = v_arg;
	caddr_t m = m_arg;

	while (x->rn_b >= 0) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

int
rn_refines(void *m_arg, void *n_arg)
{
	caddr_t m = m_arg;
	caddr_t n = n_arg;
	caddr_t lim, lim2;
	int longer;
	int masks_are_equal = 1;

	lim2 = lim = n + *(u_char *)n;
	longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2)
		if (*n++)
			return 0;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}

/* return a perfect match if m_arg is set, else do a regular rn_match */
struct radix_node *
rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head)
{
	struct radix_node *x, *tm;
	caddr_t netmask = 0;

	if (m_arg) {
		tm = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off);
		if (tm == NULL)
			return (NULL);
		netmask = tm->rn_key;
	}
	x = rn_match(v_arg, head);
	if (x && netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	/* Never return internal nodes to the upper layer. */
	if (x && (x->rn_flags & RNF_ROOT))
		return (NULL);
	return x;
}

static inline int
rn_satisfies_leaf(char *trial, struct radix_node *leaf, int skip)
{
	char *cp = trial;
	char *cp2 = leaf->rn_key;
	char *cp3 = leaf->rn_mask;
	char *cplim;
	int length;

	length = min(*(u_char *)cp, *(u_char *)cp2);
	if (cp3 == NULL)
		cp3 = rn_ones;
	else
		length = min(length, *(u_char *)cp3);
	cplim = cp + length;
	cp += skip;
	cp2 += skip;
	cp3 += skip;
	while (cp < cplim) {
		if ((*cp ^ *cp2) & *cp3)
			return 0;
		cp++, cp2++, cp3++;
	}
	return 1;
}

struct radix_node *
rn_match(void *v_arg, struct radix_node_head *head)
{
	caddr_t v = v_arg;
	caddr_t cp, cp2, cplim;
	struct radix_node *top = head->rnh_treetop;
	struct radix_node *saved_t, *t;
	int off = top->rn_off;
	int vlen, matched_off;
	int test, b, rn_b;

	t = rn_search(v, top);
	/*
	 * See if we match exactly as a host destination
	 * or at least learn how many bits match, for normal mask finesse.
	 *
	 * It doesn't hurt to limit how many bytes we check
	 * to the length of the mask, since if it matches we had a genuine
	 * match and the leaf we have is the most specific one anyway;
	 * if it didn't match with a shorter length it would fail
	 * with a long one.  This wins big for class B&C netmasks which
	 * are probably the most common case...
	 */
	if (t->rn_mask)
		vlen = *(u_char *)t->rn_mask;
	else
		vlen = *(u_char *)v;
	cp = v + off;
	cp2 = t->rn_key + off;
	cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if (t->rn_flags & RNF_ROOT)
		t = t->rn_dupedkey;

	KASSERT(t == NULL || (t->rn_flags & RNF_ROOT) == 0);
	return t;
on1:
	test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
	for (b = 7; (test >>= 1) > 0;)
		b--;
	matched_off = cp - v;
	b += matched_off << 3;
	rn_b = -1 - b;
	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	saved_t = t;
	if (t->rn_mask == NULL)
		t = t->rn_dupedkey;
	for (; t; t = t->rn_dupedkey)
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if (rn_b <= t->rn_b) {
				KASSERT((t->rn_flags & RNF_ROOT) == 0);
				return t;
			}
		} else if (rn_satisfies_leaf(v, t, matched_off)) {
			KASSERT((t->rn_flags & RNF_ROOT) == 0);
			return t;
		}
	t = saved_t;
	/* start searching up the tree */
	do {
		struct radix_mask *m;

		t = t->rn_p;
		m = t->rn_mklist;
		while (m) {
			/*
			 * If non-contiguous masks ever become important
			 * we can restore the masking and open coding of
			 * the search and satisfaction test and put the
			 * calculation of "off" back before the "do".
			 */
			if (m->rm_flags & RNF_NORMAL) {
				if (rn_b <= m->rm_b) {
					KASSERT((m->rm_leaf->rn_flags &
					    RNF_ROOT) == 0);
					return (m->rm_leaf);
				}
			} else {
				struct radix_node *x;

				off = min(t->rn_off, matched_off);
				x = rn_search_m(v, t, m->rm_mask);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x && rn_satisfies_leaf(v, x, off)) {
					KASSERT((x->rn_flags & RNF_ROOT) == 0);
					return x;
				}
			}
			m = m->rm_mklist;
		}
	} while (t != top);
	return NULL;
}

struct radix_node *
rn_newpair(void *v, int b, struct radix_node nodes[2])
{
	struct radix_node *tt = nodes, *t = nodes + 1;

	t->rn_b = b;
	t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt;
	t->rn_off = b >> 3;
	tt->rn_b = -1;
	tt->rn_key = v;
	tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
	return t;
}

struct radix_node *
rn_insert(void *v_arg, struct radix_node_head *head,
    int *dupentry, struct radix_node nodes[2])
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	struct radix_node *t, *tt;
	int off = top->rn_off;
	int b;

	t = rn_search(v_arg, top);
	/*
	 * Find first bit at which v and t->rn_key differ
	 */
	{
		caddr_t cp, cp2, cplim;
		int vlen, cmp_res;

		vlen = *(u_char *)v;
		cp = v + off;
		cp2 = t->rn_key + off;
		cplim = v + vlen;

		while (cp < cplim)
			if (*cp2++ != *cp++)
				goto on1;
		*dupentry = 1;
		return t;
on1:
		*dupentry = 0;
		cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
		for (b = (cp - v) << 3; cmp_res; b--)
			cmp_res >>= 1;
	}
	{
		struct radix_node *p, *x = top;
		caddr_t cp = v;

		do {
			p = x;
			if (cp[x->rn_off] & x->rn_bmask)
				x = x->rn_r;
			else
				x = x->rn_l;
		} while (b > (unsigned int) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
		t = rn_newpair(v_arg, b, nodes);
		tt = t->rn_l;
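		/*
		 * Link the new pair into the tree: t replaces x as a
		 * child of p, on the side selected by the key's bit at
		 * p's index.
		 */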
		if ((cp[p->rn_off] & p->rn_bmask) == 0)
			p->rn_l = t;
		else
			p->rn_r = t;
		x->rn_p = t;
		t->rn_p = p; /* frees x, p as temp vars below */
		if ((cp[t->rn_off] & t->rn_bmask) == 0) {
			t->rn_r = x;
		} else {
			t->rn_r = tt;
			t->rn_l = x;
		}
	}
	return (tt);
}

struct radix_node *
rn_addmask(void *n_arg, int search, int skip)
{
	caddr_t netmask = n_arg;
	struct radix_node *tm, *saved_tm;
	caddr_t cp, cplim;
	int b = 0, mlen, j;
	int maskduplicated, m0, isnormal;
	static int last_zeroed = 0;

	if ((mlen = *(u_char *)netmask) > max_keylen)
		mlen = max_keylen;
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return (mask_rnhead->rnh_nodes);	/* rn_zero root node */
	if (skip > 1)
		memcpy(addmask_key + 1, rn_ones + 1, skip - 1);
	if ((m0 = mlen) > skip)
		memcpy(addmask_key + skip, netmask + skip, mlen - skip);
	/*
	 * Trim trailing zeroes.
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= last_zeroed)
			last_zeroed = mlen;
		return (mask_rnhead->rnh_nodes);
	}
	if (m0 < last_zeroed)
		memset(addmask_key + m0, 0, last_zeroed - m0);
	*addmask_key = last_zeroed = mlen;
	tm = rn_search(addmask_key, rn_masktop);
	if (memcmp(addmask_key, tm->rn_key, mlen) != 0)
		tm = NULL;
	if (tm || search)
		return (tm);
	tm = malloc(max_keylen + 2 * sizeof (*tm), M_RTABLE, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (0);
	saved_tm = tm;
	netmask = cp = (caddr_t)(tm + 2);
	memcpy(cp, addmask_key, mlen);
	tm = rn_insert(cp, mask_rnhead, &maskduplicated, tm);
	if (maskduplicated) {
		log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n");
		free(saved_tm, M_RTABLE, 0);
		return (tm);
	}
	/*
	 * Calculate index of mask, and check for normalcy.
	 */
	cplim = netmask + mlen;
	isnormal = 1;
	for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
		cp++;
	if (cp != cplim) {
		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != normal_chars[b] || cp != (cplim - 1))
			isnormal = 0;
	}
	b += (cp - netmask) << 3;
	tm->rn_b = -1 - b;
	if (isnormal)
		tm->rn_flags |= RNF_NORMAL;
	return (tm);
}

/* rn_lexobetter: return an arbitrary ordering for non-contiguous masks */
static inline int
rn_lexobetter(void *m_arg, void *n_arg)
{
	u_char *mp = m_arg, *np = n_arg;

	/*
	 * Longer masks might not really be lexicographically better,
	 * but longer masks always have precedence since they must be
	 * checked first.  The netmasks were normalized before calling
	 * this function and don't have unneeded trailing zeros.
	 */
	if (*mp > *np)
		return 1;
	if (*mp < *np)
		return 0;
	/*
	 * Must return the first difference between the masks
	 * to ensure deterministic sorting.
	 */
	return (memcmp(mp, np, *mp) > 0);
}

static inline struct radix_mask *
rn_new_radix_mask(struct radix_node *tt, struct radix_mask *next)
{
	struct radix_mask *m;

	m = pool_get(&rtmask_pool, PR_NOWAIT | PR_ZERO);
	if (m == NULL) {
		log(LOG_ERR, "Mask for route not entered\n");
		return (0);
	}
	m->rm_b = tt->rn_b;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return m;
}

/*
 * Find the point where the rn_mklist needs to be changed.
 */
static inline struct radix_node *
rn_lift_node(struct radix_node *t)
{
	struct radix_node *x = t;
	int b = -1 - t->rn_b;

	/* rewind possible dupedkey list to head */
	while (t->rn_b < 0)
		t = t->rn_p;

	/* can't lift node above head of dupedkey list, give up */
	if (b > t->rn_b)
		return (NULL);

	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != t);

	return (x);
}

void
rn_add_radix_mask(struct radix_node *tt, int keyduplicated)
{
	caddr_t netmask, mmask;
	struct radix_node *x;
	struct radix_mask *m, **mp;
	int b_leaf = tt->rn_b;

	/* Add new route to highest possible ancestor's list */
	if (tt->rn_mask == NULL)
		return;		/* can't lift at all */
	x = rn_lift_node(tt);
	if (x == NULL)
		return;		/* didn't lift either */

	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	netmask = tt->rn_mask;
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			if (keyduplicated) {
				if (m->rm_leaf->rn_p == tt)
					/* new route is better */
					m->rm_leaf = tt;
#ifdef DIAGNOSTIC
				else {
					struct radix_node *t;

					for (t = m->rm_leaf;
					    t && t->rn_mklist == m;
					    t = t->rn_dupedkey)
						if (t == tt)
							break;
					if (t == NULL) {
						log(LOG_ERR, "Non-unique "
						    "normal route on dupedkey, "
						    "mask not entered\n");
						return;
					}
				}
#endif
				m->rm_refs++;
				tt->rn_mklist = m;
				return;
			} else if (tt->rn_flags & RNF_NORMAL) {
				log(LOG_ERR, "Non-unique normal route,"
				    " mask not entered\n");
				return;
			}
			mmask = m->rm_leaf->rn_mask;
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return;
		}
		if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
}

int
rn_add_dupedkey(struct radix_node *saved_tt, struct radix_node_head *head,
    struct radix_node *tt, u_int8_t prio)
{
	caddr_t netmask = tt->rn_mask;
	struct radix_node *x = saved_tt, *xp;
#ifndef SMALL_KERNEL
	struct radix_node *dupedkey_tt = NULL;
#endif
	int before = -1;
	int b_leaf = 0;

	if (netmask)
		b_leaf = tt->rn_b;

	for (xp = x; x; xp = x, x = x->rn_dupedkey) {
#ifndef SMALL_KERNEL
		/* permit multipath, if enabled for the family */
		if (rn_mpath_capable(head) && netmask == x->rn_mask) {
			int mid;
			/*
			 * Try to insert the new node in the middle
			 * of the list of any preexisting multipaths,
			 * to reduce the number of path disruptions
			 * that occur as a
			 * result of an insertion, per RFC2992.
			 * Additionally keep the list sorted by route
			 * priority.
			 */
			before = 0;

			dupedkey_tt = x;
			x = rn_mpath_prio(x, prio);
			if (((struct rtentry *)x)->rt_priority !=
			    prio) {
				/*
				 * rn_mpath_prio returns the previous
				 * element if no element with the
				 * requested priority exists.  It could
				 * be that the previous element comes
				 * with a bigger priority.
				 */
				if (((struct rtentry *)x)->rt_priority > prio)
					before = 1;
				xp = x;
				break;
			}

			mid = rn_mpath_active_count(x) / 2;
			do {
				xp = x;
				x = rn_mpath_next(x, RMP_MODE_BYPRIO);
			} while (x && --mid > 0);
			break;
		}
#endif
		if (x->rn_mask == netmask)
			return (-1);
		if (netmask == NULL ||
		    (x->rn_mask &&
		    ((b_leaf < x->rn_b) || /* index(netmask) > node */
		    rn_refines(netmask, x->rn_mask) ||
		    rn_lexobetter(netmask, x->rn_mask))))
			break;
	}
	/*
	 * If the mask is not duplicated, we wouldn't
	 * find it among possible duplicate key entries
	 * anyway, so the above test doesn't hurt.
	 *
	 * We sort the masks for a duplicated key the same way as
	 * in a masklist -- most specific to least specific.
	 * This may require the unfortunate nuisance of relocating
	 * the head of the list.
	 *
	 * We also reverse, or doubly link the list through the
	 * parent pointer.
	 */

	if ((x == saved_tt && before) || before == 1)
		before = 1;
	else
		before = 0;
	rn_link_dupedkey(tt, xp, before);

#ifndef SMALL_KERNEL
	/* adjust the flags of the possible multipath chain */
	if (!dupedkey_tt)
		dupedkey_tt = tt;
	if (rn_mpath_capable(head))
		rn_mpath_adj_mpflag(dupedkey_tt, prio);
#endif
	return (0);
}

/*
 * Insert tt after x or in place of x if before is true.
 */
void
rn_link_dupedkey(struct radix_node *tt, struct radix_node *x, int before)
{
	if (before) {
		if (x->rn_p->rn_b > 0) {
			/* link in at head of list */
			tt->rn_dupedkey = x;
			tt->rn_flags = x->rn_flags;
			tt->rn_p = x->rn_p;
			x->rn_p = tt;
			if (tt->rn_p->rn_l == x)
				tt->rn_p->rn_l = tt;
			else
				tt->rn_p->rn_r = tt;
		} else {
			tt->rn_dupedkey = x;
			x->rn_p->rn_dupedkey = tt;
			tt->rn_p = x->rn_p;
			x->rn_p = tt;
		}
	} else {
		tt->rn_dupedkey = x->rn_dupedkey;
		x->rn_dupedkey = tt;
		tt->rn_p = x;
		if (tt->rn_dupedkey)
			tt->rn_dupedkey->rn_p = tt;
	}
}
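
/*
 * Illustration (added for clarity): a duped-key chain is singly linked
 * forward through rn_dupedkey and back-linked through rn_p, so after
 * inserting b between a and c the pointers look like
 *
 *	a --rn_dupedkey--> b --rn_dupedkey--> c
 *	a <-----rn_p------ b <-----rn_p------ c
 *
 * which is the "doubly link the list through the parent pointer"
 * mentioned in rn_add_dupedkey() above.
 */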

/*
 * This function ensures that routes are properly promoted upwards.
 * It adjusts the rn_mklist of the parent node to make sure overlapping
 * routes can be found.
 *
 * There are two cases:
 * - leaf nodes with a possible rn_dupedkey list
 * - internal nodes with maybe their own mklist
 * If the mask of the route is bigger than the current branch bit then
 * an rn_mklist entry needs to be made.
 */
void
rn_fixup_nodes(struct radix_node *tt)
{
	struct radix_node *tp, *x;
	struct radix_mask *m, **mp;
	int b_leaf;

	tp = tt->rn_p;
	if (tp->rn_r == tt)
		x = tp->rn_l;
	else
		x = tp->rn_r;

	b_leaf = -1 - tp->rn_b;
	if (x->rn_b < 0) {	/* x is a leaf node */
		struct radix_node *xx = NULL;

		for (mp = &tp->rn_mklist; x; xx = x, x = x->rn_dupedkey) {
			if (xx && xx->rn_mklist && xx->rn_mask == x->rn_mask &&
			    x->rn_mklist == 0) {
				/* multipath route */
				x->rn_mklist = xx->rn_mklist;
				x->rn_mklist->rm_refs++;
			}
			if (x->rn_mask && (x->rn_b >= b_leaf) &&
			    x->rn_mklist == 0) {
				*mp = m = rn_new_radix_mask(x, 0);
				if (m)
					mp = &m->rm_mklist;
			}
		}
	} else if (x->rn_mklist) {	/* x is an internal node */
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		tp->rn_mklist = m;
		*mp = 0;
	}
}

struct radix_node *
rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node treenodes[2], u_int8_t prio)
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	struct radix_node *tt, *saved_tt, *tm = NULL;
	int keyduplicated;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (n_arg) {
		if ((tm = rn_addmask(n_arg, 0, top->rn_off)) == 0)
			return (0);
	}

	tt = rn_insert(v, head, &keyduplicated, treenodes);

	if (keyduplicated) {
		saved_tt = tt;
		tt = treenodes;

		tt->rn_key = v_arg;
		tt->rn_b = -1;
		tt->rn_flags = RNF_ACTIVE;
	}

	/* Put mask into the node. */
	if (tm) {
		tt->rn_mask = tm->rn_key;
		tt->rn_b = tm->rn_b;
		tt->rn_flags |= tm->rn_flags & RNF_NORMAL;
	}

	/* Either insert into dupedkey list or as a leaf node. */
	if (keyduplicated) {
		if (rn_add_dupedkey(saved_tt, head, tt, prio))
			return (NULL);
	} else {
		rn_fixup_nodes(tt);
#ifndef SMALL_KERNEL
		if (rn_mpath_capable(head))
			rn_mpath_adj_mpflag(tt, prio);
#endif
	}

	/* finally insert a radix_mask element if needed */
	rn_add_radix_mask(tt, keyduplicated);
	return (tt);
}

/*
 * Clean up the mask list; tt points to the route that needs to be cleaned.
 */
int
rn_del_radix_mask(struct radix_node *tt)
{
	struct radix_node *x;
	struct radix_mask *m, *saved_m, **mp;

	/*
	 * Clean up the mask list from possible references to this route.
	 */
	saved_m = m = tt->rn_mklist;
	if (tt->rn_mask == NULL || m == NULL)
		return (0);

	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt && m->rm_refs == 0) {
			log(LOG_ERR, "rn_delete: inconsistent normal "
			    "annotation\n");
			return (-1);
		}
		if (m->rm_leaf != tt) {
			if (--m->rm_refs >= 0)
				return (0);
			else
				log(LOG_ERR, "rn_delete: "
				    "inconsistent mklist refcount\n");
		}
		/*
		 * If we end up here tt should be m->rm_leaf and therefore
		 * tt should be the head of a multipath chain.
		 * If this is not the case the table is no longer consistent.
		 */
		if (m->rm_refs > 0) {
			if (tt->rn_dupedkey == NULL ||
			    tt->rn_dupedkey->rn_mklist != m) {
				log(LOG_ERR, "rn_delete: inconsistent "
				    "dupedkey list\n");
				return (-1);
			}
			m->rm_leaf = tt->rn_dupedkey;
			--m->rm_refs;
			return (0);
		}
		/* else tt is last and only route */
	} else {
		if (m->rm_mask != tt->rn_mask) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			return (0);
		}
		if (--m->rm_refs >= 0)
			return (0);
	}

	/*
	 * No other references to the radix_mask are held, so remove it
	 * from the tree.
	 */
	x = rn_lift_node(tt);
	if (x == NULL)
		return (0);	/* Wasn't lifted at all */

	/* Finally eliminate the radix_mask from the tree */
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			pool_put(&rtmask_pool, m);
			break;
		}

	if (m == NULL) {
		log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
		if (tt->rn_flags & RNF_NORMAL)
			return (-1);	/* Dangling ref to us */
	}

	return (0);
}

/* swap two internal nodes and fix up the parent and child pointers */
static inline void
rn_swap_nodes(struct radix_node *from, struct radix_node *to)
{
	*to = *from;
	if (from->rn_p->rn_l == from)
		from->rn_p->rn_l = to;
	else
		from->rn_p->rn_r = to;

	to->rn_l->rn_p = to;
	to->rn_r->rn_p = to;
}

struct radix_node *
rn_delete(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *rn)
{
	caddr_t v = v_arg;
	caddr_t netmask = n_arg;
	struct radix_node *top = head->rnh_treetop;
	struct radix_node *tt, *tp, *pp, *x;
	struct radix_node *dupedkey_tt, *saved_tt;
	int off = top->rn_off;
	int vlen;

	vlen = *(u_char *)v;

	/*
	 * Implement a lookup similar to rn_lookup but we need to save
	 * the radix leaf node (where the rn_dupedkey list starts) so
	 * it is not possible to use rn_lookup.
	 */
	tt = rn_search(v, top);
	/* make sure the key is a perfect match */
	if (memcmp(v + off, tt->rn_key + off, vlen - off))
		return (NULL);

	/*
	 * Here, tt is the deletion target, and
	 * saved_tt is the head of the dupedkey chain.
	 * dupedkey_tt will point to the start of the multipath chain.
	 */
	saved_tt = tt;

	/*
	 * make tt point to the start of the rn_dupedkey list of multipath
	 * routes.
	 */
	if (netmask) {
		struct radix_node *tm;

		if ((tm = rn_addmask(netmask, 1, off)) == NULL)
			return (NULL);
		netmask = tm->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == NULL)
				return (NULL);
	}

	/* save start of multipath chain for later use */
	dupedkey_tt = tt;

#ifndef SMALL_KERNEL
	/* if we got a hint use the hint from now on */
	if (rn)
		tt = rn;
#endif

	KASSERT((tt->rn_flags & RNF_ROOT) == 0);

	/* remove possible radix_mask */
	if (rn_del_radix_mask(tt))
		return (NULL);

	/*
	 * Finally eliminate us from the tree.
	 */
	tp = tt->rn_p;
	if (saved_tt->rn_dupedkey) {
		if (tt == saved_tt) {
			x = saved_tt->rn_dupedkey;
			x->rn_p = tp;
			if (tp->rn_l == tt)
				tp->rn_l = x;
			else
				tp->rn_r = x;
			/* head changed, adjust dupedkey pointer */
			dupedkey_tt = x;
		} else {
			x = saved_tt;
			/* dupedkey will change so adjust pointer */
			if (dupedkey_tt == tt)
				dupedkey_tt = tt->rn_dupedkey;
			tp->rn_dupedkey = tt->rn_dupedkey;
			if (tt->rn_dupedkey)
				tt->rn_dupedkey->rn_p = tp;
		}

		/*
		 * We may be holding an active internal node in the tree.
		 */
		if (tt[1].rn_flags & RNF_ACTIVE)
			rn_swap_nodes(&tt[1], &x[1]);

#ifndef SMALL_KERNEL
		/* adjust the flags of the multipath chain */
		if (rn_mpath_capable(head))
			rn_mpath_adj_mpflag(dupedkey_tt,
			    ((struct rtentry *)tt)->rt_priority);
#endif
		/* over and out */
		goto out;
	}

	/* non-rn_dupedkey case, remove tt and tp node from the tree */
	if (tp->rn_l == tt)
		x = tp->rn_r;
	else
		x = tp->rn_l;
	pp = tp->rn_p;
	if (pp->rn_r == tp)
		pp->rn_r = x;
	else
		pp->rn_l = x;
	x->rn_p = pp;

	/*
	 * Demote routes attached to us (actually on the internal parent node).
	 */
	if (tp->rn_mklist) {
		struct radix_mask *m, **mp;

		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; (m = *mp);)
				mp = &m->rm_mklist;
			*mp = tp->rn_mklist;
		} else {
			/*
			 * If there are any key,mask pairs in a sibling
			 * duped-key chain, some subset will appear sorted
			 * in the same order attached to our mklist.
			 */
			for (m = tp->rn_mklist; m && x; x = x->rn_dupedkey)
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_mklist;

					x->rn_mklist = 0;
					if (--(m->rm_refs) < 0)
						pool_put(&rtmask_pool, m);
					else if (m->rm_flags & RNF_NORMAL)
						/*
						 * don't progress because this
						 * is a multipath route.  Next
						 * route will use the same m.
						 */
						mm = m;
					m = mm;
				}
			if (m)
				log(LOG_ERR, "%s %p at %p\n",
				    "rn_delete: Orphaned Mask", m, x);
		}
	}

	/*
	 * We may be holding an active internal node in the tree.
	 * If so, swap our internal node (tt[1]) with the parent node (tp)
	 * since that one was just removed from the tree.
	 */
	if (tp != &tt[1])
		rn_swap_nodes(&tt[1], tp);

	/* no rn_dupedkey list so no need to fix up multipath chains */
out:
	tt[0].rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

int
rn_walktree(struct radix_node_head *h, int (*f)(struct radix_node *, void *,
    u_int), void *w)
{
	int error;
	struct radix_node *base, *next;
	struct radix_node *rn = h->rnh_treetop;

	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while ((rn = base) != NULL) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) &&
			    (error = (*f)(rn, w, h->rnh_rtableid)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}

int
rn_initmask(void)
{
	if (mask_rnhead != NULL)
		return (0);

	KASSERT(max_keylen > 0);

	mask_rnhead = malloc(sizeof(*mask_rnhead), M_RTABLE, M_NOWAIT);
	if (mask_rnhead == NULL)
		return (1);

	rn_inithead0(mask_rnhead, 0);
	return (0);
}

int
rn_inithead(void **head, int off)
{
	struct radix_node_head *rnh;

	if (*head != NULL)
		return (1);

	if (rn_initmask())
		panic("failed to initialize the mask tree");

	rnh = malloc(sizeof(*rnh), M_RTABLE, M_NOWAIT);
	if (rnh == NULL)
		return (0);
	*head = rnh;
	rn_inithead0(rnh, off);
	return (1);
}

int
rn_inithead0(struct radix_node_head *rnh, int offset)
{
	struct radix_node *t, *tt, *ttt;
	int off = offset * NBBY;

	memset(rnh, 0, sizeof(*rnh));
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_treetop = t;
	return (1);
}

/*
 * rn_init() can be called multiple times with a different key length
 * as long as no radix tree head has been allocated.
 */
void
rn_init(unsigned int keylen)
{
	char *cp, *cplim;

	if (max_keylen == 0) {
		pool_init(&rtmask_pool, sizeof(struct radix_mask), 0, 0, 0,
		    "rtmask", NULL);
	}

	if (keylen <= max_keylen)
		return;

	KASSERT(mask_rnhead == NULL);

	free(rn_zeros, M_RTABLE, 3 * max_keylen);
	rn_zeros = mallocarray(3, keylen, M_RTABLE, M_NOWAIT | M_ZERO);
	if (rn_zeros == NULL)
		panic("cannot initialize a radix tree without memory");
	max_keylen = keylen;

	rn_ones = cp = rn_zeros + max_keylen;
	addmask_key = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
}
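
#if 0
/*
 * Usage sketch (illustrative only, never compiled): the call sequence
 * a consumer of this file typically goes through.  The key length, the
 * address offset and the priority below are assumptions made for the
 * example; real callers pass sockaddr-shaped keys whose first byte is
 * the total length, as rn_match() and friends expect.
 */
static void
radix_example(void)
{
	struct radix_node_head *rnh = NULL;
	struct radix_node nodes[2];	/* caller-owned leaf/internal pair */
	struct radix_node *rn;
	caddr_t key, mask, dst;		/* built by the caller */
	u_int8_t prio = 0;

	rn_init(32);			/* maximum key length, in bytes */
	rn_inithead((void **)&rnh, 4);	/* address starts at byte 4 */

	rn = rn_addroute(key, mask, rnh, nodes, prio);	/* insert */
	if (rn != NULL)
		rn = rn_match(dst, rnh);	/* longest-prefix lookup */
}
#endif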