/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)radix.c	8.2.2.1 (Berkeley) 10/09/94
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef _RADIX_H_
#include <sys/param.h>
#ifdef	KERNEL
#include <sys/systm.h>
#include <sys/malloc.h>
#define	M_DONTWAIT M_NOWAIT
#include <sys/domain.h>
#else
#include <stdlib.h>
#endif
#include <sys/syslog.h>
#include <net/radix.h>
#endif

int	max_keylen;
struct radix_mask *rn_mkfreelist;
struct radix_node_head *mask_rnhead;
static char *addmask_key;
static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
static char *rn_zeros, *rn_ones;

#define rn_masktop (mask_rnhead->rnh_treetop)
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bitwise logical AND of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
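 *
 * For example, a mask whose bits are all 1 up to bit 23 and all 0 from
 * bit 24 on has index 24 and is normal; a mask with a 1 bit anywhere
 * after its first 0 bit (a non-contiguous mask) is not normal.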
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */

struct radix_node *
rn_search(v_arg, head)
	void *v_arg;
	struct radix_node *head;
{
	register struct radix_node *x;
	register caddr_t v;

	for (x = head, v = v_arg; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
};

struct radix_node *
rn_search_m(v_arg, head, m_arg)
	struct radix_node *head;
	void *v_arg, *m_arg;
{
	register struct radix_node *x;
	register caddr_t v = v_arg, m = m_arg;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
};

int
rn_refines(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	register caddr_t m = m_arg, n = n_arg;
	register caddr_t lim, lim2 = lim = n + *(u_char *)n;
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2)
		if (*n++)
			return 0;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}

struct radix_node *
rn_lookup(v_arg, m_arg, head)
	void *v_arg, *m_arg;
	struct radix_node_head *head;
{
	register struct radix_node *x;
	caddr_t netmask = 0;

	if (m_arg) {
		if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
			return (0);
		netmask = x->rn_key;
	}
	x = rn_match(v_arg, head);
	if (x && netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	return x;
}

static
rn_satsifies_leaf(trial, leaf, skip)
	char *trial;
	register struct radix_node *leaf;
	int skip;
{
	register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
	char *cplim;
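	/* Compare only as many bytes as both keys (and the mask, if any) cover. */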
	int length = min(*(u_char *)cp, *(u_char *)cp2);

	if (cp3 == 0)
		cp3 = rn_ones;
	else
		length = min(length, *(u_char *)cp3);
	cplim = cp + length; cp3 += skip; cp2 += skip;
	for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
		if ((*cp ^ *cp2) & *cp3)
			return 0;
	return 1;
}

struct radix_node *
rn_match(v_arg, head)
	void *v_arg;
	struct radix_node_head *head;
{
	caddr_t v = v_arg;
	register struct radix_node *t = head->rnh_treetop, *x;
	register caddr_t cp = v, cp2;
	caddr_t cplim;
	struct radix_node *saved_t, *top = t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;
	register int test, b, rn_b;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 * or at least learn how many bits match, for normal mask finesse.
	 *
	 * It doesn't hurt us to limit how many bytes to check
	 * to the length of the mask, since if it matches we had a genuine
	 * match and the leaf we have is the most specific one anyway;
	 * if it didn't match with a shorter length it would fail
	 * with a long one.  This wins big for class B&C netmasks which
	 * are probably the most common case...
	 */
	if (t->rn_mask)
		vlen = *(u_char *)t->rn_mask;
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
	for (b = 7; (test >>= 1) > 0;)
		b--;
	matched_off = cp - v;
	b += matched_off << 3;
	rn_b = -1 - b;
	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	if ((saved_t = t)->rn_mask == 0)
		t = t->rn_dupedkey;
	for (; t; t = t->rn_dupedkey)
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if (rn_b <= t->rn_b)
				return t;
		} else if (rn_satsifies_leaf(v, t, matched_off))
			return t;
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * If non-contiguous masks ever become important
			 * we can restore the masking and open coding of
			 * the search and satisfaction test and put the
			 * calculation of "off" back before the "do".
			 */
			do {
				if (m->rm_flags & RNF_NORMAL) {
					if (rn_b <= m->rm_b)
						return (m->rm_leaf);
				} else {
					off = min(t->rn_off, matched_off);
					x = rn_search_m(v, t, m->rm_mask);
					while (x && x->rn_mask != m->rm_mask)
						x = x->rn_dupedkey;
					if (x && rn_satsifies_leaf(v, x, off))
						return x;
				}
			} while (m = m->rm_mklist);
		}
	} while (t != top);
	return 0;
};

#ifdef RN_DEBUG
int	rn_nodenum;
struct	radix_node *rn_clist;
int	rn_saveinfo;
int	rn_debug = 1;
#endif

struct radix_node *
rn_newpair(v, b, nodes)
	void *v;
	int b;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

struct radix_node *
rn_insert(v_arg, head, dupentry, nodes)
	void *v_arg;
	struct radix_node_head *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	int head_off = top->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v_arg, top);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * Find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
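	/*
	 * cp now points one past the first differing byte; back down
	 * through the exclusive-or of the two bytes to find b, the index
	 * of the first bit at which v and the stored key disagree.
	 */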
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = top;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
#endif
    }
	return (tt);
}

struct radix_node *
rn_addmask(n_arg, search, skip)
	int search, skip;
	void *n_arg;
{
	caddr_t netmask = (caddr_t)n_arg;
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b = 0, mlen, j;
	int maskduplicated, m0, isnormal;
	struct radix_node *saved_x;
	static int last_zeroed = 0;

	if ((mlen = *(u_char *)netmask) > max_keylen)
		mlen = max_keylen;
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return (mask_rnhead->rnh_nodes);
	if (skip > 1)
		Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
	if ((m0 = mlen) > skip)
		Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
	/*
	 * Trim trailing zeroes.
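	 * Masks that differ only in trailing zero bytes are equivalent, so
	 * canonicalize the stored length before searching the mask tree.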
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= last_zeroed)
			last_zeroed = mlen;
		return (mask_rnhead->rnh_nodes);
	}
	if (m0 < last_zeroed)
		Bzero(addmask_key + m0, last_zeroed - m0);
	*addmask_key = last_zeroed = mlen;
	x = rn_search(addmask_key, rn_masktop);
	if (Bcmp(addmask_key, x->rn_key, mlen) != 0)
		x = 0;
	if (x || search)
		return (x);
	R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
	if ((saved_x = x) == 0)
		return (0);
	Bzero(x, max_keylen + 2 * sizeof (*x));
	netmask = cp = (caddr_t)(x + 2);
	Bcopy(addmask_key, cp, mlen);
	x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
	if (maskduplicated) {
		log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
		Free(saved_x);
		return (x);
	}
	/*
	 * Calculate index of mask, and check for normalcy.
	 */
	cplim = netmask + mlen; isnormal = 1;
	for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
		cp++;
	if (cp != cplim) {
		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != normal_chars[b] || cp != (cplim - 1))
			isnormal = 0;
	}
	b += (cp - netmask) << 3;
	x->rn_b = -1 - b;
	if (isnormal)
		x->rn_flags |= RNF_NORMAL;
	return (x);
}

static int	/* XXX: arbitrary ordering for non-contiguous masks */
rn_lexobetter(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	register u_char *mp = m_arg, *np = n_arg, *lim;

	if (*mp > *np)
		return 1;  /* not really, but need to check longer one first */
	if (*mp == *np)
		for (lim = mp + *mp; mp < lim;)
			if (*mp++ > *np++)
				return 1;
	return 0;
}

static struct radix_mask *
rn_new_radix_mask(tt, next)
	register struct radix_node *tt;
	register struct radix_mask *next;
{
	register struct radix_mask *m;

	MKGet(m);
	if (m == 0) {
		log(LOG_ERR, "Mask for route not entered\n");
		return (0);
	}
	Bzero(m, sizeof *m);
	m->rm_b = tt->rn_b;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return m;
}

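/*
 * Add the route (v_arg, n_arg) to the tree rooted at head, using the two
 * radix_nodes supplied in treenodes.  Returns the leaf for the new route,
 * or 0 if it could not be added (e.g. an equivalent route already exists).
 */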
struct radix_node *
rn_addroute(v_arg, n_arg, head, treenodes)
	void *v_arg, *n_arg;
	struct radix_node_head *head;
	struct radix_node treenodes[2];
{
	caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
	register struct radix_node *t, *x, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf;
	int keyduplicated;
	caddr_t mmask;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
			return (0);
		b_leaf = x->rn_b;
		b = -1 - x->rn_b;
		netmask = x->rn_key;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
			if (tt->rn_mask == netmask)
				return (0);
			if (netmask == 0 ||
			    (tt->rn_mask &&
			     ((b_leaf < tt->rn_b) || /* index(netmask) > node */
			       rn_refines(netmask, tt->rn_mask) ||
			       rn_lexobetter(netmask, tt->rn_mask))))
				break;
		}
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 */
		if (tt == saved_tt) {
			struct	radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_p = x = t->rn_p;
			if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
		}
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = RNF_ACTIVE;
	}
	/*
	 * Put mask in tree.
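	 * The leaf takes the canonical mask pointer and inherits the index
	 * and flags that rn_addmask computed for that mask.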
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
		tt->rn_flags = x->rn_flags;
	}
	t = saved_tt->rn_p;
	if (keyduplicated)
		goto on2;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
	    for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			if (*mp = m = rn_new_radix_mask(x, 0))
				mp = &m->rm_mklist;
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
on2:
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			mmask = m->rm_leaf->rn_mask;
			if (tt->rn_flags & RNF_NORMAL) {
				log(LOG_ERR,
				    "Non-unique normal route, mask not entered");
				return tt;
			}
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
	return tt;
}

struct radix_node *
rn_delete(v_arg, netmask_arg, head)
	void *v_arg, *netmask_arg;
	struct radix_node_head *head;
{
	register struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	caddr_t v, netmask;
	int b, head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_off;
	vlen = *(u_char *)v;
	saved_tt = tt;
	top = x;
	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 1, head_off)) == 0)
			return (0);
		netmask = x->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt || m->rm_refs > 0) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			return 0;  /* dangling ref could cause disaster */
		}
	} else {
		if (m->rm_mask != tt->rn_mask) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			goto on1;
		}
		if (--m->rm_refs >= 0)
			goto on1;
	}
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0) {
		log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
		if (tt->rn_flags & RNF_NORMAL)
			return (0); /* Dangling ref to us */
	}
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif
	t = tt->rn_p;
	if (dupedkey = saved_tt->rn_dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
		} else {
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else log(LOG_ERR, "rn_delete: couldn't find us\n");
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t; p = t->rn_p;
#else
			b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/* If there are any key,mask pairs in a sibling
			   duped-key chain, some subset will appear sorted
			   in the same order attached to our mklist */
			for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_mklist;
					x->rn_mklist = 0;
					if (--(m->rm_refs) < 0)
						MKFree(m);
					m = mm;
				}
			if (m)
				log(LOG_ERR, "%s %x at %x\n",
				    "rn_delete: Orphaned Mask", m, x);
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

int
rn_walktree(h, f, w)
	struct radix_node_head *h;
	register int (*f)();
	void *w;
{
	int error;
	struct radix_node *base, *next;
	register struct radix_node *rn = h->rnh_treetop;
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
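	 * The walk visits every non-root leaf, including entries on
	 * duped-key chains, applying f to each.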
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while (rn = base) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}

int
rn_inithead(head, off)
	void **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_lookup = rn_lookup;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_treetop = t;
	return (1);
}

void
rn_init()
{
	char *cp, *cplim;
#ifdef KERNEL
	struct domain *dom;

	for (dom = domains; dom; dom = dom->dom_next)
		if (dom->dom_maxrtkey > max_keylen)
			max_keylen = dom->dom_maxrtkey;
#endif
	if (max_keylen == 0) {
		log(LOG_ERR,
		    "rn_init: radix functions require max_keylen be set\n");
		return;
	}
	R_Malloc(rn_zeros, char *, 3 * max_keylen);
	if (rn_zeros == NULL)
		panic("rn_init");
	Bzero(rn_zeros, 3 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	addmask_key = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
	if (rn_inithead((void **)&mask_rnhead, 0) == 0)
		panic("rn_init 2");
}