/*
 * iterator/iter_utils.c - iterative resolver module utility functions.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains functions to assist the iterator module.
 * Configuration options. Forward zones.
 */
#include "config.h"
#include "iterator/iter_utils.h"
#include "iterator/iterator.h"
#include "iterator/iter_hints.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_donotq.h"
#include "iterator/iter_delegpt.h"
#include "iterator/iter_priv.h"
#include "services/cache/infra.h"
#include "services/cache/dns.h"
#include "services/cache/rrset.h"
#include "util/net_help.h"
#include "util/module.h"
#include "util/log.h"
#include "util/config_file.h"
#include "util/regional.h"
#include "util/data/msgparse.h"
#include "util/data/dname.h"
#include "util/random.h"
#include "util/fptr_wlist.h"
#include "validator/val_anchor.h"
#include "validator/val_kcache.h"
#include "validator/val_kentry.h"

/** time when nameserver glue is said to be 'recent' */
#define SUSPICION_RECENT_EXPIRY 86400
/** penalty for validation-failed (blacklisted) IPs */
#define BLACKLIST_PENALTY (USEFUL_SERVER_TOP_TIMEOUT*4)

/** fill up the target fetch policy array */
static void
fetch_fill(struct iter_env* ie, const char* str)
{
	char* s = (char*)str, *e;
	int i;
	for(i=0; i<ie->max_dependency_depth+1; i++) {
		ie->target_fetch_policy[i] = strtol(s, &e, 10);
		if(s == e)
			fatal_exit("cannot parse fetch policy number %s", s);
		s = e;
	}
}

/** Read config string that represents the target fetch policy */
static int
read_fetch_policy(struct iter_env* ie, const char* str)
{
	int count = cfg_count_numbers(str);
	if(count < 1) {
		log_err("Cannot parse target fetch policy: \"%s\"", str);
		return 0;
	}
	ie->max_dependency_depth = count - 1;
	ie->target_fetch_policy = (int*)calloc(
		(size_t)ie->max_dependency_depth+1, sizeof(int));
	if(!ie->target_fetch_policy) {
		log_err("alloc fetch policy: out of memory");
		return 0;
	}
	fetch_fill(ie, str);
	return 1;
}
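
/*
 * Illustration: a target-fetch-policy string such as "3 2 1 0 0"
 * contains 5 numbers, so max_dependency_depth becomes 4 and
 * target_fetch_policy[] is filled with {3, 2, 1, 0, 0}; entry i is,
 * roughly, the number of extra nameserver-address queries allowed at
 * dependency depth i.
 */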

int
iter_apply_cfg(struct iter_env* iter_env, struct config_file* cfg)
{
	int i;
	/* target fetch policy */
	if(!read_fetch_policy(iter_env, cfg->target_fetch_policy))
		return 0;
	for(i=0; i<iter_env->max_dependency_depth+1; i++)
		verbose(VERB_QUERY, "target fetch policy for level %d is %d",
			i, iter_env->target_fetch_policy[i]);

	if(!iter_env->hints)
		iter_env->hints = hints_create();
	if(!iter_env->hints || !hints_apply_cfg(iter_env->hints, cfg)) {
		log_err("Could not set root or stub hints");
		return 0;
	}
	if(!iter_env->donotq)
		iter_env->donotq = donotq_create();
	if(!iter_env->donotq || !donotq_apply_cfg(iter_env->donotq, cfg)) {
		log_err("Could not set donotqueryaddresses");
		return 0;
	}
	if(!iter_env->priv)
		iter_env->priv = priv_create();
	if(!iter_env->priv || !priv_apply_cfg(iter_env->priv, cfg)) {
		log_err("Could not set private addresses");
		return 0;
	}
	iter_env->supports_ipv6 = cfg->do_ip6;
	iter_env->supports_ipv4 = cfg->do_ip4;
	return 1;
}

/** filter out unsuitable targets
 * @param iter_env: iterator environment with ipv6-support flag.
 * @param env: module environment with infra cache.
 * @param name: zone name
 * @param namelen: length of name
 * @param qtype: query type (host order).
 * @param now: current time
 * @param a: address in delegation point we are examining.
 * @return an integer that signals the target suitability,
 *	as follows:
 *	-1: The address should be omitted from the list.
 *	    Because:
 *		o The address is bogus (DNSSEC validation failure).
 *		o Listed as donotquery.
 *		o is ipv6 but there is no ipv6 support (in operating system).
 *		o is ipv4 but there is no ipv4 support (in operating system).
 *		o is lame.
 *	Otherwise, an rtt in milliseconds:
 *	0 .. USEFUL_SERVER_TOP_TIMEOUT-1
 *		The roundtrip time timeout estimate. Less than 2 minutes.
 *		Note that util/rtt.c has a MIN_TIMEOUT of 50 msec, thus
 *		values 0 .. 49 are not used, unless that is changed.
 *	USEFUL_SERVER_TOP_TIMEOUT
 *		This exact value is given for unresponsive blacklisted
 *		servers.
 *	USEFUL_SERVER_TOP_TIMEOUT+1
 *		For non-blacklisted servers: huge timeout, but has traffic.
 *	USEFUL_SERVER_TOP_TIMEOUT*1 ..
 *		Parent-side lame servers get this penalty. A dispreferred
 *		server. (lame in delegpt).
 *	USEFUL_SERVER_TOP_TIMEOUT*2 ..
 *		dnsseclame servers get this penalty.
 *	USEFUL_SERVER_TOP_TIMEOUT*3 ..
 *		recursion lame servers get this penalty.
 *	UNKNOWN_SERVER_NICENESS
 *		If no information is known about the server, this is
 *		returned. 376 msec or so.
 *	+BLACKLIST_PENALTY (USEFUL_SERVER_TOP_TIMEOUT*4) for dnssec
 *		failed IPs.
 *
 * When the final chosen value is in the dnsseclame range, dnssec-lameness
 * checking is turned off (so we do not discard the reply).
 * When the final chosen value is in the recursionlame range, the RD bit is
 * set on the query. Because of the numbers this means recursion-lame servers
 * also have dnssec-lameness checking turned off.
 */
static int
iter_filter_unsuitable(struct iter_env* iter_env, struct module_env* env,
	uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
	struct delegpt_addr* a)
{
	int rtt, lame, reclame, dnsseclame;
	if(a->bogus)
		return -1; /* address of server is bogus */
	if(donotq_lookup(iter_env->donotq, &a->addr, a->addrlen)) {
		log_addr(VERB_ALGO, "skip addr on the donotquery list",
			&a->addr, a->addrlen);
		return -1; /* server is on the donotquery list */
	}
	if(!iter_env->supports_ipv6 && addr_is_ip6(&a->addr, a->addrlen)) {
		return -1; /* there is no ip6 available */
	}
	if(!iter_env->supports_ipv4 && !addr_is_ip6(&a->addr, a->addrlen)) {
		return -1; /* there is no ip4 available */
	}
	/* check lameness - need zone, class info */
	if(infra_get_lame_rtt(env->infra_cache, &a->addr, a->addrlen,
		name, namelen, qtype, &lame, &dnsseclame, &reclame,
		&rtt, now)) {
		log_addr(VERB_ALGO, "servselect", &a->addr, a->addrlen);
		verbose(VERB_ALGO, "   rtt=%d%s%s%s%s", rtt,
			lame?" LAME":"",
			dnsseclame?" DNSSEC_LAME":"",
			reclame?" REC_LAME":"",
			a->lame?" ADDR_LAME":"");
		if(lame)
			return -1; /* server is lame */
		else if(rtt >= USEFUL_SERVER_TOP_TIMEOUT)
			/* server is unresponsive; we used to return
			 * TOP_TIMEOUT, but that was fairly useless, because
			 * a value == TOP_TIMEOUT is dropped later anyway
			 * when it turns out to be blacklisted. Instead,
			 * remove it here, so other choices (that are not
			 * blacklisted) can be tried. */
			return -1;
		/* select remainder from worst to best */
		else if(reclame)
			return rtt+USEFUL_SERVER_TOP_TIMEOUT*3; /* nonpref */
		else if(dnsseclame)
			return rtt+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
		else if(a->lame)
			return rtt+USEFUL_SERVER_TOP_TIMEOUT+1; /* nonpref */
		else	return rtt;
	}
	/* no server information present */
	if(a->lame)
		return USEFUL_SERVER_TOP_TIMEOUT+1+UNKNOWN_SERVER_NICENESS; /* nonpref */
	return UNKNOWN_SERVER_NICENESS;
}
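
/*
 * Worked example of the scores above (for illustration; the exact
 * constants are defined in iterator.h): an address with a cached rtt of
 * 80 msec and no lameness scores 80; the same address marked dnsseclame
 * scores 80 + USEFUL_SERVER_TOP_TIMEOUT*2; if it is also on the
 * validation-failure blacklist, BLACKLIST_PENALTY
 * (USEFUL_SERVER_TOP_TIMEOUT*4) is added on top by iter_fill_rtt()
 * below. Lower scores are preferred by the selection code.
 */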

/** lookup RTT information, and also store fastest rtt (if any) */
static int
iter_fill_rtt(struct iter_env* iter_env, struct module_env* env,
	uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
	struct delegpt* dp, int* best_rtt, struct sock_list* blacklist)
{
	int got_it = 0;
	struct delegpt_addr* a;
	if(dp->bogus)
		return 0; /* NS bogus, all bogus, nothing found */
	for(a=dp->result_list; a; a = a->next_result) {
		a->sel_rtt = iter_filter_unsuitable(iter_env, env,
			name, namelen, qtype, now, a);
		if(a->sel_rtt != -1) {
			if(sock_list_find(blacklist, &a->addr, a->addrlen))
				a->sel_rtt += BLACKLIST_PENALTY;

			if(!got_it) {
				*best_rtt = a->sel_rtt;
				got_it = 1;
			} else if(a->sel_rtt < *best_rtt) {
				*best_rtt = a->sel_rtt;
			}
		}
	}
	return got_it;
}

/** filter the address list, putting best targets at front,
 * returns number of best targets (or 0, no suitable targets) */
static int
iter_filter_order(struct iter_env* iter_env, struct module_env* env,
	uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
	struct delegpt* dp, int* selected_rtt, int open_target,
	struct sock_list* blacklist)
{
	int got_num = 0, low_rtt = 0, swap_to_front;
	struct delegpt_addr* a, *n, *prev=NULL;

	/* fill up sel_rtt and find best rtt in the bunch */
	got_num = iter_fill_rtt(iter_env, env, name, namelen, qtype, now, dp,
		&low_rtt, blacklist);
	if(got_num == 0)
		return 0;
	if(low_rtt >= USEFUL_SERVER_TOP_TIMEOUT &&
		(delegpt_count_missing_targets(dp) > 0 || open_target > 0)) {
		verbose(VERB_ALGO, "Bad choices, trying to get more choice");
		return 0; /* we want more choice. The best choice is a bad
			one; return 0 to force the caller to fetch more */
	}

	got_num = 0;
	a = dp->result_list;
	while(a) {
		/* skip unsuitable targets */
		if(a->sel_rtt == -1) {
			prev = a;
			a = a->next_result;
			continue;
		}
		/* classify the server address and determine what to do */
		swap_to_front = 0;
		if(a->sel_rtt >= low_rtt && a->sel_rtt - low_rtt <= RTT_BAND) {
			got_num++;
			swap_to_front = 1;
		} else if(a->sel_rtt<low_rtt && low_rtt-a->sel_rtt<=RTT_BAND) {
			got_num++;
			swap_to_front = 1;
		}
		/* swap to front if necessary, or move to next result */
		if(swap_to_front && prev) {
			n = a->next_result;
			prev->next_result = n;
			a->next_result = dp->result_list;
			dp->result_list = a;
			a = n;
		} else {
			prev = a;
			a = a->next_result;
		}
	}
	*selected_rtt = low_rtt;
	return got_num;
}
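
/*
 * For illustration: if the best (lowest) sel_rtt in the result list is
 * 50 msec, every address whose sel_rtt lies within RTT_BAND msec of that
 * value (RTT_BAND is defined in iterator.h) is swapped to the front of
 * dp->result_list and counted; that count is what iter_server_selection()
 * below picks from at random.
 */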

struct delegpt_addr*
iter_server_selection(struct iter_env* iter_env,
	struct module_env* env, struct delegpt* dp,
	uint8_t* name, size_t namelen, uint16_t qtype, int* dnssec_lame,
	int* chase_to_rd, int open_target, struct sock_list* blacklist)
{
	int sel;
	int selrtt;
	struct delegpt_addr* a, *prev;
	int num = iter_filter_order(iter_env, env, name, namelen, qtype,
		*env->now, dp, &selrtt, open_target, blacklist);

	if(num == 0)
		return NULL;
	verbose(VERB_ALGO, "selrtt %d", selrtt);
	if(selrtt > BLACKLIST_PENALTY) {
		if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*3) {
			verbose(VERB_ALGO, "chase to "
				"blacklisted recursion lame server");
			*chase_to_rd = 1;
		}
		if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*2) {
			verbose(VERB_ALGO, "chase to "
				"blacklisted dnssec lame server");
			*dnssec_lame = 1;
		}
	} else {
		if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*3) {
			verbose(VERB_ALGO, "chase to recursion lame server");
			*chase_to_rd = 1;
		}
		if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*2) {
			verbose(VERB_ALGO, "chase to dnssec lame server");
			*dnssec_lame = 1;
		}
		if(selrtt == USEFUL_SERVER_TOP_TIMEOUT) {
			verbose(VERB_ALGO, "chase to blacklisted lame server");
			return NULL;
		}
	}

	if(num == 1) {
		/* only one address in the band: return it, and once its
		 * attempts are used up, also remove it from the list */
		a = dp->result_list;
		if(++a->attempts < OUTBOUND_MSG_RETRY)
			return a;
		dp->result_list = a->next_result;
		return a;
	}

	/* randomly select a target from the list */
	log_assert(num > 1);
	/* grab a secure random number, to pick an unexpected server.
	 * it also needs to be threadsafe. */
	sel = ub_random_max(env->rnd, num);
	a = dp->result_list;
	prev = NULL;
	while(sel > 0 && a) {
		prev = a;
		a = a->next_result;
		sel--;
	}
	if(!a) /* robustness */
		return NULL;
	if(++a->attempts < OUTBOUND_MSG_RETRY)
		return a;
	/* remove it from the delegation point result list */
	if(prev)
		prev->next_result = a->next_result;
	else	dp->result_list = a->next_result;
	return a;
}
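
#if 0
/* Not compiled: a minimal sketch of how a caller might drive the
 * selection routine above. The function and variable names here are
 * placeholders for illustration; the real caller lives in iterator.c. */
static void
example_pick_target(struct iter_env* ie, struct module_env* env,
	struct delegpt* dp, struct query_info* qinfo,
	struct sock_list* blacklist)
{
	int dnssec_lame = 0, chase_to_rd = 0;
	struct delegpt_addr* target = iter_server_selection(ie, env, dp,
		dp->name, dp->namelen, qinfo->qtype, &dnssec_lame,
		&chase_to_rd, 0 /* open_target */, blacklist);
	if(!target) {
		/* no usable address: the caller should try to fetch more
		 * nameserver addresses for this delegation point */
		return;
	}
	/* dnssec_lame and chase_to_rd tell the caller to relax
	 * dnssec-lameness checks or to set RD on the query, respectively;
	 * the query is then sent to target->addr. */
}
#endif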

struct dns_msg*
dns_alloc_msg(ldns_buffer* pkt, struct msg_parse* msg,
	struct regional* region)
{
	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!m)
		return NULL;
	memset(m, 0, sizeof(*m));
	if(!parse_create_msg(pkt, msg, NULL, &m->qinfo, &m->rep, region)) {
		log_err("malloc failure: allocating incoming dns_msg");
		return NULL;
	}
	return m;
}

struct dns_msg*
dns_copy_msg(struct dns_msg* from, struct regional* region)
{
	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!m)
		return NULL;
	m->qinfo = from->qinfo;
	if(!(m->qinfo.qname = regional_alloc_init(region, from->qinfo.qname,
		from->qinfo.qname_len)))
		return NULL;
	if(!(m->rep = reply_info_copy(from->rep, NULL, region)))
		return NULL;
	return m;
}

int
iter_dns_store(struct module_env* env, struct query_info* msgqinf,
	struct reply_info* msgrep, int is_referral, uint32_t leeway,
	struct regional* region)
{
	return dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
		region);
}

int
iter_ns_probability(struct ub_randstate* rnd, int n, int m)
{
	int sel;
	if(n == m) /* 100% chance */
		return 1;
	/* we do not need secure random numbers here, but
	 * we do need it to be threadsafe, so we use this */
	sel = ub_random_max(rnd, m);
	return (sel < n);
}
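
/*
 * For example, iter_ns_probability(rnd, 1, 4) returns true about one
 * time in four, and with n == m it always returns true; callers use it
 * as an n-out-of-m coin flip.
 */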

/** detect dependency cycle for query and target */
static int
causes_cycle(struct module_qstate* qstate, uint8_t* name, size_t namelen,
	uint16_t t, uint16_t c)
{
	struct query_info qinf;
	qinf.qname = name;
	qinf.qname_len = namelen;
	qinf.qtype = t;
	qinf.qclass = c;
	fptr_ok(fptr_whitelist_modenv_detect_cycle(
		qstate->env->detect_cycle));
	return (*qstate->env->detect_cycle)(qstate, &qinf,
		(uint16_t)(BIT_RD|BIT_CD), qstate->is_priming);
}

void
iter_mark_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
{
	struct delegpt_ns* ns;
	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->resolved)
			continue;
		/* see if this ns as target causes dependency cycle */
		if(causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass) ||
		   causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
			log_nametypeclass(VERB_QUERY, "skipping target due "
				"to dependency cycle (harden-glue: no may "
				"fix some of the cycles)",
				ns->name, LDNS_RR_TYPE_A,
				qstate->qinfo.qclass);
			ns->resolved = 1;
		}
	}
}

void
iter_mark_pside_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
{
	struct delegpt_ns* ns;
	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->done_pside4 && ns->done_pside6)
			continue;
		/* see if this ns as target causes dependency cycle */
		if(causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
			log_nametypeclass(VERB_QUERY, "skipping target due "
				"to dependency cycle", ns->name,
				LDNS_RR_TYPE_A, qstate->qinfo.qclass);
			ns->done_pside4 = 1;
		}
		if(causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass)) {
			log_nametypeclass(VERB_QUERY, "skipping target due "
				"to dependency cycle", ns->name,
				LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass);
			ns->done_pside6 = 1;
		}
	}
}

int
iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
	struct delegpt* dp)
{
	struct delegpt_ns* ns;
	/* check:
	 *	o RD qflag is on.
	 *	o no addresses are provided.
	 *	o all NS items are required glue.
	 *	OR
	 *	o RD qflag is on.
	 *	o no addresses are provided.
	 *	o the query is for one of the nameservers in dp,
	 *	  and that nameserver is a glue-name for this dp.
	 */
	if(!(qflags&BIT_RD))
		return 0;
	/* either available or unused targets */
	if(dp->usable_list || dp->result_list)
		return 0;

	/* see if query is for one of the nameservers, which is glue */
	if( (qinfo->qtype == LDNS_RR_TYPE_A ||
		qinfo->qtype == LDNS_RR_TYPE_AAAA) &&
		dname_subdomain_c(qinfo->qname, dp->name) &&
		delegpt_find_ns(dp, qinfo->qname, qinfo->qname_len))
		return 1;

	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->resolved) /* skip failed targets */
			continue;
		if(!dname_subdomain_c(ns->name, dp->name))
			return 0; /* one address is not required glue */
	}
	return 1;
}
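
/*
 * Example of a useless delegation point, as detected above: a recursive
 * (RD) query for ns1.example.org. A, with a delegation point for
 * example.org. that has no addresses at all (empty usable and result
 * lists) and whose only nameserver is ns1.example.org. itself. Every
 * target is glue under the zone being resolved, so chasing this
 * delegation cannot make progress.
 */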

int
iter_indicates_dnssec(struct module_env* env, struct delegpt* dp,
	struct dns_msg* msg, uint16_t dclass)
{
	struct trust_anchor* a;
	/* information not available, !env->anchors can be common */
	if(!env || !env->anchors || !dp || !dp->name)
		return 0;
	/* a trust anchor exists with this name, RRSIGs expected */
	if((a=anchor_find(env->anchors, dp->name, dp->namelabs, dp->namelen,
		dclass))) {
		lock_basic_unlock(&a->lock);
		return 1;
	}
	/* see if DS rrset was given, in AUTH section */
	if(msg && msg->rep &&
		reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_DS, dclass))
		return 1;
	/* look in key cache */
	if(env->key_cache) {
		struct key_entry_key* kk = key_cache_obtain(env->key_cache,
			dp->name, dp->namelen, dclass, env->scratch, *env->now);
		if(kk) {
			if(query_dname_compare(kk->name, dp->name) == 0) {
				if(key_entry_isgood(kk) || key_entry_isbad(kk)) {
					regional_free_all(env->scratch);
					return 1;
				} else if(key_entry_isnull(kk)) {
					regional_free_all(env->scratch);
					return 0;
				}
			}
			regional_free_all(env->scratch);
		}
	}
	return 0;
}

int
iter_msg_has_dnssec(struct dns_msg* msg)
{
	size_t i;
	if(!msg || !msg->rep)
		return 0;
	for(i=0; i<msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
		if(((struct packed_rrset_data*)msg->rep->rrsets[i]->
			entry.data)->rrsig_count > 0)
			return 1;
	}
	/* empty message has no DNSSEC info, with DNSSEC the reply is
	 * not empty (NSEC) */
	return 0;
}

int iter_msg_from_zone(struct dns_msg* msg, struct delegpt* dp,
	enum response_type type, uint16_t dclass)
{
	if(!msg || !dp || !msg->rep || !dp->name)
		return 0;
	/* SOA RRset - always from reply zone */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_SOA, dclass) ||
	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_SOA, dclass))
		return 1;
	if(type == RESPONSE_TYPE_REFERRAL) {
		size_t i;
		/* if it adds a single label, i.e. we expect .com,
		 * and a referral to example.com. NS ..., then the origin
		 * zone is .com. For a referral to sub.example.com. NS ...
		 * we do not know, since example.com. may be in between. */
		for(i=0; i<msg->rep->an_numrrsets+msg->rep->ns_numrrsets;
			i++) {
			struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
			if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS &&
				ntohs(s->rk.rrset_class) == dclass) {
				int l = dname_count_labels(s->rk.dname);
				if(l == dp->namelabs + 1 &&
					dname_strict_subdomain(s->rk.dname,
					l, dp->name, dp->namelabs))
					return 1;
			}
		}
		return 0;
	}
	log_assert(type==RESPONSE_TYPE_ANSWER || type==RESPONSE_TYPE_CNAME);
	/* not a referral, and not a lame delegation (upwards), so
	 * any NS rrset must be from the zone itself */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_NS, dclass) ||
	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_NS, dclass))
		return 1;
	/* a DNSKEY set is expected at the zone apex as well;
	 * this is for 'minimal responses' for DNSKEYs */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_DNSKEY, dclass))
		return 1;
	return 0;
}

/**
 * check equality of two rrsets
 * @param k1: rrset
 * @param k2: rrset
 * @return true if equal
 */
static int
rrset_equal(struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
{
	struct packed_rrset_data* d1 = (struct packed_rrset_data*)
		k1->entry.data;
	struct packed_rrset_data* d2 = (struct packed_rrset_data*)
		k2->entry.data;
	size_t i, t;
	if(k1->rk.dname_len != k2->rk.dname_len ||
		k1->rk.flags != k2->rk.flags ||
		k1->rk.type != k2->rk.type ||
		k1->rk.rrset_class != k2->rk.rrset_class ||
		query_dname_compare(k1->rk.dname, k2->rk.dname) != 0)
		return 0;
	if(d1->ttl != d2->ttl ||
		d1->count != d2->count ||
		d1->rrsig_count != d2->rrsig_count ||
		d1->trust != d2->trust ||
		d1->security != d2->security)
		return 0;
	t = d1->count + d1->rrsig_count;
	for(i=0; i<t; i++) {
		if(d1->rr_len[i] != d2->rr_len[i] ||
			d1->rr_ttl[i] != d2->rr_ttl[i] ||
			memcmp(d1->rr_data[i], d2->rr_data[i],
				d1->rr_len[i]) != 0)
			return 0;
	}
	return 1;
}

int
reply_equal(struct reply_info* p, struct reply_info* q, ldns_buffer* scratch)
{
	size_t i;
	if(p->flags != q->flags ||
		p->qdcount != q->qdcount ||
		p->ttl != q->ttl ||
		p->prefetch_ttl != q->prefetch_ttl ||
		p->security != q->security ||
		p->an_numrrsets != q->an_numrrsets ||
		p->ns_numrrsets != q->ns_numrrsets ||
		p->ar_numrrsets != q->ar_numrrsets ||
		p->rrset_count != q->rrset_count)
		return 0;
	for(i=0; i<p->rrset_count; i++) {
		if(!rrset_equal(p->rrsets[i], q->rrsets[i])) {
			/* fallback procedure: try to sort and canonicalize */
			ldns_rr_list* pl, *ql;
			pl = packed_rrset_to_rr_list(p->rrsets[i], scratch);
			ql = packed_rrset_to_rr_list(q->rrsets[i], scratch);
			if(!pl || !ql) {
				ldns_rr_list_deep_free(pl);
				ldns_rr_list_deep_free(ql);
				return 0;
			}
			ldns_rr_list2canonical(pl);
			ldns_rr_list2canonical(ql);
			ldns_rr_list_sort(pl);
			ldns_rr_list_sort(ql);
			if(ldns_rr_list_compare(pl, ql) != 0) {
				ldns_rr_list_deep_free(pl);
				ldns_rr_list_deep_free(ql);
				return 0;
			}
			ldns_rr_list_deep_free(pl);
			ldns_rr_list_deep_free(ql);
			continue;
		}
	}
	return 1;
}
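
/*
 * Note on the fallback in reply_equal(): when the byte-for-byte rrset
 * comparison fails, both rrsets are converted to ldns rr lists, put in
 * canonical form and sorted, so the same records in a different order
 * still compare equal.
 */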

void
iter_store_parentside_rrset(struct module_env* env,
	struct ub_packed_rrset_key* rrset)
{
	struct rrset_ref ref;
	rrset = packed_rrset_copy_alloc(rrset, env->alloc, *env->now);
	if(!rrset) {
		log_err("malloc failure in store_parentside_rrset");
		return;
	}
	rrset->rk.flags |= PACKED_RRSET_PARENT_SIDE;
	rrset->entry.hash = rrset_key_hash(&rrset->rk);
	ref.key = rrset;
	ref.id = rrset->id;
	/* ignore ret: if it was in the cache, ref updated */
	(void)rrset_cache_update(env->rrset_cache, &ref, env->alloc, *env->now);
}

/** fetch the NS rrset from the reply, if any */
static struct ub_packed_rrset_key*
reply_get_NS_rrset(struct reply_info* rep)
{
	size_t i;
	for(i=0; i<rep->rrset_count; i++) {
		if(rep->rrsets[i]->rk.type == htons(LDNS_RR_TYPE_NS)) {
			return rep->rrsets[i];
		}
	}
	return NULL;
}

void
iter_store_parentside_NS(struct module_env* env, struct reply_info* rep)
{
	struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
	if(rrset) {
		log_rrset_key(VERB_ALGO, "store parent-side NS", rrset);
		iter_store_parentside_rrset(env, rrset);
	}
}

void iter_store_parentside_neg(struct module_env* env,
	struct query_info* qinfo, struct reply_info* rep)
{
	/* TTL: NS from referral in iq->deleg_msg,
	 * or first RR from iq->response,
	 * or servfail 5 secs if !iq->response */
	uint32_t ttl = NORR_TTL;
	struct ub_packed_rrset_key* neg;
	struct packed_rrset_data* newd;
	if(rep) {
		struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
		if(!rrset && rep->rrset_count != 0) rrset = rep->rrsets[0];
		if(rrset) ttl = ub_packed_rrset_ttl(rrset);
	}
	/* create empty rrset to store */
	neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
		sizeof(struct ub_packed_rrset_key));
	if(!neg) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	memset(&neg->entry, 0, sizeof(neg->entry));
	neg->entry.key = neg;
	neg->rk.type = htons(qinfo->qtype);
	neg->rk.rrset_class = htons(qinfo->qclass);
	neg->rk.flags = 0;
	neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname,
		qinfo->qname_len);
	if(!neg->rk.dname) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->rk.dname_len = qinfo->qname_len;
	neg->entry.hash = rrset_key_hash(&neg->rk);
	newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(uint32_t) + sizeof(uint16_t));
	if(!newd) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->entry.data = newd;
	newd->ttl = ttl;
	/* entry must have one RR, otherwise not valid in cache.
	 * put in one RR with empty rdata: it is ignored as a nameserver */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	ldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
	/* store it */
	log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
	iter_store_parentside_rrset(env, neg);
}
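
/*
 * The negative entry built above is a single allocation: the
 * packed_rrset_data header followed by just enough room for one rr_len
 * entry (size_t), one rr_data pointer (uint8_t*), one rr_ttl value
 * (uint32_t) and the 2-byte rdata length of the one empty RR;
 * packed_rrset_ptr_fixup() points the rr_len/rr_ttl/rr_data arrays into
 * that space.
 */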

int
iter_lookup_parent_NS_from_cache(struct module_env* env, struct delegpt* dp,
	struct regional* region, struct query_info* qinfo)
{
	struct ub_packed_rrset_key* akey;
	akey = rrset_cache_lookup(env->rrset_cache, dp->name,
		dp->namelen, LDNS_RR_TYPE_NS, qinfo->qclass,
		PACKED_RRSET_PARENT_SIDE, *env->now, 0);
	if(akey) {
		log_rrset_key(VERB_ALGO, "found parent-side NS in cache", akey);
		dp->has_parent_side_NS = 1;
		/* and mark the new names as lame */
		if(!delegpt_rrset_add_ns(dp, region, akey, 1)) {
			lock_rw_unlock(&akey->entry.lock);
			return 0;
		}
		lock_rw_unlock(&akey->entry.lock);
	}
	return 1;
}

int iter_lookup_parent_glue_from_cache(struct module_env* env,
	struct delegpt* dp, struct regional* region, struct query_info* qinfo)
{
	struct ub_packed_rrset_key* akey;
	struct delegpt_ns* ns;
	size_t num = delegpt_count_targets(dp);
	for(ns = dp->nslist; ns; ns = ns->next) {
		/* get cached parentside A */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
		if(akey) {
			log_rrset_key(VERB_ALGO, "found parent-side", akey);
			ns->done_pside4 = 1;
			/* a negative-cache-element has no addresses it adds */
			if(!delegpt_add_rrset_A(dp, region, akey, 1))
				log_err("malloc failure in lookup_parent_glue");
			lock_rw_unlock(&akey->entry.lock);
		}
		/* get cached parentside AAAA */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_AAAA, qinfo->qclass,
			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
		if(akey) {
			log_rrset_key(VERB_ALGO, "found parent-side", akey);
			ns->done_pside6 = 1;
			/* a negative-cache-element has no addresses it adds */
			if(!delegpt_add_rrset_AAAA(dp, region, akey, 1))
				log_err("malloc failure in lookup_parent_glue");
			lock_rw_unlock(&akey->entry.lock);
		}
	}
	/* see if new (but lame) addresses have become available */
	return delegpt_count_targets(dp) != num;
}

int
iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
	uint16_t* c)
{
	uint16_t c1 = *c, c2 = *c;
	int r1 = hints_next_root(hints, &c1);
	int r2 = forwards_next_root(fwd, &c2);
	if(!r1 && !r2) /* got none, end of list */
		return 0;
	else if(!r1) /* got one, return that */
		*c = c2;
	else if(!r2)
		*c = c1;
	else if(c1 < c2) /* got both, take the smallest */
		*c = c1;
	else	*c = c2;
	return 1;
}

void
iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns, uint8_t* z)
{
	/* Only the DS record for the delegation itself is expected.
	 * We allow DS for everything between the bailiwick and the
	 * zonecut, thus DS records must be at or above the zonecut.
	 * And the DS records must be below the server authority zone.
	 * The answer section is already scrubbed. */
	size_t i = msg->rep->an_numrrsets;
	while(i < (msg->rep->an_numrrsets + msg->rep->ns_numrrsets)) {
		struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
		if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS &&
			(!ns || !dname_subdomain_c(ns->rk.dname, s->rk.dname)
			|| query_dname_compare(z, s->rk.dname) == 0)) {
			log_nametypeclass(VERB_ALGO, "removing irrelevant DS",
				s->rk.dname, ntohs(s->rk.type),
				ntohs(s->rk.rrset_class));
			memmove(msg->rep->rrsets+i, msg->rep->rrsets+i+1,
				sizeof(struct ub_packed_rrset_key*) *
				(msg->rep->rrset_count-i-1));
			msg->rep->ns_numrrsets--;
			msg->rep->rrset_count--;
			/* stay at the same i, a new record is there now */
			continue;
		}
		i++;
	}
}
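
/*
 * Example of the DS scrub above: suppose the com. servers (z = com.)
 * return a referral whose NS rrset is owned by example.com. (ns).
 * A DS rrset for example.com. is kept, a DS rrset for sub.example.com.
 * is removed (it lies below the zonecut), and a DS rrset for com.
 * itself is removed because it equals the server's own authority zone.
 */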

void iter_dec_attempts(struct delegpt* dp, int d)
{
	struct delegpt_addr* a;
	for(a=dp->target_list; a; a = a->next_target) {
		if(a->attempts >= OUTBOUND_MSG_RETRY) {
			/* add back to result list */
			a->next_result = dp->result_list;
			dp->result_list = a;
		}
		if(a->attempts > d)
			a->attempts -= d;
		else	a->attempts = 0;
	}
}

void iter_merge_retry_counts(struct delegpt* dp, struct delegpt* old)
{
	struct delegpt_addr* a, *o, *prev;
	for(a=dp->target_list; a; a = a->next_target) {
		o = delegpt_find_addr(old, &a->addr, a->addrlen);
		if(o) {
			log_addr(VERB_ALGO, "copy attempt count previous dp",
				&a->addr, a->addrlen);
			a->attempts = o->attempts;
		}
	}
	prev = NULL;
	a = dp->usable_list;
	while(a) {
		if(a->attempts >= OUTBOUND_MSG_RETRY) {
			log_addr(VERB_ALGO, "remove from usable list dp",
				&a->addr, a->addrlen);
			/* remove from the usable list */
			if(prev)
				prev->next_usable = a->next_usable;
			else	dp->usable_list = a->next_usable;
			/* prev stays the same */
			a = a->next_usable;
			continue;
		}
		prev = a;
		a = a->next_usable;
	}
}