xref: /openbsd-src/usr.sbin/unbound/daemon/worker.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*
2  * daemon/worker.c - worker that handles a pending list of requests.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file implements the worker that handles callbacks on events, for
40  * pending requests.
41  */
42 #include "config.h"
43 #include "util/log.h"
44 #include "util/net_help.h"
45 #include "util/random.h"
46 #include "daemon/worker.h"
47 #include "daemon/daemon.h"
48 #include "daemon/remote.h"
49 #include "daemon/acl_list.h"
50 #include "util/netevent.h"
51 #include "util/config_file.h"
52 #include "util/module.h"
53 #include "util/regional.h"
54 #include "util/storage/slabhash.h"
55 #include "services/listen_dnsport.h"
56 #include "services/outside_network.h"
57 #include "services/outbound_list.h"
58 #include "services/cache/rrset.h"
59 #include "services/cache/infra.h"
60 #include "services/cache/dns.h"
61 #include "services/mesh.h"
62 #include "services/localzone.h"
63 #include "util/data/msgparse.h"
64 #include "util/data/msgencode.h"
65 #include "util/data/dname.h"
66 #include "util/fptr_wlist.h"
67 #include "util/tube.h"
68 #include "iterator/iter_fwd.h"
69 #include "iterator/iter_hints.h"
70 #include "validator/autotrust.h"
71 #include "validator/val_anchor.h"
72 #include "ldns/sbuffer.h"
73 
74 #ifdef HAVE_SYS_TYPES_H
75 #  include <sys/types.h>
76 #endif
77 #ifdef HAVE_NETDB_H
78 #include <netdb.h>
79 #endif
80 #include <signal.h>
81 #ifdef UB_ON_WINDOWS
82 #include "winrc/win_svc.h"
83 #endif
84 
85 /** Size of a UDP datagram */
86 #define NORMAL_UDP_SIZE	512 /* bytes */
87 
88 /**
89  * Seconds to add to the prefetch leeway.  This is extra TTL leeway that
90  * expires old rrsets earlier than strictly needed, so the refreshed update
91  * can be put into the cache.  It makes sure that, if not all TTLs in the
92  * message being updated (and replaced) are equal, rrsets with up to this
93  * much extra TTL are replaced as well.  The new message then (most likely)
94  * has at least this TTL, avoiding very small 'split second' TTLs caused by
95  * operators choosing relatively prime TTLs (or so).  Also has to be at
96  * least one to break ties (and overwrite the cached entry).
97  */
98 #define PREFETCH_EXPIRY_ADD 60
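/*
 * Rough illustration (added note, not in the original source): the prefetch
 * started by reply_and_prefetch() below runs with "leeway + 60".  So for a
 * cached entry with 10 seconds of TTL left, the prefetch gets a leeway of 70
 * seconds, and rrsets whose TTLs differ by up to about a minute are still
 * replaced by the refreshed answer.
 */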
99 
100 #ifdef UNBOUND_ALLOC_STATS
101 /** measure memory leakage */
102 static void
103 debug_memleak(size_t accounted, size_t heap,
104 	size_t total_alloc, size_t total_free)
105 {
106 	static int init = 0;
107 	static size_t base_heap, base_accounted, base_alloc, base_free;
108 	size_t base_af, cur_af, grow_af, grow_acc;
109 	if(!init) {
110 		init = 1;
111 		base_heap = heap;
112 		base_accounted = accounted;
113 		base_alloc = total_alloc;
114 		base_free = total_free;
115 	}
116 	base_af = base_alloc - base_free;
117 	cur_af = total_alloc - total_free;
118 	grow_af = cur_af - base_af;
119 	grow_acc = accounted - base_accounted;
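	/* leaked bytes are the allocated-minus-freed growth that is not
	 * matched by growth in accounted memory since the baseline */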
120 	log_info("Leakage: %d leaked. growth: %u use, %u acc, %u heap",
121 		(int)(grow_af - grow_acc), (unsigned)grow_af,
122 		(unsigned)grow_acc, (unsigned)(heap - base_heap));
123 }
124 
125 /** give debug heap size indication */
126 static void
127 debug_total_mem(size_t calctotal)
128 {
129 #ifdef HAVE_SBRK
130 	extern void* unbound_start_brk;
131 	extern size_t unbound_mem_alloc, unbound_mem_freed;
132 	void* cur = sbrk(0);
133 	int total = cur-unbound_start_brk;
134 	log_info("Total heap memory estimate: %u  total-alloc: %u  "
135 		"total-free: %u", (unsigned)total,
136 		(unsigned)unbound_mem_alloc, (unsigned)unbound_mem_freed);
137 	debug_memleak(calctotal, (size_t)total,
138 		unbound_mem_alloc, unbound_mem_freed);
139 #else
140 	(void)calctotal;
141 #endif /* HAVE_SBRK */
142 }
143 #endif /* UNBOUND_ALLOC_STATS */
144 
145 /** Report on memory usage by this thread and globally */
146 static void
147 worker_mem_report(struct worker* ATTR_UNUSED(worker),
148 	struct serviced_query* ATTR_UNUSED(cur_serv))
149 {
150 #ifdef UNBOUND_ALLOC_STATS
151 	/* debug func in validator module */
152 	size_t total, front, back, mesh, msg, rrset, infra, ac, superac;
153 	size_t me, iter, val, anch;
154 	int i;
155 	if(verbosity < VERB_ALGO)
156 		return;
157 	front = listen_get_mem(worker->front);
158 	back = outnet_get_mem(worker->back);
159 	msg = slabhash_get_mem(worker->env.msg_cache);
160 	rrset = slabhash_get_mem(&worker->env.rrset_cache->table);
161 	infra = infra_get_mem(worker->env.infra_cache);
162 	mesh = mesh_get_mem(worker->env.mesh);
163 	ac = alloc_get_mem(&worker->alloc);
164 	superac = alloc_get_mem(&worker->daemon->superalloc);
165 	anch = anchors_get_mem(worker->env.anchors);
166 	iter = 0;
167 	val = 0;
168 	for(i=0; i<worker->env.mesh->mods.num; i++) {
169 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
170 			mods.mod[i]->get_mem));
171 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
172 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
173 				(&worker->env, i);
174 		else	iter += (*worker->env.mesh->mods.mod[i]->get_mem)
175 				(&worker->env, i);
176 	}
177 	me = sizeof(*worker) + sizeof(*worker->base) + sizeof(*worker->comsig)
178 		+ comm_point_get_mem(worker->cmd_com)
179 		+ sizeof(worker->rndstate)
180 		+ regional_get_mem(worker->scratchpad)
181 		+ sizeof(*worker->env.scratch_buffer)
182 		+ sldns_buffer_capacity(worker->env.scratch_buffer)
183 		+ forwards_get_mem(worker->env.fwds)
184 		+ hints_get_mem(worker->env.hints);
185 	if(worker->thread_num == 0)
186 		me += acl_list_get_mem(worker->daemon->acl);
187 	if(cur_serv) {
188 		me += serviced_get_mem(cur_serv);
189 	}
190 	total = front+back+mesh+msg+rrset+infra+iter+val+ac+superac+me;
191 	log_info("Memory conditions: %u front=%u back=%u mesh=%u msg=%u "
192 		"rrset=%u infra=%u iter=%u val=%u anchors=%u "
193 		"alloccache=%u globalalloccache=%u me=%u",
194 		(unsigned)total, (unsigned)front, (unsigned)back,
195 		(unsigned)mesh, (unsigned)msg, (unsigned)rrset,
196 		(unsigned)infra, (unsigned)iter, (unsigned)val, (unsigned)anch,
197 		(unsigned)ac, (unsigned)superac, (unsigned)me);
198 	debug_total_mem(total);
199 #else /* no UNBOUND_ALLOC_STATS */
200 	size_t val = 0;
201 	int i;
202 	if(verbosity < VERB_QUERY)
203 		return;
204 	for(i=0; i<worker->env.mesh->mods.num; i++) {
205 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
206 			mods.mod[i]->get_mem));
207 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
208 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
209 				(&worker->env, i);
210 	}
211 	verbose(VERB_QUERY, "cache memory msg=%u rrset=%u infra=%u val=%u",
212 		(unsigned)slabhash_get_mem(worker->env.msg_cache),
213 		(unsigned)slabhash_get_mem(&worker->env.rrset_cache->table),
214 		(unsigned)infra_get_mem(worker->env.infra_cache),
215 		(unsigned)val);
216 #endif /* UNBOUND_ALLOC_STATS */
217 }
218 
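/*
 * Send a command to a worker: the value is written into the worker's command
 * tube as a 32-bit integer in network byte order, and decoded again by
 * worker_handle_control_cmd() (registered on the tube in worker_init).
 */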
219 void
220 worker_send_cmd(struct worker* worker, enum worker_commands cmd)
221 {
222 	uint32_t c = (uint32_t)htonl(cmd);
223 	if(!tube_write_msg(worker->cmd, (uint8_t*)&c, sizeof(c), 0)) {
224 		log_err("worker send cmd %d failed", (int)cmd);
225 	}
226 }
227 
228 int
229 worker_handle_reply(struct comm_point* c, void* arg, int error,
230 	struct comm_reply* reply_info)
231 {
232 	struct module_qstate* q = (struct module_qstate*)arg;
233 	struct worker* worker = q->env->worker;
234 	struct outbound_entry e;
235 	e.qstate = q;
236 	e.qsent = NULL;
237 
238 	if(error != 0) {
239 		mesh_report_reply(worker->env.mesh, &e, reply_info, error);
240 		worker_mem_report(worker, NULL);
241 		return 0;
242 	}
243 	/* sanity check. */
244 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
245 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
246 			LDNS_PACKET_QUERY
247 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
248 		/* error becomes timeout for the module as if this reply
249 		 * never arrived. */
250 		mesh_report_reply(worker->env.mesh, &e, reply_info,
251 			NETEVENT_TIMEOUT);
252 		worker_mem_report(worker, NULL);
253 		return 0;
254 	}
255 	mesh_report_reply(worker->env.mesh, &e, reply_info, NETEVENT_NOERROR);
256 	worker_mem_report(worker, NULL);
257 	return 0;
258 }
259 
260 int
261 worker_handle_service_reply(struct comm_point* c, void* arg, int error,
262 	struct comm_reply* reply_info)
263 {
264 	struct outbound_entry* e = (struct outbound_entry*)arg;
265 	struct worker* worker = e->qstate->env->worker;
266 	struct serviced_query *sq = e->qsent;
267 
268 	verbose(VERB_ALGO, "worker svcd callback for qstate %p", e->qstate);
269 	if(error != 0) {
270 		mesh_report_reply(worker->env.mesh, e, reply_info, error);
271 		worker_mem_report(worker, sq);
272 		return 0;
273 	}
274 	/* sanity check. */
275 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
276 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
277 			LDNS_PACKET_QUERY
278 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
279 		/* error becomes timeout for the module as if this reply
280 		 * never arrived. */
281 		verbose(VERB_ALGO, "worker: bad reply handled as timeout");
282 		mesh_report_reply(worker->env.mesh, e, reply_info,
283 			NETEVENT_TIMEOUT);
284 		worker_mem_report(worker, sq);
285 		return 0;
286 	}
287 	mesh_report_reply(worker->env.mesh, e, reply_info, NETEVENT_NOERROR);
288 	worker_mem_report(worker, sq);
289 	return 0;
290 }
291 
292 /** Check request sanity.
293  * @param pkt: the wire packet to examine for sanity.
294  * @param worker: parameters for checking.
295  * @return an LDNS rcode to reply with, 0 if the request is OK, -1 to discard.
296  */
297 static int
298 worker_check_request(sldns_buffer* pkt, struct worker* worker)
299 {
300 	if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) {
301 		verbose(VERB_QUERY, "request too short, discarded");
302 		return -1;
303 	}
304 	if(sldns_buffer_limit(pkt) > NORMAL_UDP_SIZE &&
305 		worker->daemon->cfg->harden_large_queries) {
306 		verbose(VERB_QUERY, "request too large, discarded");
307 		return -1;
308 	}
309 	if(LDNS_QR_WIRE(sldns_buffer_begin(pkt))) {
310 		verbose(VERB_QUERY, "request has QR bit on, discarded");
311 		return -1;
312 	}
313 	if(LDNS_TC_WIRE(sldns_buffer_begin(pkt))) {
314 		LDNS_TC_CLR(sldns_buffer_begin(pkt));
315 		verbose(VERB_QUERY, "request bad, has TC bit on");
316 		return LDNS_RCODE_FORMERR;
317 	}
318 	if(LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_PACKET_QUERY) {
319 		verbose(VERB_QUERY, "request unknown opcode %d",
320 			LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)));
321 		return LDNS_RCODE_NOTIMPL;
322 	}
323 	if(LDNS_QDCOUNT(sldns_buffer_begin(pkt)) != 1) {
324 		verbose(VERB_QUERY, "request wrong nr qd=%d",
325 			LDNS_QDCOUNT(sldns_buffer_begin(pkt)));
326 		return LDNS_RCODE_FORMERR;
327 	}
328 	if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) != 0) {
329 		verbose(VERB_QUERY, "request wrong nr an=%d",
330 			LDNS_ANCOUNT(sldns_buffer_begin(pkt)));
331 		return LDNS_RCODE_FORMERR;
332 	}
333 	if(LDNS_NSCOUNT(sldns_buffer_begin(pkt)) != 0) {
334 		verbose(VERB_QUERY, "request wrong nr ns=%d",
335 			LDNS_NSCOUNT(sldns_buffer_begin(pkt)));
336 		return LDNS_RCODE_FORMERR;
337 	}
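	/* at most one additional record is accepted in a request; a single
	 * additional is normally the EDNS OPT record, which is parsed later
	 * by parse_edns_from_pkt() in worker_handle_request() */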
338 	if(LDNS_ARCOUNT(sldns_buffer_begin(pkt)) > 1) {
339 		verbose(VERB_QUERY, "request wrong nr ar=%d",
340 			LDNS_ARCOUNT(sldns_buffer_begin(pkt)));
341 		return LDNS_RCODE_FORMERR;
342 	}
343 	return 0;
344 }
345 
346 void
347 worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), uint8_t* msg,
348 	size_t len, int error, void* arg)
349 {
350 	struct worker* worker = (struct worker*)arg;
351 	enum worker_commands cmd;
352 	if(error != NETEVENT_NOERROR) {
353 		free(msg);
354 		if(error == NETEVENT_CLOSED)
355 			comm_base_exit(worker->base);
356 		else	log_info("control event: %d", error);
357 		return;
358 	}
359 	if(len != sizeof(uint32_t)) {
360 		fatal_exit("bad control msg length %d", (int)len);
361 	}
362 	cmd = sldns_read_uint32(msg);
363 	free(msg);
364 	switch(cmd) {
365 	case worker_cmd_quit:
366 		verbose(VERB_ALGO, "got control cmd quit");
367 		comm_base_exit(worker->base);
368 		break;
369 	case worker_cmd_stats:
370 		verbose(VERB_ALGO, "got control cmd stats");
371 		server_stats_reply(worker, 1);
372 		break;
373 	case worker_cmd_stats_noreset:
374 		verbose(VERB_ALGO, "got control cmd stats_noreset");
375 		server_stats_reply(worker, 0);
376 		break;
377 	case worker_cmd_remote:
378 		verbose(VERB_ALGO, "got control cmd remote");
379 		daemon_remote_exec(worker);
380 		break;
381 	default:
382 		log_err("bad command %d", (int)cmd);
383 		break;
384 	}
385 }
386 
387 /** check if a delegation is secure */
388 static enum sec_status
389 check_delegation_secure(struct reply_info *rep)
390 {
391 	/* return smallest security status */
392 	size_t i;
393 	enum sec_status sec = sec_status_secure;
394 	enum sec_status s;
395 	size_t num = rep->an_numrrsets + rep->ns_numrrsets;
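	/* note: taking the minimum below assumes that weaker sec_status
	 * values are numerically smaller than stronger ones */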
396 	/* check if answer and authority are OK */
397 	for(i=0; i<num; i++) {
398 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
399 			->security;
400 		if(s < sec)
401 			sec = s;
402 	}
403 	/* in the additional section, only unchecked triggers revalidation */
404 	for(i=num; i<rep->rrset_count; i++) {
405 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
406 			->security;
407 		if(s == sec_status_unchecked)
408 			return s;
409 	}
410 	return sec;
411 }
412 
413 /** remove nonsecure from a delegation referral additional section */
414 static void
415 deleg_remove_nonsecure_additional(struct reply_info* rep)
416 {
417 	/* we can simply edit it, since we are working in the scratch region */
418 	size_t i;
419 	enum sec_status s;
420 
421 	for(i = rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
422 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
423 			->security;
424 		if(s != sec_status_secure) {
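			/* drop rrset i by shifting the remaining rrset
			 * pointers one slot to the left */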
425 			memmove(rep->rrsets+i, rep->rrsets+i+1,
426 				sizeof(struct ub_packed_rrset_key*)*
427 				(rep->rrset_count - i - 1));
428 			rep->ar_numrrsets--;
429 			rep->rrset_count--;
430 			i--;
431 		}
432 	}
433 }
434 
435 /** answer nonrecursive query from the cache */
436 static int
437 answer_norec_from_cache(struct worker* worker, struct query_info* qinfo,
438 	uint16_t id, uint16_t flags, struct comm_reply* repinfo,
439 	struct edns_data* edns)
440 {
441 	/* for a nonrecursive query return either:
442 	 * 	o an error (servfail; we try to avoid this)
443 	 * 	o a delegation (closest we have; this routine tries that)
444 	 * 	o the answer (checked by answer_from_cache)
445 	 *
446 	 * So, grab a delegation from the rrset cache.
447 	 * Then check if it needs validation, if so, this routine fails,
448 	 * so that iterator can prime and validator can verify rrsets.
449 	 */
450 	uint16_t udpsize = edns->udp_size;
451 	int secure = 0;
452 	time_t timenow = *worker->env.now;
453 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
454 		&& worker->env.need_to_validate;
455 	struct dns_msg *msg = NULL;
456 	struct delegpt *dp;
457 
458 	dp = dns_cache_find_delegation(&worker->env, qinfo->qname,
459 		qinfo->qname_len, qinfo->qtype, qinfo->qclass,
460 		worker->scratchpad, &msg, timenow);
461 	if(!dp) { /* no delegation, need to reprime */
462 		regional_free_all(worker->scratchpad);
463 		return 0;
464 	}
465 	if(must_validate) {
466 		switch(check_delegation_secure(msg->rep)) {
467 		case sec_status_unchecked:
468 			/* some rrsets have not been verified yet, go and
469 			 * let validator do that */
470 			regional_free_all(worker->scratchpad);
471 			return 0;
472 		case sec_status_bogus:
473 			/* some rrsets are bogus, reply servfail */
474 			edns->edns_version = EDNS_ADVERTISED_VERSION;
475 			edns->udp_size = EDNS_ADVERTISED_SIZE;
476 			edns->ext_rcode = 0;
477 			edns->bits &= EDNS_DO;
478 			error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
479 				&msg->qinfo, id, flags, edns);
480 			regional_free_all(worker->scratchpad);
481 			if(worker->stats.extended) {
482 				worker->stats.ans_bogus++;
483 				worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL]++;
484 			}
485 			return 1;
486 		case sec_status_secure:
487 			/* all rrsets are secure */
488 		/* remove non-secure rrsets from the additional section */
489 			if(worker->env.cfg->val_clean_additional)
490 				deleg_remove_nonsecure_additional(msg->rep);
491 			secure = 1;
492 			break;
493 		case sec_status_indeterminate:
494 		case sec_status_insecure:
495 		default:
496 			/* not secure */
497 			secure = 0;
498 			break;
499 		}
500 	}
501 	/* return this delegation from the cache */
502 	edns->edns_version = EDNS_ADVERTISED_VERSION;
503 	edns->udp_size = EDNS_ADVERTISED_SIZE;
504 	edns->ext_rcode = 0;
505 	edns->bits &= EDNS_DO;
506 	msg->rep->flags |= BIT_QR|BIT_RA;
507 	if(!reply_info_answer_encode(&msg->qinfo, msg->rep, id, flags,
508 		repinfo->c->buffer, 0, 1, worker->scratchpad,
509 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
510 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
511 			&msg->qinfo, id, flags, edns);
512 	}
513 	regional_free_all(worker->scratchpad);
514 	if(worker->stats.extended) {
515 		if(secure) worker->stats.ans_secure++;
516 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
517 	}
518 	return 1;
519 }
520 
521 /** answer query from the cache */
522 static int
523 answer_from_cache(struct worker* worker, struct query_info* qinfo,
524 	struct reply_info* rep, uint16_t id, uint16_t flags,
525 	struct comm_reply* repinfo, struct edns_data* edns)
526 {
527 	time_t timenow = *worker->env.now;
528 	uint16_t udpsize = edns->udp_size;
529 	int secure;
530 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
531 		&& worker->env.need_to_validate;
532 	/* see if it is possible */
533 	if(rep->ttl < timenow) {
534 		/* the rrsets may have been updated in the meantime.
535 		 * we will refetch the message format from the
536 		 * authoritative server
537 		 */
538 		return 0;
539 	}
540 	if(!rrset_array_lock(rep->ref, rep->rrset_count, timenow))
541 		return 0;
542 	/* locked and ids and ttls are OK. */
543 	/* check CNAME chain (if any) */
544 	if(rep->an_numrrsets > 0 && (rep->rrsets[0]->rk.type ==
545 		htons(LDNS_RR_TYPE_CNAME) || rep->rrsets[0]->rk.type ==
546 		htons(LDNS_RR_TYPE_DNAME))) {
547 		if(!reply_check_cname_chain(rep)) {
548 			/* cname chain invalid, redo iterator steps */
549 			verbose(VERB_ALGO, "Cache reply: cname chain broken");
550 		bail_out:
551 			rrset_array_unlock_touch(worker->env.rrset_cache,
552 				worker->scratchpad, rep->ref, rep->rrset_count);
553 			regional_free_all(worker->scratchpad);
554 			return 0;
555 		}
556 	}
557 	/* check security status of the cached answer */
558 	if( rep->security == sec_status_bogus && must_validate) {
559 		/* BAD cached */
560 		edns->edns_version = EDNS_ADVERTISED_VERSION;
561 		edns->udp_size = EDNS_ADVERTISED_SIZE;
562 		edns->ext_rcode = 0;
563 		edns->bits &= EDNS_DO;
564 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
565 			qinfo, id, flags, edns);
566 		rrset_array_unlock_touch(worker->env.rrset_cache,
567 			worker->scratchpad, rep->ref, rep->rrset_count);
568 		regional_free_all(worker->scratchpad);
569 		if(worker->stats.extended) {
570 			worker->stats.ans_bogus ++;
571 			worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL] ++;
572 		}
573 		return 1;
574 	} else if( rep->security == sec_status_unchecked && must_validate) {
575 		verbose(VERB_ALGO, "Cache reply: unchecked entry needs "
576 			"validation");
577 		goto bail_out; /* need to validate cache entry first */
578 	} else if(rep->security == sec_status_secure) {
579 		if(reply_all_rrsets_secure(rep))
580 			secure = 1;
581 		else	{
582 			if(must_validate) {
583 				verbose(VERB_ALGO, "Cache reply: secure entry"
584 					" changed status");
585 				goto bail_out; /* rrset changed, re-verify */
586 			}
587 			secure = 0;
588 		}
589 	} else	secure = 0;
590 
591 	edns->edns_version = EDNS_ADVERTISED_VERSION;
592 	edns->udp_size = EDNS_ADVERTISED_SIZE;
593 	edns->ext_rcode = 0;
594 	edns->bits &= EDNS_DO;
595 	if(!reply_info_answer_encode(qinfo, rep, id, flags,
596 		repinfo->c->buffer, timenow, 1, worker->scratchpad,
597 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
598 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
599 			qinfo, id, flags, edns);
600 	}
601 	/* cannot send the reply right now, because blocking network syscall
602 	 * is bad while holding locks. */
603 	rrset_array_unlock_touch(worker->env.rrset_cache, worker->scratchpad,
604 		rep->ref, rep->rrset_count);
605 	regional_free_all(worker->scratchpad);
606 	if(worker->stats.extended) {
607 		if(secure) worker->stats.ans_secure++;
608 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
609 	}
610 	/* go and return this buffer to the client */
611 	return 1;
612 }
613 
614 /** Reply to client and perform prefetch to keep cache up to date */
615 static void
616 reply_and_prefetch(struct worker* worker, struct query_info* qinfo,
617 	uint16_t flags, struct comm_reply* repinfo, time_t leeway)
618 {
619 	/* first send the answer to the client to keep its latency
620 	 * as small as that of a plain cache reply */
621 	comm_point_send_reply(repinfo);
622 	server_stats_prefetch(&worker->stats, worker);
623 
624 	/* create the prefetch in the mesh as a normal lookup without
625 	 * client addrs waiting, which has the cache blacklisted (to bypass
626 	 * the cache and go to the network for the data). */
627 	/* this (potentially) runs the mesh for the new query */
628 	mesh_new_prefetch(worker->env.mesh, qinfo, flags, leeway +
629 		PREFETCH_EXPIRY_ADD);
630 }
631 
632 /**
633  * Fill CH class answer into buffer. Keeps query.
634  * @param pkt: buffer
635  * @param str: string to put into the TXT record (truncated to 255 bytes).
636  * @param edns: edns reply information.
637  */
638 static void
639 chaos_replystr(sldns_buffer* pkt, const char* str, struct edns_data* edns)
640 {
641 	size_t len = strlen(str);
642 	unsigned int rd = LDNS_RD_WIRE(sldns_buffer_begin(pkt));
643 	unsigned int cd = LDNS_CD_WIRE(sldns_buffer_begin(pkt));
644 	if(len>255) len=255; /* cap size of TXT record */
645 	sldns_buffer_clear(pkt);
646 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip id */
647 	sldns_buffer_write_u16(pkt, (uint16_t)(BIT_QR|BIT_RA));
648 	if(rd) LDNS_RD_SET(sldns_buffer_begin(pkt));
649 	if(cd) LDNS_CD_SET(sldns_buffer_begin(pkt));
650 	sldns_buffer_write_u16(pkt, 1); /* qdcount */
651 	sldns_buffer_write_u16(pkt, 1); /* ancount */
652 	sldns_buffer_write_u16(pkt, 0); /* nscount */
653 	sldns_buffer_write_u16(pkt, 0); /* arcount */
654 	(void)query_dname_len(pkt); /* skip qname */
655 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qtype */
656 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qclass */
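	/* answer RR: compression pointer to the qname at offset 12 (0xc00c),
	 * type TXT, class CH, TTL 0, rdata is one length byte plus the text */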
657 	sldns_buffer_write_u16(pkt, 0xc00c); /* compr ptr to query */
658 	sldns_buffer_write_u16(pkt, LDNS_RR_TYPE_TXT);
659 	sldns_buffer_write_u16(pkt, LDNS_RR_CLASS_CH);
660 	sldns_buffer_write_u32(pkt, 0); /* TTL */
661 	sldns_buffer_write_u16(pkt, sizeof(uint8_t) + len);
662 	sldns_buffer_write_u8(pkt, len);
663 	sldns_buffer_write(pkt, str, len);
664 	sldns_buffer_flip(pkt);
665 	edns->edns_version = EDNS_ADVERTISED_VERSION;
666 	edns->udp_size = EDNS_ADVERTISED_SIZE;
667 	edns->bits &= EDNS_DO;
668 	attach_edns_record(pkt, edns);
669 }
670 
671 /**
672  * Answer CH class queries.
673  * @param w: worker
674  * @param qinfo: query info. Pointer into packet buffer.
675  * @param edns: edns info from query.
676  * @param pkt: packet buffer.
677  * @return: true if a reply is to be sent.
678  */
679 static int
680 answer_chaos(struct worker* w, struct query_info* qinfo,
681 	struct edns_data* edns, sldns_buffer* pkt)
682 {
683 	struct config_file* cfg = w->env.cfg;
684 	if(qinfo->qtype != LDNS_RR_TYPE_ANY && qinfo->qtype != LDNS_RR_TYPE_TXT)
685 		return 0;
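	/* wire-format names: "\002id\006server" is id.server. and
	 * "\010hostname\004bind" is hostname.bind. */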
686 	if(query_dname_compare(qinfo->qname,
687 		(uint8_t*)"\002id\006server") == 0 ||
688 		query_dname_compare(qinfo->qname,
689 		(uint8_t*)"\010hostname\004bind") == 0)
690 	{
691 		if(cfg->hide_identity)
692 			return 0;
693 		if(cfg->identity==NULL || cfg->identity[0]==0) {
694 			char buf[MAXHOSTNAMELEN+1];
695 			if (gethostname(buf, MAXHOSTNAMELEN) == 0) {
696 				buf[MAXHOSTNAMELEN] = 0;
697 				chaos_replystr(pkt, buf, edns);
698 			} else 	{
699 				log_err("gethostname: %s", strerror(errno));
700 				chaos_replystr(pkt, "no hostname", edns);
701 			}
702 		}
703 		else 	chaos_replystr(pkt, cfg->identity, edns);
704 		return 1;
705 	}
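	/* likewise, version.server. and version.bind. in wire format */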
706 	if(query_dname_compare(qinfo->qname,
707 		(uint8_t*)"\007version\006server") == 0 ||
708 		query_dname_compare(qinfo->qname,
709 		(uint8_t*)"\007version\004bind") == 0)
710 	{
711 		if(cfg->hide_version)
712 			return 0;
713 		if(cfg->version==NULL || cfg->version[0]==0)
714 			chaos_replystr(pkt, PACKAGE_STRING, edns);
715 		else 	chaos_replystr(pkt, cfg->version, edns);
716 		return 1;
717 	}
718 	return 0;
719 }
720 
721 int
722 deny_refuse(struct comm_point* c, enum acl_access acl,
723 	enum acl_access deny, enum acl_access refuse,
724 	struct worker* worker, struct comm_reply* repinfo)
725 {
726 	if(acl == deny) {
727 		comm_point_drop_reply(repinfo);
728 		if(worker->stats.extended)
729 			worker->stats.unwanted_queries++;
730 		return 0;
731 	} else if(acl == refuse) {
732 		log_addr(VERB_ALGO, "refused query from",
733 			&repinfo->addr, repinfo->addrlen);
734 		log_buf(VERB_ALGO, "refuse", c->buffer);
735 		if(worker->stats.extended)
736 			worker->stats.unwanted_queries++;
737 		if(worker_check_request(c->buffer, worker) == -1) {
738 			comm_point_drop_reply(repinfo);
739 			return 0; /* discard this */
740 		}
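		/* rewrite the query in place into a REFUSED reply: keep only
		 * the 12-byte header, zero the section counts, set QR and
		 * the REFUSED rcode */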
741 		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
742 		sldns_buffer_write_at(c->buffer, 4,
743 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
744 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
745 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
746 			LDNS_RCODE_REFUSED);
747 		return 1;
748 	}
749 
750 	return -1;
751 }
752 
753 int
754 deny_refuse_all(struct comm_point* c, enum acl_access acl,
755 	struct worker* worker, struct comm_reply* repinfo)
756 {
757 	return deny_refuse(c, acl, acl_deny, acl_refuse, worker, repinfo);
758 }
759 
760 int
761 deny_refuse_non_local(struct comm_point* c, enum acl_access acl,
762 	struct worker* worker, struct comm_reply* repinfo)
763 {
764 	return deny_refuse(c, acl, acl_deny_non_local, acl_refuse_non_local, worker, repinfo);
765 }
766 
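/*
 * Handle a request from a client.  Return 1 when c->buffer holds a reply
 * that the comm point code should send back right away, 0 when nothing is
 * to be sent (the query was dropped, or was handed to the mesh and will be
 * answered later).
 */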
767 int
768 worker_handle_request(struct comm_point* c, void* arg, int error,
769 	struct comm_reply* repinfo)
770 {
771 	struct worker* worker = (struct worker*)arg;
772 	int ret;
773 	hashvalue_t h;
774 	struct lruhash_entry* e;
775 	struct query_info qinfo;
776 	struct edns_data edns;
777 	enum acl_access acl;
778 
779 	if(error != NETEVENT_NOERROR) {
780 		/* some bad tcp query DNS formats give these error calls */
781 		verbose(VERB_ALGO, "handle request called with err=%d", error);
782 		return 0;
783 	}
784 	acl = acl_list_lookup(worker->daemon->acl, &repinfo->addr,
785 		repinfo->addrlen);
786 	if((ret=deny_refuse_all(c, acl, worker, repinfo)) != -1)
787 	{
788 		return ret;
789 	}
790 	if((ret=worker_check_request(c->buffer, worker)) != 0) {
791 		verbose(VERB_ALGO, "worker check request: bad query.");
792 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
793 		if(ret != -1) {
794 			LDNS_QR_SET(sldns_buffer_begin(c->buffer));
795 			LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
796 			return 1;
797 		}
798 		comm_point_drop_reply(repinfo);
799 		return 0;
800 	}
801 	worker->stats.num_queries++;
802 	/* see if query is in the cache */
803 	if(!query_info_parse(&qinfo, c->buffer)) {
804 		verbose(VERB_ALGO, "worker parse request: formerror.");
805 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
806 		sldns_buffer_rewind(c->buffer);
807 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
808 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
809 			LDNS_RCODE_FORMERR);
810 		server_stats_insrcode(&worker->stats, c->buffer);
811 		return 1;
812 	}
813 	if(worker->env.cfg->log_queries) {
814 		char ip[128];
815 		addr_to_str(&repinfo->addr, repinfo->addrlen, ip, sizeof(ip));
816 		log_nametypeclass(0, ip, qinfo.qname, qinfo.qtype, qinfo.qclass);
817 	}
818 	if(qinfo.qtype == LDNS_RR_TYPE_AXFR ||
819 		qinfo.qtype == LDNS_RR_TYPE_IXFR) {
820 		verbose(VERB_ALGO, "worker request: refused zone transfer.");
821 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
822 		sldns_buffer_rewind(c->buffer);
823 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
824 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
825 			LDNS_RCODE_REFUSED);
826 		if(worker->stats.extended) {
827 			worker->stats.qtype[qinfo.qtype]++;
828 			server_stats_insrcode(&worker->stats, c->buffer);
829 		}
830 		return 1;
831 	}
832 	if((ret=parse_edns_from_pkt(c->buffer, &edns)) != 0) {
833 		verbose(VERB_ALGO, "worker parse edns: formerror.");
834 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
835 		sldns_buffer_rewind(c->buffer);
836 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
837 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
838 		server_stats_insrcode(&worker->stats, c->buffer);
839 		return 1;
840 	}
841 	if(edns.edns_present && edns.edns_version != 0) {
842 		edns.ext_rcode = (uint8_t)(EDNS_RCODE_BADVERS>>4);
843 		edns.edns_version = EDNS_ADVERTISED_VERSION;
844 		edns.udp_size = EDNS_ADVERTISED_SIZE;
845 		edns.bits &= EDNS_DO;
846 		verbose(VERB_ALGO, "query with bad edns version.");
847 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
848 		error_encode(c->buffer, EDNS_RCODE_BADVERS&0xf, &qinfo,
849 			*(uint16_t*)sldns_buffer_begin(c->buffer),
850 			sldns_buffer_read_u16_at(c->buffer, 2), NULL);
851 		attach_edns_record(c->buffer, &edns);
852 		return 1;
853 	}
854 	if(edns.edns_present && edns.udp_size < NORMAL_UDP_SIZE &&
855 		worker->daemon->cfg->harden_short_bufsize) {
856 		verbose(VERB_QUERY, "worker request: EDNS bufsize %d ignored",
857 			(int)edns.udp_size);
858 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
859 		edns.udp_size = NORMAL_UDP_SIZE;
860 	}
861 	if(edns.udp_size > worker->daemon->cfg->max_udp_size &&
862 		c->type == comm_udp) {
863 		verbose(VERB_QUERY,
864 			"worker request: max UDP reply size modified"
865 			" (%d to max-udp-size)", (int)edns.udp_size);
866 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
867 		edns.udp_size = worker->daemon->cfg->max_udp_size;
868 	}
869 	if(edns.udp_size < LDNS_HEADER_SIZE) {
870 		verbose(VERB_ALGO, "worker request: edns is too small.");
871 		log_addr(VERB_CLIENT, "from", &repinfo->addr, repinfo->addrlen);
872 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
873 		LDNS_TC_SET(sldns_buffer_begin(c->buffer));
874 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
875 			LDNS_RCODE_SERVFAIL);
876 		sldns_buffer_set_position(c->buffer, LDNS_HEADER_SIZE);
877 		sldns_buffer_write_at(c->buffer, 4,
878 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
879 		sldns_buffer_flip(c->buffer);
880 		return 1;
881 	}
882 	if(worker->stats.extended)
883 		server_stats_insquery(&worker->stats, c, qinfo.qtype,
884 			qinfo.qclass, &edns, repinfo);
885 	if(c->type != comm_udp)
886 		edns.udp_size = 65535; /* max size for TCP replies */
887 	if(qinfo.qclass == LDNS_RR_CLASS_CH && answer_chaos(worker, &qinfo,
888 		&edns, c->buffer)) {
889 		server_stats_insrcode(&worker->stats, c->buffer);
890 		return 1;
891 	}
892 	if(local_zones_answer(worker->daemon->local_zones, &qinfo, &edns,
893 		c->buffer, worker->scratchpad)) {
894 		regional_free_all(worker->scratchpad);
895 		if(sldns_buffer_limit(c->buffer) == 0) {
896 			comm_point_drop_reply(repinfo);
897 			return 0;
898 		}
899 		server_stats_insrcode(&worker->stats, c->buffer);
900 		return 1;
901 	}
902 
903 	/* We've looked in our local zones. If the answer isn't there, we
904 	 * might need to bail out based on ACLs now. */
905 	if((ret=deny_refuse_non_local(c, acl, worker, repinfo)) != -1)
906 	{
907 		return ret;
908 	}
909 
910 	/* If this request does not have the recursion bit set, verify
911 	 * ACLs allow the snooping. */
912 	if(!(LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) &&
913 		acl != acl_allow_snoop ) {
914 		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
915 		sldns_buffer_write_at(c->buffer, 4,
916 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
917 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
918 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
919 			LDNS_RCODE_REFUSED);
920 		sldns_buffer_flip(c->buffer);
921 		server_stats_insrcode(&worker->stats, c->buffer);
922 		log_addr(VERB_ALGO, "refused nonrec (cache snoop) query from",
923 			&repinfo->addr, repinfo->addrlen);
924 		return 1;
925 	}
926 	h = query_info_hash(&qinfo);
927 	if((e=slabhash_lookup(worker->env.msg_cache, h, &qinfo, 0))) {
928 		/* answer from cache - we have acquired a readlock on it */
929 		if(answer_from_cache(worker, &qinfo,
930 			(struct reply_info*)e->data,
931 			*(uint16_t*)sldns_buffer_begin(c->buffer),
932 			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
933 			&edns)) {
934 			/* prefetch it if the prefetch TTL expired */
935 			if(worker->env.cfg->prefetch && *worker->env.now >=
936 				((struct reply_info*)e->data)->prefetch_ttl) {
937 				time_t leeway = ((struct reply_info*)e->
938 					data)->ttl - *worker->env.now;
939 				lock_rw_unlock(&e->lock);
940 				reply_and_prefetch(worker, &qinfo,
941 					sldns_buffer_read_u16_at(c->buffer, 2),
942 					repinfo, leeway);
943 				return 0;
944 			}
945 			lock_rw_unlock(&e->lock);
946 			return 1;
947 		}
948 		verbose(VERB_ALGO, "answer from the cache failed");
949 		lock_rw_unlock(&e->lock);
950 	}
951 	if(!LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) {
952 		if(answer_norec_from_cache(worker, &qinfo,
953 			*(uint16_t*)sldns_buffer_begin(c->buffer),
954 			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
955 			&edns)) {
956 			return 1;
957 		}
958 		verbose(VERB_ALGO, "answer norec from cache -- "
959 			"need to validate or not primed");
960 	}
961 	sldns_buffer_rewind(c->buffer);
962 	server_stats_querymiss(&worker->stats, worker);
963 
964 	if(verbosity >= VERB_CLIENT) {
965 		if(c->type == comm_udp)
966 			log_addr(VERB_CLIENT, "udp request from",
967 				&repinfo->addr, repinfo->addrlen);
968 		else	log_addr(VERB_CLIENT, "tcp request from",
969 				&repinfo->addr, repinfo->addrlen);
970 	}
971 
972 	/* grab a work request structure for this new request */
973 	mesh_new_client(worker->env.mesh, &qinfo,
974 		sldns_buffer_read_u16_at(c->buffer, 2),
975 		&edns, repinfo, *(uint16_t*)sldns_buffer_begin(c->buffer));
976 	worker_mem_report(worker, NULL);
977 	return 0;
978 }
979 
980 void
981 worker_sighandler(int sig, void* arg)
982 {
983 	/* note that log, print, syscalls here give race conditions. */
984 	/* we still print DETAIL-level logs, because this is extensive
985 	 * per-message logging anyway, and the operator may well want to know
986 	 * the cause of unbound's exit */
987 	struct worker* worker = (struct worker*)arg;
988 	switch(sig) {
989 #ifdef SIGHUP
990 		case SIGHUP:
991 			verbose(VERB_QUERY, "caught signal SIGHUP");
992 			comm_base_exit(worker->base);
993 			break;
994 #endif
995 		case SIGINT:
996 			verbose(VERB_QUERY, "caught signal SIGINT");
997 			worker->need_to_exit = 1;
998 			comm_base_exit(worker->base);
999 			break;
1000 #ifdef SIGQUIT
1001 		case SIGQUIT:
1002 			verbose(VERB_QUERY, "caught signal SIGQUIT");
1003 			worker->need_to_exit = 1;
1004 			comm_base_exit(worker->base);
1005 			break;
1006 #endif
1007 		case SIGTERM:
1008 			verbose(VERB_QUERY, "caught signal SIGTERM");
1009 			worker->need_to_exit = 1;
1010 			comm_base_exit(worker->base);
1011 			break;
1012 		default:
1013 			log_err("unknown signal: %d, ignored", sig);
1014 			break;
1015 	}
1016 }
1017 
1018 /** restart statistics timer for worker, if enabled */
1019 static void
1020 worker_restart_timer(struct worker* worker)
1021 {
1022 	if(worker->env.cfg->stat_interval > 0) {
1023 		struct timeval tv;
1024 #ifndef S_SPLINT_S
1025 		tv.tv_sec = worker->env.cfg->stat_interval;
1026 		tv.tv_usec = 0;
1027 #endif
1028 		comm_timer_set(worker->stat_timer, &tv);
1029 	}
1030 }
1031 
1032 void worker_stat_timer_cb(void* arg)
1033 {
1034 	struct worker* worker = (struct worker*)arg;
1035 	server_stats_log(&worker->stats, worker, worker->thread_num);
1036 	mesh_stats(worker->env.mesh, "mesh has");
1037 	worker_mem_report(worker, NULL);
1038 	if(!worker->daemon->cfg->stat_cumulative) {
1039 		worker_stats_clear(worker);
1040 	}
1041 	/* start next timer */
1042 	worker_restart_timer(worker);
1043 }
1044 
1045 void worker_probe_timer_cb(void* arg)
1046 {
1047 	struct worker* worker = (struct worker*)arg;
1048 	struct timeval tv;
1049 #ifndef S_SPLINT_S
1050 	tv.tv_sec = (time_t)autr_probe_timer(&worker->env);
1051 	tv.tv_usec = 0;
1052 #endif
1053 	if(tv.tv_sec != 0)
1054 		comm_timer_set(worker->env.probe_timer, &tv);
1055 }
1056 
1057 struct worker*
1058 worker_create(struct daemon* daemon, int id, int* ports, int n)
1059 {
1060 	unsigned int seed;
1061 	struct worker* worker = (struct worker*)calloc(1,
1062 		sizeof(struct worker));
1063 	if(!worker)
1064 		return NULL;
1065 	worker->numports = n;
1066 	worker->ports = (int*)memdup(ports, sizeof(int)*n);
1067 	if(!worker->ports) {
1068 		free(worker);
1069 		return NULL;
1070 	}
1071 	worker->daemon = daemon;
1072 	worker->thread_num = id;
1073 	if(!(worker->cmd = tube_create())) {
1074 		free(worker->ports);
1075 		free(worker);
1076 		return NULL;
1077 	}
1078 	/* create random state here to avoid locking trouble in RAND_bytes */
1079 	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
1080 		(((unsigned int)worker->thread_num)<<17);
1081 		/* shift thread_num so it does not match our pid bits */
1082 	if(!(worker->rndstate = ub_initstate(seed, daemon->rand))) {
1083 		seed = 0;
1084 		log_err("could not init random numbers.");
1085 		tube_delete(worker->cmd);
1086 		free(worker->ports);
1087 		free(worker);
1088 		return NULL;
1089 	}
1090 	seed = 0;
1091 	return worker;
1092 }
1093 
1094 int
1095 worker_init(struct worker* worker, struct config_file *cfg,
1096 	struct listen_port* ports, int do_sigs)
1097 {
1098 	worker->need_to_exit = 0;
1099 	worker->base = comm_base_create(do_sigs);
1100 	if(!worker->base) {
1101 		log_err("could not create event handling base");
1102 		worker_delete(worker);
1103 		return 0;
1104 	}
1105 	comm_base_set_slow_accept_handlers(worker->base, &worker_stop_accept,
1106 		&worker_start_accept, worker);
1107 	if(do_sigs) {
1108 #ifdef SIGHUP
1109 		ub_thread_sig_unblock(SIGHUP);
1110 #endif
1111 		ub_thread_sig_unblock(SIGINT);
1112 #ifdef SIGQUIT
1113 		ub_thread_sig_unblock(SIGQUIT);
1114 #endif
1115 		ub_thread_sig_unblock(SIGTERM);
1116 #ifndef LIBEVENT_SIGNAL_PROBLEM
1117 		worker->comsig = comm_signal_create(worker->base,
1118 			worker_sighandler, worker);
1119 		if(!worker->comsig
1120 #ifdef SIGHUP
1121 			|| !comm_signal_bind(worker->comsig, SIGHUP)
1122 #endif
1123 #ifdef SIGQUIT
1124 			|| !comm_signal_bind(worker->comsig, SIGQUIT)
1125 #endif
1126 			|| !comm_signal_bind(worker->comsig, SIGTERM)
1127 			|| !comm_signal_bind(worker->comsig, SIGINT)) {
1128 			log_err("could not create signal handlers");
1129 			worker_delete(worker);
1130 			return 0;
1131 		}
1132 #endif /* LIBEVENT_SIGNAL_PROBLEM */
1133 		if(!daemon_remote_open_accept(worker->daemon->rc,
1134 			worker->daemon->rc_ports, worker)) {
1135 			worker_delete(worker);
1136 			return 0;
1137 		}
1138 #ifdef UB_ON_WINDOWS
1139 		wsvc_setup_worker(worker);
1140 #endif /* UB_ON_WINDOWS */
1141 	} else { /* !do_sigs */
1142 		worker->comsig = NULL;
1143 	}
1144 	worker->front = listen_create(worker->base, ports,
1145 		cfg->msg_buffer_size, (int)cfg->incoming_num_tcp,
1146 		worker->daemon->listen_sslctx, worker_handle_request, worker);
1147 	if(!worker->front) {
1148 		log_err("could not create listening sockets");
1149 		worker_delete(worker);
1150 		return 0;
1151 	}
1152 	worker->back = outside_network_create(worker->base,
1153 		cfg->msg_buffer_size, (size_t)cfg->outgoing_num_ports,
1154 		cfg->out_ifs, cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
1155 		cfg->do_tcp?cfg->outgoing_num_tcp:0,
1156 		worker->daemon->env->infra_cache, worker->rndstate,
1157 		cfg->use_caps_bits_for_id, worker->ports, worker->numports,
1158 		cfg->unwanted_threshold, &worker_alloc_cleanup, worker,
1159 		cfg->do_udp, worker->daemon->connect_sslctx, cfg->delay_close);
1160 	if(!worker->back) {
1161 		log_err("could not create outgoing sockets");
1162 		worker_delete(worker);
1163 		return 0;
1164 	}
1165 	/* start listening to commands */
1166 	if(!tube_setup_bg_listen(worker->cmd, worker->base,
1167 		&worker_handle_control_cmd, worker)) {
1168 		log_err("could not create control compt.");
1169 		worker_delete(worker);
1170 		return 0;
1171 	}
1172 	worker->stat_timer = comm_timer_create(worker->base,
1173 		worker_stat_timer_cb, worker);
1174 	if(!worker->stat_timer) {
1175 		log_err("could not create statistics timer");
1176 	}
1177 
1178 	/* we use the msg_buffer_size as a good estimate for what the
1179 	 * user wants for memory usage sizes */
1180 	worker->scratchpad = regional_create_custom(cfg->msg_buffer_size);
1181 	if(!worker->scratchpad) {
1182 		log_err("malloc failure");
1183 		worker_delete(worker);
1184 		return 0;
1185 	}
1186 
1187 	server_stats_init(&worker->stats, cfg);
1188 	alloc_init(&worker->alloc, &worker->daemon->superalloc,
1189 		worker->thread_num);
1190 	alloc_set_id_cleanup(&worker->alloc, &worker_alloc_cleanup, worker);
1191 	worker->env = *worker->daemon->env;
1192 	comm_base_timept(worker->base, &worker->env.now, &worker->env.now_tv);
1193 	if(worker->thread_num == 0)
1194 		log_set_time(worker->env.now);
1195 	worker->env.worker = worker;
1196 	worker->env.send_query = &worker_send_query;
1197 	worker->env.alloc = &worker->alloc;
1198 	worker->env.rnd = worker->rndstate;
1199 	worker->env.scratch = worker->scratchpad;
1200 	worker->env.mesh = mesh_create(&worker->daemon->mods, &worker->env);
1201 	worker->env.detach_subs = &mesh_detach_subs;
1202 	worker->env.attach_sub = &mesh_attach_sub;
1203 	worker->env.kill_sub = &mesh_state_delete;
1204 	worker->env.detect_cycle = &mesh_detect_cycle;
1205 	worker->env.scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
1206 	if(!(worker->env.fwds = forwards_create()) ||
1207 		!forwards_apply_cfg(worker->env.fwds, cfg)) {
1208 		log_err("Could not set forward zones");
1209 		worker_delete(worker);
1210 		return 0;
1211 	}
1212 	if(!(worker->env.hints = hints_create()) ||
1213 		!hints_apply_cfg(worker->env.hints, cfg)) {
1214 		log_err("Could not set root or stub hints");
1215 		worker_delete(worker);
1216 		return 0;
1217 	}
1218 	/* one probe timer per process -- if we have RFC 5011 autotrust anchors */
1219 	if(autr_get_num_anchors(worker->env.anchors) > 0
1220 #ifndef THREADS_DISABLED
1221 		&& worker->thread_num == 0
1222 #endif
1223 		) {
1224 		struct timeval tv;
1225 		tv.tv_sec = 0;
1226 		tv.tv_usec = 0;
1227 		worker->env.probe_timer = comm_timer_create(worker->base,
1228 			worker_probe_timer_cb, worker);
1229 		if(!worker->env.probe_timer) {
1230 			log_err("could not create 5011-probe timer");
1231 		} else {
1232 			/* let timer fire, then it can reset itself */
1233 			comm_timer_set(worker->env.probe_timer, &tv);
1234 		}
1235 	}
1236 	if(!worker->env.mesh || !worker->env.scratch_buffer) {
1237 		worker_delete(worker);
1238 		return 0;
1239 	}
1240 	worker_mem_report(worker, NULL);
1241 	/* if statistics enabled start timer */
1242 	if(worker->env.cfg->stat_interval > 0) {
1243 		verbose(VERB_ALGO, "set statistics interval %d secs",
1244 			worker->env.cfg->stat_interval);
1245 		worker_restart_timer(worker);
1246 	}
1247 	return 1;
1248 }
1249 
1250 void
1251 worker_work(struct worker* worker)
1252 {
1253 	comm_base_dispatch(worker->base);
1254 }
1255 
1256 void
1257 worker_delete(struct worker* worker)
1258 {
1259 	if(!worker)
1260 		return;
1261 	if(worker->env.mesh && verbosity >= VERB_OPS) {
1262 		server_stats_log(&worker->stats, worker, worker->thread_num);
1263 		mesh_stats(worker->env.mesh, "mesh has");
1264 		worker_mem_report(worker, NULL);
1265 	}
1266 	outside_network_quit_prepare(worker->back);
1267 	mesh_delete(worker->env.mesh);
1268 	sldns_buffer_free(worker->env.scratch_buffer);
1269 	forwards_delete(worker->env.fwds);
1270 	hints_delete(worker->env.hints);
1271 	listen_delete(worker->front);
1272 	outside_network_delete(worker->back);
1273 	comm_signal_delete(worker->comsig);
1274 	tube_delete(worker->cmd);
1275 	comm_timer_delete(worker->stat_timer);
1276 	comm_timer_delete(worker->env.probe_timer);
1277 	free(worker->ports);
1278 	if(worker->thread_num == 0) {
1279 		log_set_time(NULL);
1280 #ifdef UB_ON_WINDOWS
1281 		wsvc_desetup_worker(worker);
1282 #endif /* UB_ON_WINDOWS */
1283 	}
1284 	comm_base_delete(worker->base);
1285 	ub_randfree(worker->rndstate);
1286 	alloc_clear(&worker->alloc);
1287 	regional_destroy(worker->scratchpad);
1288 	free(worker);
1289 }
1290 
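/*
 * Send a query upstream on behalf of a module.  The outbound_entry is
 * allocated in the query state's region, so it is freed together with the
 * query state; replies arrive through worker_handle_service_reply() above.
 */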
1291 struct outbound_entry*
1292 worker_send_query(uint8_t* qname, size_t qnamelen, uint16_t qtype,
1293 	uint16_t qclass, uint16_t flags, int dnssec, int want_dnssec,
1294 	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
1295 	size_t zonelen, struct module_qstate* q)
1296 {
1297 	struct worker* worker = q->env->worker;
1298 	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
1299 		q->region, sizeof(*e));
1300 	if(!e)
1301 		return NULL;
1302 	e->qstate = q;
1303 	e->qsent = outnet_serviced_query(worker->back, qname,
1304 		qnamelen, qtype, qclass, flags, dnssec, want_dnssec,
1305 		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
1306 		addrlen, zone, zonelen, worker_handle_service_reply, e,
1307 		worker->back->udp_buff);
1308 	if(!e->qsent) {
1309 		return NULL;
1310 	}
1311 	return e;
1312 }
1313 
1314 void
1315 worker_alloc_cleanup(void* arg)
1316 {
1317 	struct worker* worker = (struct worker*)arg;
1318 	slabhash_clear(&worker->env.rrset_cache->table);
1319 	slabhash_clear(worker->env.msg_cache);
1320 }
1321 
1322 void worker_stats_clear(struct worker* worker)
1323 {
1324 	server_stats_init(&worker->stats, worker->env.cfg);
1325 	mesh_stats_clear(worker->env.mesh);
1326 	worker->back->unwanted_replies = 0;
1327 }
1328 
1329 void worker_start_accept(void* arg)
1330 {
1331 	struct worker* worker = (struct worker*)arg;
1332 	listen_start_accept(worker->front);
1333 	if(worker->thread_num == 0)
1334 		daemon_remote_start_accept(worker->daemon->rc);
1335 }
1336 
1337 void worker_stop_accept(void* arg)
1338 {
1339 	struct worker* worker = (struct worker*)arg;
1340 	listen_stop_accept(worker->front);
1341 	if(worker->thread_num == 0)
1342 		daemon_remote_stop_accept(worker->daemon->rc);
1343 }
1344 
1345 /* --- fake callbacks for fptr_wlist to work --- */
1346 struct outbound_entry* libworker_send_query(uint8_t* ATTR_UNUSED(qname),
1347 	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
1348 	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
1349 	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
1350 	struct sockaddr_storage* ATTR_UNUSED(addr),
1351 	socklen_t ATTR_UNUSED(addrlen), struct module_qstate* ATTR_UNUSED(q))
1352 {
1353 	log_assert(0);
1354 	return 0;
1355 }
1356 
1357 int libworker_handle_reply(struct comm_point* ATTR_UNUSED(c),
1358 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1359         struct comm_reply* ATTR_UNUSED(reply_info))
1360 {
1361 	log_assert(0);
1362 	return 0;
1363 }
1364 
1365 int libworker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
1366 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1367         struct comm_reply* ATTR_UNUSED(reply_info))
1368 {
1369 	log_assert(0);
1370 	return 0;
1371 }
1372 
1373 void libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
1374         uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
1375         int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
1376 {
1377 	log_assert(0);
1378 }
1379 
1380 void libworker_fg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1381         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1382 	char* ATTR_UNUSED(why_bogus))
1383 {
1384 	log_assert(0);
1385 }
1386 
1387 void libworker_bg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1388         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1389 	char* ATTR_UNUSED(why_bogus))
1390 {
1391 	log_assert(0);
1392 }
1393 
1394 void libworker_event_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1395         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1396 	char* ATTR_UNUSED(why_bogus))
1397 {
1398 	log_assert(0);
1399 }
1400 
1401 int context_query_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1402 {
1403 	log_assert(0);
1404 	return 0;
1405 }
1406 
1407 int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
1408 {
1409         log_assert(0);
1410         return 0;
1411 }
1412 
1413 int codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1414 {
1415         log_assert(0);
1416         return 0;
1417 }
1418 
1419