xref: /openbsd-src/usr.sbin/unbound/daemon/worker.c (revision 4c1e55dc91edd6e69ccc60ce855900fbc12cf34f)
1 /*
2  * daemon/worker.c - worker that handles a pending list of requests.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file implements the worker that handles callbacks on events, for
40  * pending requests.
41  */
42 #include "config.h"
43 #include <ldns/wire2host.h>
44 #include "util/log.h"
45 #include "util/net_help.h"
46 #include "util/random.h"
47 #include "daemon/worker.h"
48 #include "daemon/daemon.h"
49 #include "daemon/remote.h"
50 #include "daemon/acl_list.h"
51 #include "util/netevent.h"
52 #include "util/config_file.h"
53 #include "util/module.h"
54 #include "util/regional.h"
55 #include "util/storage/slabhash.h"
56 #include "services/listen_dnsport.h"
57 #include "services/outside_network.h"
58 #include "services/outbound_list.h"
59 #include "services/cache/rrset.h"
60 #include "services/cache/infra.h"
61 #include "services/cache/dns.h"
62 #include "services/mesh.h"
63 #include "services/localzone.h"
64 #include "util/data/msgparse.h"
65 #include "util/data/msgencode.h"
66 #include "util/data/dname.h"
67 #include "util/fptr_wlist.h"
68 #include "util/tube.h"
69 #include "iterator/iter_fwd.h"
70 #include "validator/autotrust.h"
71 
72 #ifdef HAVE_SYS_TYPES_H
73 #  include <sys/types.h>
74 #endif
75 #ifdef HAVE_NETDB_H
76 #include <netdb.h>
77 #endif
78 #include <signal.h>
79 #ifdef UB_ON_WINDOWS
80 #include "winrc/win_svc.h"
81 #endif
82 
83 /** Size of a UDP datagram */
84 #define NORMAL_UDP_SIZE	512 /* bytes */
85 
86 /**
87  * Seconds to add to the prefetch leeway.  This is extra TTL that expires old
88  * rrsets earlier than they normally would, so that the new update can be put
89  * into the cache.  This additional value makes sure that, if not all TTLs in
90  * the message to be updated (and replaced) are equal, rrsets with up to this
91  * much extra TTL are also replaced.  The resulting new message will then
92  * (most likely) have at least this TTL, avoiding very small 'split second'
93  * TTLs that arise when operators choose relatively prime TTLs for records.
94  * Also has to be at least one to break ties (and overwrite the cached entry).
95  */
96 #define PREFETCH_EXPIRY_ADD 60
97 
98 #ifdef UNBOUND_ALLOC_STATS
99 /** measure memory leakage */
100 static void
101 debug_memleak(size_t accounted, size_t heap,
102 	size_t total_alloc, size_t total_free)
103 {
104 	static int init = 0;
105 	static size_t base_heap, base_accounted, base_alloc, base_free;
106 	size_t base_af, cur_af, grow_af, grow_acc;
107 	if(!init) {
108 		init = 1;
109 		base_heap = heap;
110 		base_accounted = accounted;
111 		base_alloc = total_alloc;
112 		base_free = total_free;
113 	}
114 	base_af = base_alloc - base_free;
115 	cur_af = total_alloc - total_free;
116 	grow_af = cur_af - base_af;
117 	grow_acc = accounted - base_accounted;
118 	log_info("Leakage: %d leaked. growth: %u use, %u acc, %u heap",
119 		(int)(grow_af - grow_acc), (unsigned)grow_af,
120 		(unsigned)grow_acc, (unsigned)(heap - base_heap));
121 }
122 
123 /** give debug heap size indication */
124 static void
125 debug_total_mem(size_t calctotal)
126 {
127 #ifdef HAVE_SBRK
128 	extern void* unbound_start_brk;
129 	extern size_t unbound_mem_alloc, unbound_mem_freed;
130 	void* cur = sbrk(0);
131 	int total = cur-unbound_start_brk;
132 	log_info("Total heap memory estimate: %u  total-alloc: %u  "
133 		"total-free: %u", (unsigned)total,
134 		(unsigned)unbound_mem_alloc, (unsigned)unbound_mem_freed);
135 	debug_memleak(calctotal, (size_t)total,
136 		unbound_mem_alloc, unbound_mem_freed);
137 #else
138 	(void)calctotal;
139 #endif /* HAVE_SBRK */
140 }
141 #endif /* UNBOUND_ALLOC_STATS */
142 
143 /** Report on memory usage by this thread and globally */
144 static void
145 worker_mem_report(struct worker* ATTR_UNUSED(worker),
146 	struct serviced_query* ATTR_UNUSED(cur_serv))
147 {
148 #ifdef UNBOUND_ALLOC_STATS
149 	/* debug func in validator module */
150 	size_t total, front, back, mesh, msg, rrset, infra, ac, superac;
151 	size_t me, iter, val;
152 	int i;
153 	if(verbosity < VERB_ALGO)
154 		return;
155 	front = listen_get_mem(worker->front);
156 	back = outnet_get_mem(worker->back);
157 	msg = slabhash_get_mem(worker->env.msg_cache);
158 	rrset = slabhash_get_mem(&worker->env.rrset_cache->table);
159 	infra = infra_get_mem(worker->env.infra_cache);
160 	mesh = mesh_get_mem(worker->env.mesh);
161 	ac = alloc_get_mem(&worker->alloc);
162 	superac = alloc_get_mem(&worker->daemon->superalloc);
163 	iter = 0;
164 	val = 0;
165 	for(i=0; i<worker->env.mesh->mods.num; i++) {
166 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
167 			mods.mod[i]->get_mem));
168 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
169 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
170 				(&worker->env, i);
171 		else	iter += (*worker->env.mesh->mods.mod[i]->get_mem)
172 				(&worker->env, i);
173 	}
174 	me = sizeof(*worker) + sizeof(*worker->base) + sizeof(*worker->comsig)
175 		+ comm_point_get_mem(worker->cmd_com)
176 		+ sizeof(worker->rndstate)
177 		+ regional_get_mem(worker->scratchpad)
178 		+ sizeof(*worker->env.scratch_buffer)
179 		+ ldns_buffer_capacity(worker->env.scratch_buffer)
180 		+ forwards_get_mem(worker->env.fwds);
181 	if(worker->thread_num == 0)
182 		me += acl_list_get_mem(worker->daemon->acl);
183 	if(cur_serv) {
184 		me += serviced_get_mem(cur_serv);
185 	}
186 	total = front+back+mesh+msg+rrset+infra+iter+val+ac+superac+me;
187 	log_info("Memory conditions: %u front=%u back=%u mesh=%u msg=%u "
188 		"rrset=%u infra=%u iter=%u val=%u "
189 		"alloccache=%u globalalloccache=%u me=%u",
190 		(unsigned)total, (unsigned)front, (unsigned)back,
191 		(unsigned)mesh, (unsigned)msg, (unsigned)rrset,
192 		(unsigned)infra, (unsigned)iter, (unsigned)val, (unsigned)ac,
193 		(unsigned)superac, (unsigned)me);
194 	debug_total_mem(total);
195 #else /* no UNBOUND_ALLOC_STATS */
196 	size_t val = 0;
197 	int i;
198 	if(verbosity < VERB_QUERY)
199 		return;
200 	for(i=0; i<worker->env.mesh->mods.num; i++) {
201 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
202 			mods.mod[i]->get_mem));
203 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
204 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
205 				(&worker->env, i);
206 	}
207 	verbose(VERB_QUERY, "cache memory msg=%u rrset=%u infra=%u val=%u",
208 		(unsigned)slabhash_get_mem(worker->env.msg_cache),
209 		(unsigned)slabhash_get_mem(&worker->env.rrset_cache->table),
210 		(unsigned)infra_get_mem(worker->env.infra_cache),
211 		(unsigned)val);
212 #endif /* UNBOUND_ALLOC_STATS */
213 }
214 
215 void
216 worker_send_cmd(struct worker* worker, enum worker_commands cmd)
217 {
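	/* the command is sent over the worker's command tube in network byte order */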
218 	uint32_t c = (uint32_t)htonl(cmd);
219 	if(!tube_write_msg(worker->cmd, (uint8_t*)&c, sizeof(c), 0)) {
220 		log_err("worker send cmd %d failed", (int)cmd);
221 	}
222 }
223 
224 int
225 worker_handle_reply(struct comm_point* c, void* arg, int error,
226 	struct comm_reply* reply_info)
227 {
228 	struct module_qstate* q = (struct module_qstate*)arg;
229 	struct worker* worker = q->env->worker;
230 	struct outbound_entry e;
231 	e.qstate = q;
232 	e.qsent = NULL;
233 
234 	if(error != 0) {
235 		mesh_report_reply(worker->env.mesh, &e, reply_info, error);
236 		worker_mem_report(worker, NULL);
237 		return 0;
238 	}
239 	/* sanity check. */
240 	if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
241 		|| LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
242 			LDNS_PACKET_QUERY
243 		|| LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
244 		/* error becomes timeout for the module as if this reply
245 		 * never arrived. */
246 		mesh_report_reply(worker->env.mesh, &e, reply_info,
247 			NETEVENT_TIMEOUT);
248 		worker_mem_report(worker, NULL);
249 		return 0;
250 	}
251 	mesh_report_reply(worker->env.mesh, &e, reply_info, NETEVENT_NOERROR);
252 	worker_mem_report(worker, NULL);
253 	return 0;
254 }
255 
256 int
257 worker_handle_service_reply(struct comm_point* c, void* arg, int error,
258 	struct comm_reply* reply_info)
259 {
260 	struct outbound_entry* e = (struct outbound_entry*)arg;
261 	struct worker* worker = e->qstate->env->worker;
262 	struct serviced_query *sq = e->qsent;
263 
264 	verbose(VERB_ALGO, "worker svcd callback for qstate %p", e->qstate);
265 	if(error != 0) {
266 		mesh_report_reply(worker->env.mesh, e, reply_info, error);
267 		worker_mem_report(worker, sq);
268 		return 0;
269 	}
270 	/* sanity check. */
271 	if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
272 		|| LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
273 			LDNS_PACKET_QUERY
274 		|| LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
275 		/* error becomes timeout for the module as if this reply
276 		 * never arrived. */
277 		verbose(VERB_ALGO, "worker: bad reply handled as timeout");
278 		mesh_report_reply(worker->env.mesh, e, reply_info,
279 			NETEVENT_TIMEOUT);
280 		worker_mem_report(worker, sq);
281 		return 0;
282 	}
283 	mesh_report_reply(worker->env.mesh, e, reply_info, NETEVENT_NOERROR);
284 	worker_mem_report(worker, sq);
285 	return 0;
286 }
287 
288 /** check request sanity.
289  * @param pkt: the wire packet to examine for sanity.
290  * @param worker: worker with the settings used for checking.
291  * @return error rcode to reply with, 0 if OK, or -1 to discard the query.
292  */
293 static int
294 worker_check_request(ldns_buffer* pkt, struct worker* worker)
295 {
296 	if(ldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) {
297 		verbose(VERB_QUERY, "request too short, discarded");
298 		return -1;
299 	}
300 	if(ldns_buffer_limit(pkt) > NORMAL_UDP_SIZE &&
301 		worker->daemon->cfg->harden_large_queries) {
302 		verbose(VERB_QUERY, "request too large, discarded");
303 		return -1;
304 	}
305 	if(LDNS_QR_WIRE(ldns_buffer_begin(pkt))) {
306 		verbose(VERB_QUERY, "request has QR bit on, discarded");
307 		return -1;
308 	}
309 	if(LDNS_TC_WIRE(ldns_buffer_begin(pkt))) {
310 		LDNS_TC_CLR(ldns_buffer_begin(pkt));
311 		verbose(VERB_QUERY, "request bad, has TC bit on");
312 		return LDNS_RCODE_FORMERR;
313 	}
314 	if(LDNS_OPCODE_WIRE(ldns_buffer_begin(pkt)) != LDNS_PACKET_QUERY) {
315 		verbose(VERB_QUERY, "request unknown opcode %d",
316 			LDNS_OPCODE_WIRE(ldns_buffer_begin(pkt)));
317 		return LDNS_RCODE_NOTIMPL;
318 	}
319 	if(LDNS_QDCOUNT(ldns_buffer_begin(pkt)) != 1) {
320 		verbose(VERB_QUERY, "request wrong nr qd=%d",
321 			LDNS_QDCOUNT(ldns_buffer_begin(pkt)));
322 		return LDNS_RCODE_FORMERR;
323 	}
324 	if(LDNS_ANCOUNT(ldns_buffer_begin(pkt)) != 0) {
325 		verbose(VERB_QUERY, "request wrong nr an=%d",
326 			LDNS_ANCOUNT(ldns_buffer_begin(pkt)));
327 		return LDNS_RCODE_FORMERR;
328 	}
329 	if(LDNS_NSCOUNT(ldns_buffer_begin(pkt)) != 0) {
330 		verbose(VERB_QUERY, "request wrong nr ns=%d",
331 			LDNS_NSCOUNT(ldns_buffer_begin(pkt)));
332 		return LDNS_RCODE_FORMERR;
333 	}
334 	if(LDNS_ARCOUNT(ldns_buffer_begin(pkt)) > 1) {
335 		verbose(VERB_QUERY, "request wrong nr ar=%d",
336 			LDNS_ARCOUNT(ldns_buffer_begin(pkt)));
337 		return LDNS_RCODE_FORMERR;
338 	}
339 	return 0;
340 }
341 
342 void
343 worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), uint8_t* msg,
344 	size_t len, int error, void* arg)
345 {
346 	struct worker* worker = (struct worker*)arg;
347 	enum worker_commands cmd;
348 	if(error != NETEVENT_NOERROR) {
349 		free(msg);
350 		if(error == NETEVENT_CLOSED)
351 			comm_base_exit(worker->base);
352 		else	log_info("control event: %d", error);
353 		return;
354 	}
355 	if(len != sizeof(uint32_t)) {
356 		fatal_exit("bad control msg length %d", (int)len);
357 	}
358 	cmd = ldns_read_uint32(msg);
359 	free(msg);
360 	switch(cmd) {
361 	case worker_cmd_quit:
362 		verbose(VERB_ALGO, "got control cmd quit");
363 		comm_base_exit(worker->base);
364 		break;
365 	case worker_cmd_stats:
366 		verbose(VERB_ALGO, "got control cmd stats");
367 		server_stats_reply(worker, 1);
368 		break;
369 	case worker_cmd_stats_noreset:
370 		verbose(VERB_ALGO, "got control cmd stats_noreset");
371 		server_stats_reply(worker, 0);
372 		break;
373 	case worker_cmd_remote:
374 		verbose(VERB_ALGO, "got control cmd remote");
375 		daemon_remote_exec(worker);
376 		break;
377 	default:
378 		log_err("bad command %d", (int)cmd);
379 		break;
380 	}
381 }
382 
383 /** check if a delegation is secure */
384 static enum sec_status
385 check_delegation_secure(struct reply_info *rep)
386 {
387 	/* return smallest security status */
388 	size_t i;
389 	enum sec_status sec = sec_status_secure;
390 	enum sec_status s;
391 	size_t num = rep->an_numrrsets + rep->ns_numrrsets;
392 	/* check if answer and authority are OK */
393 	for(i=0; i<num; i++) {
394 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
395 			->security;
396 		if(s < sec)
397 			sec = s;
398 	}
399 	/* in the additional section, only unchecked rrsets trigger revalidation */
400 	for(i=num; i<rep->rrset_count; i++) {
401 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
402 			->security;
403 		if(s == sec_status_unchecked)
404 			return s;
405 	}
406 	return sec;
407 }
408 
409 /** remove nonsecure from a delegation referral additional section */
410 static void
411 deleg_remove_nonsecure_additional(struct reply_info* rep)
412 {
413 	/* we can simply edit it, since we are working in the scratch region */
414 	size_t i;
415 	enum sec_status s;
416 
417 	for(i = rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
418 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
419 			->security;
420 		if(s != sec_status_secure) {
421 			memmove(rep->rrsets+i, rep->rrsets+i+1,
422 				sizeof(struct ub_packed_rrset_key*)*
423 				(rep->rrset_count - i - 1));
424 			rep->ar_numrrsets--;
425 			rep->rrset_count--;
426 			i--;
427 		}
428 	}
429 }
430 
431 /** answer nonrecursive query from the cache */
432 static int
433 answer_norec_from_cache(struct worker* worker, struct query_info* qinfo,
434 	uint16_t id, uint16_t flags, struct comm_reply* repinfo,
435 	struct edns_data* edns)
436 {
437 	/* for a nonrecursive query return either:
438 	 * 	o an error (servfail; we try to avoid this)
439 	 * 	o a delegation (closest we have; this routine tries that)
440 	 * 	o the answer (checked by answer_from_cache)
441 	 *
442 	 * So, grab a delegation from the rrset cache.
443 	 * Then check if it needs validation; if so, this routine fails,
444 	 * so that the iterator can prime and the validator can verify rrsets.
445 	 */
446 	uint16_t udpsize = edns->udp_size;
447 	int secure = 0;
448 	uint32_t timenow = *worker->env.now;
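	/* validate unless the client set the CD bit (and ignore-cd is off),
	 * and only if this resolver is configured to validate at all */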
449 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
450 		&& worker->env.need_to_validate;
451 	struct dns_msg *msg = NULL;
452 	struct delegpt *dp;
453 
454 	dp = dns_cache_find_delegation(&worker->env, qinfo->qname,
455 		qinfo->qname_len, qinfo->qtype, qinfo->qclass,
456 		worker->scratchpad, &msg, timenow);
457 	if(!dp) { /* no delegation, need to reprime */
458 		regional_free_all(worker->scratchpad);
459 		return 0;
460 	}
461 	if(must_validate) {
462 		switch(check_delegation_secure(msg->rep)) {
463 		case sec_status_unchecked:
464 			/* some rrsets have not been verified yet, go and
465 			 * let validator do that */
466 			regional_free_all(worker->scratchpad);
467 			return 0;
468 		case sec_status_bogus:
469 			/* some rrsets are bogus, reply servfail */
470 			edns->edns_version = EDNS_ADVERTISED_VERSION;
471 			edns->udp_size = EDNS_ADVERTISED_SIZE;
472 			edns->ext_rcode = 0;
473 			edns->bits &= EDNS_DO;
474 			error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
475 				&msg->qinfo, id, flags, edns);
476 			regional_free_all(worker->scratchpad);
477 			if(worker->stats.extended) {
478 				worker->stats.ans_bogus++;
479 				worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL]++;
480 			}
481 			return 1;
482 		case sec_status_secure:
483 			/* all rrsets are secure */
484 			/* remove non-secure rrsets from the additional section */
485 			if(worker->env.cfg->val_clean_additional)
486 				deleg_remove_nonsecure_additional(msg->rep);
487 			secure = 1;
488 			break;
489 		case sec_status_indeterminate:
490 		case sec_status_insecure:
491 		default:
492 			/* not secure */
493 			secure = 0;
494 			break;
495 		}
496 	}
497 	/* return this delegation from the cache */
498 	edns->edns_version = EDNS_ADVERTISED_VERSION;
499 	edns->udp_size = EDNS_ADVERTISED_SIZE;
500 	edns->ext_rcode = 0;
501 	edns->bits &= EDNS_DO;
502 	msg->rep->flags |= BIT_QR|BIT_RA;
503 	if(!reply_info_answer_encode(&msg->qinfo, msg->rep, id, flags,
504 		repinfo->c->buffer, 0, 1, worker->scratchpad,
505 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
506 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
507 			&msg->qinfo, id, flags, edns);
508 	}
509 	regional_free_all(worker->scratchpad);
510 	if(worker->stats.extended) {
511 		if(secure) worker->stats.ans_secure++;
512 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
513 	}
514 	return 1;
515 }
516 
517 /** answer query from the cache */
518 static int
519 answer_from_cache(struct worker* worker, struct query_info* qinfo,
520 	struct reply_info* rep, uint16_t id, uint16_t flags,
521 	struct comm_reply* repinfo, struct edns_data* edns)
522 {
523 	uint32_t timenow = *worker->env.now;
524 	uint16_t udpsize = edns->udp_size;
525 	int secure;
526 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
527 		&& worker->env.need_to_validate;
528 	/* see if it is possible */
529 	if(rep->ttl < timenow) {
530 		/* the rrsets may have been updated in the meantime.
531 		 * we will refetch the message format from the
532 		 * authoritative server
533 		 */
534 		return 0;
535 	}
536 	if(!rrset_array_lock(rep->ref, rep->rrset_count, timenow))
537 		return 0;
538 	/* locked and ids and ttls are OK. */
539 	/* check CNAME chain (if any) */
540 	if(rep->an_numrrsets > 0 && (rep->rrsets[0]->rk.type ==
541 		htons(LDNS_RR_TYPE_CNAME) || rep->rrsets[0]->rk.type ==
542 		htons(LDNS_RR_TYPE_DNAME))) {
543 		if(!reply_check_cname_chain(rep)) {
544 			/* cname chain invalid, redo iterator steps */
545 			verbose(VERB_ALGO, "Cache reply: cname chain broken");
546 		bail_out:
547 			rrset_array_unlock_touch(worker->env.rrset_cache,
548 				worker->scratchpad, rep->ref, rep->rrset_count);
549 			regional_free_all(worker->scratchpad);
550 			return 0;
551 		}
552 	}
553 	/* check security status of the cached answer */
554 	if( rep->security == sec_status_bogus && must_validate) {
555 		/* BAD cached */
556 		edns->edns_version = EDNS_ADVERTISED_VERSION;
557 		edns->udp_size = EDNS_ADVERTISED_SIZE;
558 		edns->ext_rcode = 0;
559 		edns->bits &= EDNS_DO;
560 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
561 			qinfo, id, flags, edns);
562 		rrset_array_unlock_touch(worker->env.rrset_cache,
563 			worker->scratchpad, rep->ref, rep->rrset_count);
564 		regional_free_all(worker->scratchpad);
565 		if(worker->stats.extended) {
566 			worker->stats.ans_bogus ++;
567 			worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL] ++;
568 		}
569 		return 1;
570 	} else if( rep->security == sec_status_unchecked && must_validate) {
571 		verbose(VERB_ALGO, "Cache reply: unchecked entry needs "
572 			"validation");
573 		goto bail_out; /* need to validate cache entry first */
574 	} else if(rep->security == sec_status_secure) {
575 		if(reply_all_rrsets_secure(rep))
576 			secure = 1;
577 		else	{
578 			if(must_validate) {
579 				verbose(VERB_ALGO, "Cache reply: secure entry"
580 					" changed status");
581 				goto bail_out; /* rrset changed, re-verify */
582 			}
583 			secure = 0;
584 		}
585 	} else	secure = 0;
586 
587 	edns->edns_version = EDNS_ADVERTISED_VERSION;
588 	edns->udp_size = EDNS_ADVERTISED_SIZE;
589 	edns->ext_rcode = 0;
590 	edns->bits &= EDNS_DO;
591 	if(!reply_info_answer_encode(qinfo, rep, id, flags,
592 		repinfo->c->buffer, timenow, 1, worker->scratchpad,
593 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
594 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
595 			qinfo, id, flags, edns);
596 	}
597 	/* cannot send the reply right now, because a blocking network syscall
598 	 * is bad while holding locks. */
599 	rrset_array_unlock_touch(worker->env.rrset_cache, worker->scratchpad,
600 		rep->ref, rep->rrset_count);
601 	regional_free_all(worker->scratchpad);
602 	if(worker->stats.extended) {
603 		if(secure) worker->stats.ans_secure++;
604 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
605 	}
606 	/* go and return this buffer to the client */
607 	return 1;
608 }
609 
610 /** Reply to client and perform prefetch to keep cache up to date */
611 static void
612 reply_and_prefetch(struct worker* worker, struct query_info* qinfo,
613 	uint16_t flags, struct comm_reply* repinfo, uint32_t leeway)
614 {
615 	/* first send the answer to the client, to keep its latency
616 	 * as small as that of a plain cache reply */
617 	comm_point_send_reply(repinfo);
618 	server_stats_prefetch(&worker->stats, worker);
619 
620 	/* create the prefetch in the mesh as a normal lookup without
621 	 * client addrs waiting, which has the cache blacklisted (to bypass
622 	 * the cache and go to the network for the data). */
623 	/* this (potentially) runs the mesh for the new query */
624 	mesh_new_prefetch(worker->env.mesh, qinfo, flags, leeway +
625 		PREFETCH_EXPIRY_ADD);
626 }
627 
628 /**
629  * Fill CH class answer into buffer. Keeps query.
630  * @param pkt: buffer
631  * @param str: string to put into the TXT record (<255 bytes, longer is truncated).
632  * @param edns: edns reply information.
633  */
634 static void
635 chaos_replystr(ldns_buffer* pkt, const char* str, struct edns_data* edns)
636 {
637 	size_t len = strlen(str);
638 	unsigned int rd = LDNS_RD_WIRE(ldns_buffer_begin(pkt));
639 	unsigned int cd = LDNS_CD_WIRE(ldns_buffer_begin(pkt));
640 	if(len>255) len=255; /* cap size of TXT record */
641 	ldns_buffer_clear(pkt);
642 	ldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip id */
643 	ldns_buffer_write_u16(pkt, (uint16_t)(BIT_QR|BIT_RA));
644 	if(rd) LDNS_RD_SET(ldns_buffer_begin(pkt));
645 	if(cd) LDNS_CD_SET(ldns_buffer_begin(pkt));
646 	ldns_buffer_write_u16(pkt, 1); /* qdcount */
647 	ldns_buffer_write_u16(pkt, 1); /* ancount */
648 	ldns_buffer_write_u16(pkt, 0); /* nscount */
649 	ldns_buffer_write_u16(pkt, 0); /* arcount */
650 	(void)query_dname_len(pkt); /* skip qname */
651 	ldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qtype */
652 	ldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qclass */
653 	ldns_buffer_write_u16(pkt, 0xc00c); /* compr ptr to query */
654 	ldns_buffer_write_u16(pkt, LDNS_RR_TYPE_TXT);
655 	ldns_buffer_write_u16(pkt, LDNS_RR_CLASS_CH);
656 	ldns_buffer_write_u32(pkt, 0); /* TTL */
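	/* TXT RDATA is a single character-string: one length byte, then the text */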
657 	ldns_buffer_write_u16(pkt, sizeof(uint8_t) + len);
658 	ldns_buffer_write_u8(pkt, len);
659 	ldns_buffer_write(pkt, str, len);
660 	ldns_buffer_flip(pkt);
661 	edns->edns_version = EDNS_ADVERTISED_VERSION;
662 	edns->udp_size = EDNS_ADVERTISED_SIZE;
663 	edns->bits &= EDNS_DO;
664 	attach_edns_record(pkt, edns);
665 }
666 
667 /**
668  * Answer CH class queries.
669  * @param w: worker
670  * @param qinfo: query info. Pointer into packet buffer.
671  * @param edns: edns info from query.
672  * @param pkt: packet buffer.
673  * @return: true if a reply is to be sent.
674  */
675 static int
676 answer_chaos(struct worker* w, struct query_info* qinfo,
677 	struct edns_data* edns, ldns_buffer* pkt)
678 {
679 	struct config_file* cfg = w->env.cfg;
680 	if(qinfo->qtype != LDNS_RR_TYPE_ANY && qinfo->qtype != LDNS_RR_TYPE_TXT)
681 		return 0;
682 	if(query_dname_compare(qinfo->qname,
683 		(uint8_t*)"\002id\006server") == 0 ||
684 		query_dname_compare(qinfo->qname,
685 		(uint8_t*)"\010hostname\004bind") == 0)
686 	{
687 		if(cfg->hide_identity)
688 			return 0;
689 		if(cfg->identity==NULL || cfg->identity[0]==0) {
690 			char buf[MAXHOSTNAMELEN+1];
691 			if (gethostname(buf, MAXHOSTNAMELEN) == 0) {
692 				buf[MAXHOSTNAMELEN] = 0;
693 				chaos_replystr(pkt, buf, edns);
694 			} else 	{
695 				log_err("gethostname: %s", strerror(errno));
696 				chaos_replystr(pkt, "no hostname", edns);
697 			}
698 		}
699 		else 	chaos_replystr(pkt, cfg->identity, edns);
700 		return 1;
701 	}
702 	if(query_dname_compare(qinfo->qname,
703 		(uint8_t*)"\007version\006server") == 0 ||
704 		query_dname_compare(qinfo->qname,
705 		(uint8_t*)"\007version\004bind") == 0)
706 	{
707 		if(cfg->hide_version)
708 			return 0;
709 		if(cfg->version==NULL || cfg->version[0]==0)
710 			chaos_replystr(pkt, PACKAGE_STRING, edns);
711 		else 	chaos_replystr(pkt, cfg->version, edns);
712 		return 1;
713 	}
714 	return 0;
715 }
716 
717 int
718 worker_handle_request(struct comm_point* c, void* arg, int error,
719 	struct comm_reply* repinfo)
720 {
721 	struct worker* worker = (struct worker*)arg;
722 	int ret;
723 	hashvalue_t h;
724 	struct lruhash_entry* e;
725 	struct query_info qinfo;
726 	struct edns_data edns;
727 	enum acl_access acl;
728 
729 	if(error != NETEVENT_NOERROR) {
730 		/* some malformed TCP DNS queries cause these error callbacks */
731 		verbose(VERB_ALGO, "handle request called with err=%d", error);
732 		return 0;
733 	}
734 	acl = acl_list_lookup(worker->daemon->acl, &repinfo->addr,
735 		repinfo->addrlen);
736 	if(acl == acl_deny) {
737 		comm_point_drop_reply(repinfo);
738 		if(worker->stats.extended)
739 			worker->stats.unwanted_queries++;
740 		return 0;
741 	} else if(acl == acl_refuse) {
742 		log_addr(VERB_ALGO, "refused query from",
743 			&repinfo->addr, repinfo->addrlen);
744 		log_buf(VERB_ALGO, "refuse", c->buffer);
745 		if(worker->stats.extended)
746 			worker->stats.unwanted_queries++;
747 		if(worker_check_request(c->buffer, worker) == -1) {
748 			comm_point_drop_reply(repinfo);
749 			return 0; /* discard this */
750 		}
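		/* truncate to just the 12 byte DNS header, zero the four section
		 * counts (bytes 4..11), then set QR and RCODE REFUSED below */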
751 		ldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
752 		ldns_buffer_write_at(c->buffer, 4,
753 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
754 		LDNS_QR_SET(ldns_buffer_begin(c->buffer));
755 		LDNS_RCODE_SET(ldns_buffer_begin(c->buffer),
756 			LDNS_RCODE_REFUSED);
757 		return 1;
758 	}
759 	if((ret=worker_check_request(c->buffer, worker)) != 0) {
760 		verbose(VERB_ALGO, "worker check request: bad query.");
761 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
762 		if(ret != -1) {
763 			LDNS_QR_SET(ldns_buffer_begin(c->buffer));
764 			LDNS_RCODE_SET(ldns_buffer_begin(c->buffer), ret);
765 			return 1;
766 		}
767 		comm_point_drop_reply(repinfo);
768 		return 0;
769 	}
770 	worker->stats.num_queries++;
771 	/* see if query is in the cache */
772 	if(!query_info_parse(&qinfo, c->buffer)) {
773 		verbose(VERB_ALGO, "worker parse request: formerror.");
774 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
775 		ldns_buffer_rewind(c->buffer);
776 		LDNS_QR_SET(ldns_buffer_begin(c->buffer));
777 		LDNS_RCODE_SET(ldns_buffer_begin(c->buffer),
778 			LDNS_RCODE_FORMERR);
779 		server_stats_insrcode(&worker->stats, c->buffer);
780 		return 1;
781 	}
782 	if(worker->env.cfg->log_queries) {
783 		char ip[128];
784 		addr_to_str(&repinfo->addr, repinfo->addrlen, ip, sizeof(ip));
785 		log_nametypeclass(0, ip, qinfo.qname, qinfo.qtype, qinfo.qclass);
786 	}
787 	if(qinfo.qtype == LDNS_RR_TYPE_AXFR ||
788 		qinfo.qtype == LDNS_RR_TYPE_IXFR) {
789 		verbose(VERB_ALGO, "worker request: refused zone transfer.");
790 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
791 		ldns_buffer_rewind(c->buffer);
792 		LDNS_QR_SET(ldns_buffer_begin(c->buffer));
793 		LDNS_RCODE_SET(ldns_buffer_begin(c->buffer),
794 			LDNS_RCODE_REFUSED);
795 		if(worker->stats.extended) {
796 			worker->stats.qtype[qinfo.qtype]++;
797 			server_stats_insrcode(&worker->stats, c->buffer);
798 		}
799 		return 1;
800 	}
801 	if((ret=parse_edns_from_pkt(c->buffer, &edns)) != 0) {
802 		verbose(VERB_ALGO, "worker parse edns: formerror.");
803 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
804 		ldns_buffer_rewind(c->buffer);
805 		LDNS_QR_SET(ldns_buffer_begin(c->buffer));
806 		LDNS_RCODE_SET(ldns_buffer_begin(c->buffer), ret);
807 		server_stats_insrcode(&worker->stats, c->buffer);
808 		return 1;
809 	}
810 	if(edns.edns_present && edns.edns_version != 0) {
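		/* unsupported EDNS version: answer BADVERS.  The upper bits of
		 * the extended RCODE go in the OPT record (ext_rcode), the low
		 * 4 bits in the message header. */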
811 		edns.ext_rcode = (uint8_t)(EDNS_RCODE_BADVERS>>4);
812 		edns.edns_version = EDNS_ADVERTISED_VERSION;
813 		edns.udp_size = EDNS_ADVERTISED_SIZE;
814 		edns.bits &= EDNS_DO;
815 		verbose(VERB_ALGO, "query with bad edns version.");
816 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
817 		error_encode(c->buffer, EDNS_RCODE_BADVERS&0xf, &qinfo,
818 			*(uint16_t*)ldns_buffer_begin(c->buffer),
819 			ldns_buffer_read_u16_at(c->buffer, 2), NULL);
820 		attach_edns_record(c->buffer, &edns);
821 		return 1;
822 	}
823 	if(edns.edns_present && edns.udp_size < NORMAL_UDP_SIZE &&
824 		worker->daemon->cfg->harden_short_bufsize) {
825 		verbose(VERB_QUERY, "worker request: EDNS bufsize %d ignored",
826 			(int)edns.udp_size);
827 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
828 		edns.udp_size = NORMAL_UDP_SIZE;
829 	}
830 	if(edns.edns_present && edns.udp_size < LDNS_HEADER_SIZE) {
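		/* advertised buffer cannot even hold a DNS header; send back
		 * only a 12 byte header with TC and SERVFAIL set */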
831 		verbose(VERB_ALGO, "worker request: edns is too small.");
832 		log_addr(VERB_CLIENT, "from", &repinfo->addr, repinfo->addrlen);
833 		LDNS_QR_SET(ldns_buffer_begin(c->buffer));
834 		LDNS_TC_SET(ldns_buffer_begin(c->buffer));
835 		LDNS_RCODE_SET(ldns_buffer_begin(c->buffer),
836 			LDNS_RCODE_SERVFAIL);
837 		ldns_buffer_set_position(c->buffer, LDNS_HEADER_SIZE);
838 		ldns_buffer_write_at(c->buffer, 4,
839 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
840 		ldns_buffer_flip(c->buffer);
841 		return 1;
842 	}
843 	if(worker->stats.extended)
844 		server_stats_insquery(&worker->stats, c, qinfo.qtype,
845 			qinfo.qclass, &edns, repinfo);
846 	if(c->type != comm_udp)
847 		edns.udp_size = 65535; /* max size for TCP replies */
848 	if(qinfo.qclass == LDNS_RR_CLASS_CH && answer_chaos(worker, &qinfo,
849 		&edns, c->buffer)) {
850 		server_stats_insrcode(&worker->stats, c->buffer);
851 		return 1;
852 	}
853 	if(local_zones_answer(worker->daemon->local_zones, &qinfo, &edns,
854 		c->buffer, worker->scratchpad)) {
855 		regional_free_all(worker->scratchpad);
856 		if(ldns_buffer_limit(c->buffer) == 0) {
857 			comm_point_drop_reply(repinfo);
858 			return 0;
859 		}
860 		server_stats_insrcode(&worker->stats, c->buffer);
861 		return 1;
862 	}
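	/* queries without the RD bit only inspect the cache (cache snooping);
	 * refuse them unless the ACL explicitly allows snooping */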
863 	if(!(LDNS_RD_WIRE(ldns_buffer_begin(c->buffer))) &&
864 		acl != acl_allow_snoop ) {
865 		ldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
866 		ldns_buffer_write_at(c->buffer, 4,
867 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
868 		LDNS_QR_SET(ldns_buffer_begin(c->buffer));
869 		LDNS_RCODE_SET(ldns_buffer_begin(c->buffer),
870 			LDNS_RCODE_REFUSED);
871 		ldns_buffer_flip(c->buffer);
872 		server_stats_insrcode(&worker->stats, c->buffer);
873 		log_addr(VERB_ALGO, "refused nonrec (cache snoop) query from",
874 			&repinfo->addr, repinfo->addrlen);
875 		return 1;
876 	}
877 	h = query_info_hash(&qinfo);
878 	if((e=slabhash_lookup(worker->env.msg_cache, h, &qinfo, 0))) {
879 		/* answer from cache - we have acquired a readlock on it */
880 		if(answer_from_cache(worker, &qinfo,
881 			(struct reply_info*)e->data,
882 			*(uint16_t*)ldns_buffer_begin(c->buffer),
883 			ldns_buffer_read_u16_at(c->buffer, 2), repinfo,
884 			&edns)) {
885 			/* prefetch it if the prefetch TTL expired */
886 			if(worker->env.cfg->prefetch && *worker->env.now >=
887 				((struct reply_info*)e->data)->prefetch_ttl) {
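				/* leeway is the TTL remaining on the cached
				 * reply; see PREFETCH_EXPIRY_ADD above */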
888 				uint32_t leeway = ((struct reply_info*)e->
889 					data)->ttl - *worker->env.now;
890 				lock_rw_unlock(&e->lock);
891 				reply_and_prefetch(worker, &qinfo,
892 					ldns_buffer_read_u16_at(c->buffer, 2),
893 					repinfo, leeway);
894 				return 0;
895 			}
896 			lock_rw_unlock(&e->lock);
897 			return 1;
898 		}
899 		verbose(VERB_ALGO, "answer from the cache failed");
900 		lock_rw_unlock(&e->lock);
901 	}
902 	if(!LDNS_RD_WIRE(ldns_buffer_begin(c->buffer))) {
903 		if(answer_norec_from_cache(worker, &qinfo,
904 			*(uint16_t*)ldns_buffer_begin(c->buffer),
905 			ldns_buffer_read_u16_at(c->buffer, 2), repinfo,
906 			&edns)) {
907 			return 1;
908 		}
909 		verbose(VERB_ALGO, "answer norec from cache -- "
910 			"need to validate or not primed");
911 	}
912 	ldns_buffer_rewind(c->buffer);
913 	server_stats_querymiss(&worker->stats, worker);
914 
915 	if(verbosity >= VERB_CLIENT) {
916 		if(c->type == comm_udp)
917 			log_addr(VERB_CLIENT, "udp request from",
918 				&repinfo->addr, repinfo->addrlen);
919 		else	log_addr(VERB_CLIENT, "tcp request from",
920 				&repinfo->addr, repinfo->addrlen);
921 	}
922 
923 	/* grab a work request structure for this new request */
924 	mesh_new_client(worker->env.mesh, &qinfo,
925 		ldns_buffer_read_u16_at(c->buffer, 2),
926 		&edns, repinfo, *(uint16_t*)ldns_buffer_begin(c->buffer));
927 	worker_mem_report(worker, NULL);
928 	return 0;
929 }
930 
931 void
932 worker_sighandler(int sig, void* arg)
933 {
934 	/* note that log, print and syscalls here can give race conditions. */
935 	/* we still print DETAIL logs, because this is extensive per-message
936 	 * logging anyway, and the operator may then be interested
937 	 * in the reason why unbound exits */
938 	struct worker* worker = (struct worker*)arg;
939 	switch(sig) {
940 #ifdef SIGHUP
941 		case SIGHUP:
942 			verbose(VERB_QUERY, "caught signal SIGHUP");
943 			comm_base_exit(worker->base);
944 			break;
945 #endif
946 		case SIGINT:
947 			verbose(VERB_QUERY, "caught signal SIGINT");
948 			worker->need_to_exit = 1;
949 			comm_base_exit(worker->base);
950 			break;
951 #ifdef SIGQUIT
952 		case SIGQUIT:
953 			verbose(VERB_QUERY, "caught signal SIGQUIT");
954 			worker->need_to_exit = 1;
955 			comm_base_exit(worker->base);
956 			break;
957 #endif
958 		case SIGTERM:
959 			verbose(VERB_QUERY, "caught signal SIGTERM");
960 			worker->need_to_exit = 1;
961 			comm_base_exit(worker->base);
962 			break;
963 		default:
964 			log_err("unknown signal: %d, ignored", sig);
965 			break;
966 	}
967 }
968 
969 /** restart statistics timer for worker, if enabled */
970 static void
971 worker_restart_timer(struct worker* worker)
972 {
973 	if(worker->env.cfg->stat_interval > 0) {
974 		struct timeval tv;
975 #ifndef S_SPLINT_S
976 		tv.tv_sec = worker->env.cfg->stat_interval;
977 		tv.tv_usec = 0;
978 #endif
979 		comm_timer_set(worker->stat_timer, &tv);
980 	}
981 }
982 
983 void worker_stat_timer_cb(void* arg)
984 {
985 	struct worker* worker = (struct worker*)arg;
986 	server_stats_log(&worker->stats, worker, worker->thread_num);
987 	mesh_stats(worker->env.mesh, "mesh has");
988 	worker_mem_report(worker, NULL);
989 	if(!worker->daemon->cfg->stat_cumulative) {
990 		worker_stats_clear(worker);
991 	}
992 	/* start next timer */
993 	worker_restart_timer(worker);
994 }
995 
996 void worker_probe_timer_cb(void* arg)
997 {
998 	struct worker* worker = (struct worker*)arg;
999 	struct timeval tv;
1000 #ifndef S_SPLINT_S
1001 	tv.tv_sec = (time_t)autr_probe_timer(&worker->env);
1002 	tv.tv_usec = 0;
1003 #endif
1004 	if(tv.tv_sec != 0)
1005 		comm_timer_set(worker->env.probe_timer, &tv);
1006 }
1007 
1008 struct worker*
1009 worker_create(struct daemon* daemon, int id, int* ports, int n)
1010 {
1011 	unsigned int seed;
1012 	struct worker* worker = (struct worker*)calloc(1,
1013 		sizeof(struct worker));
1014 	if(!worker)
1015 		return NULL;
1016 	worker->numports = n;
1017 	worker->ports = (int*)memdup(ports, sizeof(int)*n);
1018 	if(!worker->ports) {
1019 		free(worker);
1020 		return NULL;
1021 	}
1022 	worker->daemon = daemon;
1023 	worker->thread_num = id;
1024 	if(!(worker->cmd = tube_create())) {
1025 		free(worker->ports);
1026 		free(worker);
1027 		return NULL;
1028 	}
1029 	/* create random state here to avoid locking trouble in RAND_bytes */
1030 	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
1031 		(((unsigned int)worker->thread_num)<<17);
1032 		/* shift thread_num so it does not match our pid bits */
1033 	if(!(worker->rndstate = ub_initstate(seed, daemon->rand))) {
1034 		seed = 0;
1035 		log_err("could not init random numbers.");
1036 		tube_delete(worker->cmd);
1037 		free(worker->ports);
1038 		free(worker);
1039 		return NULL;
1040 	}
1041 	seed = 0;
1042 	return worker;
1043 }
1044 
1045 int
1046 worker_init(struct worker* worker, struct config_file *cfg,
1047 	struct listen_port* ports, int do_sigs)
1048 {
1049 	worker->need_to_exit = 0;
1050 	worker->base = comm_base_create(do_sigs);
1051 	if(!worker->base) {
1052 		log_err("could not create event handling base");
1053 		worker_delete(worker);
1054 		return 0;
1055 	}
1056 	comm_base_set_slow_accept_handlers(worker->base, &worker_stop_accept,
1057 		&worker_start_accept, worker);
1058 	if(do_sigs) {
1059 #ifdef SIGHUP
1060 		ub_thread_sig_unblock(SIGHUP);
1061 #endif
1062 		ub_thread_sig_unblock(SIGINT);
1063 #ifdef SIGQUIT
1064 		ub_thread_sig_unblock(SIGQUIT);
1065 #endif
1066 		ub_thread_sig_unblock(SIGTERM);
1067 #ifndef LIBEVENT_SIGNAL_PROBLEM
1068 		worker->comsig = comm_signal_create(worker->base,
1069 			worker_sighandler, worker);
1070 		if(!worker->comsig
1071 #ifdef SIGHUP
1072 			|| !comm_signal_bind(worker->comsig, SIGHUP)
1073 #endif
1074 #ifdef SIGQUIT
1075 			|| !comm_signal_bind(worker->comsig, SIGQUIT)
1076 #endif
1077 			|| !comm_signal_bind(worker->comsig, SIGTERM)
1078 			|| !comm_signal_bind(worker->comsig, SIGINT)) {
1079 			log_err("could not create signal handlers");
1080 			worker_delete(worker);
1081 			return 0;
1082 		}
1083 #endif /* LIBEVENT_SIGNAL_PROBLEM */
1084 		if(!daemon_remote_open_accept(worker->daemon->rc,
1085 			worker->daemon->rc_ports, worker)) {
1086 			worker_delete(worker);
1087 			return 0;
1088 		}
1089 #ifdef UB_ON_WINDOWS
1090 		wsvc_setup_worker(worker);
1091 #endif /* UB_ON_WINDOWS */
1092 	} else { /* !do_sigs */
1093 		worker->comsig = NULL;
1094 	}
1095 	worker->front = listen_create(worker->base, ports,
1096 		cfg->msg_buffer_size, (int)cfg->incoming_num_tcp,
1097 		worker->daemon->listen_sslctx, worker_handle_request, worker);
1098 	if(!worker->front) {
1099 		log_err("could not create listening sockets");
1100 		worker_delete(worker);
1101 		return 0;
1102 	}
1103 	worker->back = outside_network_create(worker->base,
1104 		cfg->msg_buffer_size, (size_t)cfg->outgoing_num_ports,
1105 		cfg->out_ifs, cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
1106 		cfg->do_tcp?cfg->outgoing_num_tcp:0,
1107 		worker->daemon->env->infra_cache, worker->rndstate,
1108 		cfg->use_caps_bits_for_id, worker->ports, worker->numports,
1109 		cfg->unwanted_threshold, &worker_alloc_cleanup, worker,
1110 		cfg->do_udp, worker->daemon->connect_sslctx);
1111 	if(!worker->back) {
1112 		log_err("could not create outgoing sockets");
1113 		worker_delete(worker);
1114 		return 0;
1115 	}
1116 	/* start listening to commands */
1117 	if(!tube_setup_bg_listen(worker->cmd, worker->base,
1118 		&worker_handle_control_cmd, worker)) {
1119 		log_err("could not create control compt.");
1120 		worker_delete(worker);
1121 		return 0;
1122 	}
1123 	worker->stat_timer = comm_timer_create(worker->base,
1124 		worker_stat_timer_cb, worker);
1125 	if(!worker->stat_timer) {
1126 		log_err("could not create statistics timer");
1127 	}
1128 
1129 	/* we use the msg_buffer_size as a good estimate of the memory
1130 	 * usage size the user wants */
1131 	worker->scratchpad = regional_create_custom(cfg->msg_buffer_size);
1132 	if(!worker->scratchpad) {
1133 		log_err("malloc failure");
1134 		worker_delete(worker);
1135 		return 0;
1136 	}
1137 
1138 	server_stats_init(&worker->stats, cfg);
1139 	alloc_init(&worker->alloc, &worker->daemon->superalloc,
1140 		worker->thread_num);
1141 	alloc_set_id_cleanup(&worker->alloc, &worker_alloc_cleanup, worker);
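	/* copy the shared module environment, then fill in per-thread fields */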
1142 	worker->env = *worker->daemon->env;
1143 	comm_base_timept(worker->base, &worker->env.now, &worker->env.now_tv);
1144 	if(worker->thread_num == 0)
1145 		log_set_time(worker->env.now);
1146 	worker->env.worker = worker;
1147 	worker->env.send_query = &worker_send_query;
1148 	worker->env.alloc = &worker->alloc;
1149 	worker->env.rnd = worker->rndstate;
1150 	worker->env.scratch = worker->scratchpad;
1151 	worker->env.mesh = mesh_create(&worker->daemon->mods, &worker->env);
1152 	worker->env.detach_subs = &mesh_detach_subs;
1153 	worker->env.attach_sub = &mesh_attach_sub;
1154 	worker->env.kill_sub = &mesh_state_delete;
1155 	worker->env.detect_cycle = &mesh_detect_cycle;
1156 	worker->env.scratch_buffer = ldns_buffer_new(cfg->msg_buffer_size);
1157 	if(!(worker->env.fwds = forwards_create()) ||
1158 		!forwards_apply_cfg(worker->env.fwds, cfg)) {
1159 		log_err("Could not set forward zones");
1160 		worker_delete(worker);
1161 		return 0;
1162 	}
1163 	/* one probe timer per process -- if we have RFC 5011 anchors */
1164 	if(autr_get_num_anchors(worker->env.anchors) > 0
1165 #ifndef THREADS_DISABLED
1166 		&& worker->thread_num == 0
1167 #endif
1168 		) {
1169 		struct timeval tv;
1170 		tv.tv_sec = 0;
1171 		tv.tv_usec = 0;
1172 		worker->env.probe_timer = comm_timer_create(worker->base,
1173 			worker_probe_timer_cb, worker);
1174 		if(!worker->env.probe_timer) {
1175 			log_err("could not create 5011-probe timer");
1176 		} else {
1177 			/* let timer fire, then it can reset itself */
1178 			comm_timer_set(worker->env.probe_timer, &tv);
1179 		}
1180 	}
1181 	if(!worker->env.mesh || !worker->env.scratch_buffer) {
1182 		worker_delete(worker);
1183 		return 0;
1184 	}
1185 	worker_mem_report(worker, NULL);
1186 	/* if statistics enabled start timer */
1187 	if(worker->env.cfg->stat_interval > 0) {
1188 		verbose(VERB_ALGO, "set statistics interval %d secs",
1189 			worker->env.cfg->stat_interval);
1190 		worker_restart_timer(worker);
1191 	}
1192 	return 1;
1193 }
1194 
1195 void
1196 worker_work(struct worker* worker)
1197 {
1198 	comm_base_dispatch(worker->base);
1199 }
1200 
1201 void
1202 worker_delete(struct worker* worker)
1203 {
1204 	if(!worker)
1205 		return;
1206 	if(worker->env.mesh && verbosity >= VERB_OPS) {
1207 		server_stats_log(&worker->stats, worker, worker->thread_num);
1208 		mesh_stats(worker->env.mesh, "mesh has");
1209 		worker_mem_report(worker, NULL);
1210 	}
1211 	outside_network_quit_prepare(worker->back);
1212 	mesh_delete(worker->env.mesh);
1213 	ldns_buffer_free(worker->env.scratch_buffer);
1214 	forwards_delete(worker->env.fwds);
1215 	listen_delete(worker->front);
1216 	outside_network_delete(worker->back);
1217 	comm_signal_delete(worker->comsig);
1218 	tube_delete(worker->cmd);
1219 	comm_timer_delete(worker->stat_timer);
1220 	comm_timer_delete(worker->env.probe_timer);
1221 	free(worker->ports);
1222 	if(worker->thread_num == 0) {
1223 		log_set_time(NULL);
1224 #ifdef UB_ON_WINDOWS
1225 		wsvc_desetup_worker(worker);
1226 #endif /* UB_ON_WINDOWS */
1227 	}
1228 	comm_base_delete(worker->base);
1229 	ub_randfree(worker->rndstate);
1230 	alloc_clear(&worker->alloc);
1231 	regional_destroy(worker->scratchpad);
1232 	free(worker);
1233 }
1234 
1235 /** compare outbound entry qstates */
1236 static int
1237 outbound_entry_compare(void* a, void* b)
1238 {
1239 	struct outbound_entry* e1 = (struct outbound_entry*)a;
1240 	struct outbound_entry* e2 = (struct outbound_entry*)b;
1241 	if(e1->qstate == e2->qstate)
1242 		return 1;
1243 	return 0;
1244 }
1245 
1246 struct outbound_entry*
1247 worker_send_query(uint8_t* qname, size_t qnamelen, uint16_t qtype,
1248 	uint16_t qclass, uint16_t flags, int dnssec, int want_dnssec,
1249 	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
1250 	size_t zonelen, struct module_qstate* q)
1251 {
1252 	struct worker* worker = q->env->worker;
1253 	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
1254 		q->region, sizeof(*e));
1255 	if(!e)
1256 		return NULL;
1257 	e->qstate = q;
1258 	e->qsent = outnet_serviced_query(worker->back, qname,
1259 		qnamelen, qtype, qclass, flags, dnssec, want_dnssec,
1260 		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
1261 		addrlen, zone, zonelen, worker_handle_service_reply, e,
1262 		worker->back->udp_buff, &outbound_entry_compare);
1263 	if(!e->qsent) {
1264 		return NULL;
1265 	}
1266 	return e;
1267 }
1268 
1269 void
1270 worker_alloc_cleanup(void* arg)
1271 {
1272 	struct worker* worker = (struct worker*)arg;
1273 	slabhash_clear(&worker->env.rrset_cache->table);
1274 	slabhash_clear(worker->env.msg_cache);
1275 }
1276 
1277 void worker_stats_clear(struct worker* worker)
1278 {
1279 	server_stats_init(&worker->stats, worker->env.cfg);
1280 	mesh_stats_clear(worker->env.mesh);
1281 	worker->back->unwanted_replies = 0;
1282 }
1283 
1284 void worker_start_accept(void* arg)
1285 {
1286 	struct worker* worker = (struct worker*)arg;
1287 	listen_start_accept(worker->front);
1288 	if(worker->thread_num == 0)
1289 		daemon_remote_start_accept(worker->daemon->rc);
1290 }
1291 
1292 void worker_stop_accept(void* arg)
1293 {
1294 	struct worker* worker = (struct worker*)arg;
1295 	listen_stop_accept(worker->front);
1296 	if(worker->thread_num == 0)
1297 		daemon_remote_stop_accept(worker->daemon->rc);
1298 }
1299 
1300 /* --- fake callbacks for fptr_wlist to work --- */
1301 struct outbound_entry* libworker_send_query(uint8_t* ATTR_UNUSED(qname),
1302 	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
1303 	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
1304 	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
1305 	struct sockaddr_storage* ATTR_UNUSED(addr),
1306 	socklen_t ATTR_UNUSED(addrlen), struct module_qstate* ATTR_UNUSED(q))
1307 {
1308 	log_assert(0);
1309 	return 0;
1310 }
1311 
1312 int libworker_handle_reply(struct comm_point* ATTR_UNUSED(c),
1313 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1314         struct comm_reply* ATTR_UNUSED(reply_info))
1315 {
1316 	log_assert(0);
1317 	return 0;
1318 }
1319 
1320 int libworker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
1321 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1322         struct comm_reply* ATTR_UNUSED(reply_info))
1323 {
1324 	log_assert(0);
1325 	return 0;
1326 }
1327 
1328 void libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
1329         uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
1330         int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
1331 {
1332 	log_assert(0);
1333 }
1334 
1335 void libworker_fg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1336         ldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1337 	char* ATTR_UNUSED(why_bogus))
1338 {
1339 	log_assert(0);
1340 }
1341 
1342 void libworker_bg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1343         ldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1344 	char* ATTR_UNUSED(why_bogus))
1345 {
1346 	log_assert(0);
1347 }
1348 
1349 int context_query_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1350 {
1351 	log_assert(0);
1352 	return 0;
1353 }
1354 
1355 int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
1356 {
1357         log_assert(0);
1358         return 0;
1359 }
1360 
1361 int codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1362 {
1363         log_assert(0);
1364         return 0;
1365 }
1366 
1367