xref: /openbsd-src/usr.sbin/unbound/libunbound/libworker.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*
2  * libunbound/worker.c - worker thread or process that resolves
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file contains the worker process or thread that performs
40  * the DNS resolving and validation. The worker is called by a procedure
41  * and if in the background continues until exit, if in the foreground
42  * returns from the procedure when done.
43  */
44 #include "config.h"
45 #ifdef HAVE_SSL
46 #include <openssl/ssl.h>
47 #endif
48 #include "libunbound/libworker.h"
49 #include "libunbound/context.h"
50 #include "libunbound/unbound.h"
51 #include "libunbound/unbound-event.h"
52 #include "services/outside_network.h"
53 #include "services/mesh.h"
54 #include "services/localzone.h"
55 #include "services/cache/rrset.h"
56 #include "services/outbound_list.h"
57 #include "util/module.h"
58 #include "util/regional.h"
59 #include "util/random.h"
60 #include "util/config_file.h"
61 #include "util/netevent.h"
62 #include "util/storage/lookup3.h"
63 #include "util/storage/slabhash.h"
64 #include "util/net_help.h"
65 #include "util/data/dname.h"
66 #include "util/data/msgreply.h"
67 #include "util/data/msgencode.h"
68 #include "util/tube.h"
69 #include "iterator/iter_fwd.h"
70 #include "iterator/iter_hints.h"
71 #include "ldns/sbuffer.h"
72 #include "ldns/str2wire.h"
73 
74 /** handle new query command for bg worker */
75 static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);
76 
/** delete libworker env.
 * Frees the worker-private parts of the module_env created in
 * libworker_setup, then the shared env copy itself, the SSL context and
 * the outside network.  NOTE(review): order appears deliberate — the
 * outside network is told to quit before the mesh is deleted, presumably
 * so no pending network callbacks fire into deleted mesh state; confirm
 * before reordering. */
static void
libworker_delete_env(struct libworker* w)
{
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		/* release the alloc cache back to the context; the flag is
		 * the same locking condition used when it was obtained:
		 * lock unless we are a forked background process */
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		sldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	/* SSL_CTX_free(NULL) is a no-op, safe when ssl_upstream was off */
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
}
98 
99 /** delete libworker struct */
100 static void
101 libworker_delete(struct libworker* w)
102 {
103 	if(!w) return;
104 	libworker_delete_env(w);
105 	comm_base_delete(w->base);
106 	free(w);
107 }
108 
109 void
110 libworker_delete_event(struct libworker* w)
111 {
112 	if(!w) return;
113 	libworker_delete_env(w);
114 	comm_base_delete_no_base(w->base);
115 	free(w);
116 }
117 
/** setup fresh libworker struct.
 * @param ctx: the library context to attach to.
 * @param is_bg: nonzero for a background (thread or fork) worker.
 * @param eb: caller-provided event base for the event API, or NULL to
 *	create an own comm base.
 * @return new worker or NULL on failure (everything cleaned up). */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct event_base* eb)
{
	unsigned int seed;
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	/* private copy of the context environment; the worker overwrites
	 * the per-worker members (alloc, scratch, rnd, fwds, hints) below */
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	/* "!w->is_bg || w->is_bg_thread" is the locking condition used
	 * throughout: take cfglock when the context is shared with the
	 * application (foreground or bg-thread), not in a forked process.
	 * NOTE(review): w->is_bg_thread is still 0 here (calloc zeroed);
	 * libworker_bg() sets it only after this returns — verify the
	 * unlocked obtain is intended in thread mode. */
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	/* per-worker scratch space plus config-derived forwards and hints */
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream) {
		w->sslctx = connect_sslctx_create(NULL, NULL, NULL);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	/* all allocation failures above funnel into this single check */
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
	/* RNG seed mixed from time, pid, thread number and alloc id */
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
		(((unsigned int)w->thread_num)<<17);
	seed ^= (unsigned int)w->env->alloc->next_id;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(seed, ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		seed = 0;
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}
	/* do not keep the seed value around in memory */
	seed = 0;

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		/* NOTE(review): libworker_delete runs while cfglock may
		 * still be held and is unlocked only afterwards — verify
		 * this ordering against context_release_alloc's locking */
		int locked = !w->is_bg || w->is_bg_thread;
		libworker_delete(w);
		if(locked) {
			lock_basic_unlock(&ctx->cfglock);
		}
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		&libworker_alloc_cleanup, w, cfg->do_udp, w->sslctx,
		cfg->delay_close);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	/* hook this worker's entry points into the module environment */
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}
256 
/** create a worker for the event-based (async event) API: runs in the
 * caller's event base (eb), not in a background thread or process */
struct libworker* libworker_create_event(struct ub_ctx* ctx,
	struct event_base* eb)
{
	return libworker_setup(ctx, 0, eb);
}
262 
263 /** handle cancel command for bg worker */
264 static void
265 handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
266 {
267 	struct ctx_query* q;
268 	if(w->is_bg_thread) {
269 		lock_basic_lock(&w->ctx->cfglock);
270 		q = context_deserialize_cancel(w->ctx, buf, len);
271 		lock_basic_unlock(&w->ctx->cfglock);
272 	} else {
273 		q = context_deserialize_cancel(w->ctx, buf, len);
274 	}
275 	if(!q) {
276 		/* probably simply lookup failed, i.e. the message had been
277 		 * processed and answered before the cancel arrived */
278 		return;
279 	}
280 	q->cancelled = 1;
281 	free(buf);
282 }
283 
/** do control command coming into bg server.
 * Ownership of msg passes to this function: each branch frees it
 * directly or hands it to a handler that takes ownership. */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			/* answers travel front-end-wards; seeing one here
			 * means the command pipe is out of sync */
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
		case UB_LIBCMD_QUIT:
			free(msg);
			/* stop the dispatch loop in libworker_dobg */
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}
306 
307 /** handle control command coming into server */
308 void
309 libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
310 	uint8_t* msg, size_t len, int err, void* arg)
311 {
312 	struct libworker* w = (struct libworker*)arg;
313 
314 	if(err != 0) {
315 		free(msg);
316 		/* it is of no use to go on, exit */
317 		comm_base_exit(w->base);
318 		return;
319 	}
320 	libworker_do_cmd(w, msg, len); /* also frees the buf */
321 }
322 
/** the background thread func: event loop of the bg worker.
 * Commands arrive over ctx->qq_pipe, results leave over ctx->rr_pipe.
 * Runs until a QUIT command calls comm_base_exit, then sends a final
 * QUIT message back so the front end knows the worker is gone. */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work: dispatch events until comm_base_exit is called */
	comm_base_dispatch(w->base);

	/* cleanup: detach from pipes, delete worker (ctx and its pipes
	 * stay valid), then notify the front end that we quit */
	m = UB_LIBCMD_QUIT;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}
371 
/** start the background worker, as a thread when ctx->dothread is set,
 * else as a forked process.
 * @return UB_NOERROR, UB_NOMEM or UB_FORKFAIL. */
int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1, NULL);
		if(!w) return UB_NOMEM;
		/* NOTE(review): set only after setup, so libworker_setup
		 * saw is_bg_thread==0; at that point the bg thread does
		 * not exist yet — verify the unlocked setup is intended */
		w->is_bg_thread = 1;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				/* child becomes the resolver process */
				w = libworker_setup(ctx, 1, NULL);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				/* parent: close non-used parts, so that the
				 * worker bgprocess gets 'pipe closed' when
				 * the main process exits */
				tube_close_read(ctx->qq_pipe);
				tube_close_write(ctx->rr_pipe);
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}
416 
417 /** get msg reply struct (in temp region) */
418 static struct reply_info*
419 parse_reply(sldns_buffer* pkt, struct regional* region, struct query_info* qi)
420 {
421 	struct reply_info* rep;
422 	struct msg_parse* msg;
423 	if(!(msg = regional_alloc(region, sizeof(*msg)))) {
424 		return NULL;
425 	}
426 	memset(msg, 0, sizeof(*msg));
427 	sldns_buffer_set_position(pkt, 0);
428 	if(parse_packet(pkt, msg, region) != 0)
429 		return 0;
430 	if(!parse_create_msg(pkt, msg, NULL, qi, &rep, region)) {
431 		return 0;
432 	}
433 	return rep;
434 }
435 
436 /** insert canonname */
437 static int
438 fill_canon(struct ub_result* res, uint8_t* s)
439 {
440 	char buf[255+2];
441 	dname_str(s, buf);
442 	res->canonname = strdup(buf);
443 	return res->canonname != 0;
444 }
445 
/** fill data into result.
 * @param res: result to fill; data/len become NULL/0-terminated arrays.
 * @param answer: answer RRset, or NULL for a no-data answer.
 * @param finalcname: final CNAME target, or NULL.
 * @param rq: the parsed query info.
 * @param rep: the parsed reply.
 * @return 0 on out of memory, 1 on success. */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		/* no answer RRset: set canonname from the final CNAME
		 * target if any, and return empty terminated arrays */
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		res->len = (int*)calloc(1, sizeof(int));
		return (res->data && res->len);
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	/* canonname only when the answer owner name differs from qname */
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	/* count+1: one extra slot for the NULL/0 terminator */
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->data || !res->len)
		return 0; /* out of memory */
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i])
			return 0; /* out of memory */
	}
	/* ttl for positive answers, from CNAME and answer RRs:
	 * take the minimum ttl over the answer section */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}
500 
/** fill result from parsed message, on error fills servfail.
 * @param res: result structure to fill in.
 * @param buf: wire-format reply packet.
 * @param temp: scratch region for parsing; contents are temporary.
 * @param msg_security: validator verdict, sets res->secure/res->bogus. */
void
libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	/* default to SERVFAIL; overwritten once buf parses and fills */
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus)
		res->bogus = 1;
}
528 
/** fillup fg results.
 * rcode != 0: an error result; only rcode and security status stored.
 * rcode == 0: buf holds the reply packet; a private copy is kept in
 * q->msg and the packet is parsed into q->res. */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus)
{
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	/* start pessimistic; parse below upgrades the result */
	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = 0;
	/* keep a private copy of the wire message for the API user */
	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
	q->msg_len = sldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}
554 
555 void
556 libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
557 	char* why_bogus)
558 {
559 	struct ctx_query* q = (struct ctx_query*)arg;
560 	/* fg query is done; exit comm base */
561 	comm_base_exit(q->w->base);
562 
563 	libworker_fillup_fg(q, rcode, buf, s, why_bogus);
564 }
565 
566 /** setup qinfo and edns */
567 static int
568 setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
569 	struct query_info* qinfo, struct edns_data* edns)
570 {
571 	qinfo->qtype = (uint16_t)q->res->qtype;
572 	qinfo->qclass = (uint16_t)q->res->qclass;
573 	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
574 	if(!qinfo->qname) {
575 		return 0;
576 	}
577 	edns->edns_present = 1;
578 	edns->ext_rcode = 0;
579 	edns->edns_version = 0;
580 	edns->bits = EDNS_DO;
581 	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
582 		edns->udp_size = (uint16_t)sldns_buffer_capacity(
583 			w->back->udp_buff);
584 	else	edns->udp_size = 65535;
585 	return 1;
586 }
587 
588 int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
589 {
590 	struct libworker* w = libworker_setup(ctx, 0, NULL);
591 	uint16_t qflags, qid;
592 	struct query_info qinfo;
593 	struct edns_data edns;
594 	if(!w)
595 		return UB_INITFAIL;
596 	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
597 		libworker_delete(w);
598 		return UB_SYNTAX;
599 	}
600 	qid = 0;
601 	qflags = BIT_RD;
602 	q->w = w;
603 	/* see if there is a fixed answer */
604 	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
605 	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
606 	if(local_zones_answer(ctx->local_zones, &qinfo, &edns,
607 		w->back->udp_buff, w->env->scratch)) {
608 		regional_free_all(w->env->scratch);
609 		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
610 			w->back->udp_buff, sec_status_insecure, NULL);
611 		libworker_delete(w);
612 		free(qinfo.qname);
613 		return UB_NOERROR;
614 	}
615 	/* process new query */
616 	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
617 		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
618 		free(qinfo.qname);
619 		return UB_NOMEM;
620 	}
621 	free(qinfo.qname);
622 
623 	/* wait for reply */
624 	comm_base_dispatch(w->base);
625 
626 	libworker_delete(w);
627 	return UB_NOERROR;
628 }
629 
/** mesh callback for the event API: delete the query from the context
 * and, unless it was cancelled, invoke the user's callback. */
void
libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* copy out everything needed from q before it is deleted below */
	ub_event_callback_t cb = (ub_event_callback_t)q->cb;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback; sec: 0 unchecked, 1 bogus, 2 secure */
		int sec = 0;
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (void*)sldns_buffer_begin(buf),
			(int)sldns_buffer_limit(buf), sec, why_bogus);
	}
}
658 
/** attach an async query to the mesh of the context's event worker.
 * @param async_id: if not NULL, receives the query number for cancel.
 * @return UB_NOERROR, UB_INITFAIL, UB_SYNTAX or UB_NOMEM. */
int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
	int* async_id)
{
	struct libworker* w = ctx->event_worker;
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
		return UB_SYNTAX;
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		/* answer immediately through the done callback; it also
		 * removes q from the context (see libworker_event_done_cb) */
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL);
		return UB_NOERROR;
	}
	/* process new query */
	if(async_id)
		*async_id = q->querynum;
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_event_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);
	return UB_NOERROR;
}
695 
/** add result to the bg worker result queue.
 * @param pkt: reply packet, or NULL for an error-only answer.
 * @param err: UB_NOERROR or an error code to report to the front end.
 * @param reason: why-bogus string, or NULL.
 * In fork mode q is serialized (with pkt) and deleted here.
 * NOTE(review): in thread mode q is kept alive — presumably the front
 * end deletes it when it receives the answer; verify. */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
	int err, char* reason)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		if(pkt) {
			/* copy the packet into q; if that copy fails,
			 * degrade the answer to an UB_NOMEM error */
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg)
				msg = context_serialize_answer(q, UB_NOMEM,
				NULL, &len);
			else	msg = context_serialize_answer(q, err,
				NULL, &len);
		} else msg = context_serialize_answer(q, err, NULL, &len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}
737 
/** mesh callback for a finished background query: queue the result for
 * the front end, unless the query was cancelled in the meantime. */
void
libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled) {
		if(q->w->is_bg_thread) {
			/* delete it now: in thread mode the query lives
			 * in the shared context tree */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	/* with no packet, encode the error rcode into the scratch buffer */
	if(!buf)
		buf = q->w->env->scratch_buffer;
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus);
}
765 
766 
/** handle new query command for bg worker.
 * This function owns the serialized command buffer buf and frees it. */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		/* thread mode: look the query up in the shared context */
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		/* fork mode: rebuild the query from the serialized form */
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query; on failure report UB_NOMEM to the front end */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL);
	}
	free(qinfo.qname);
}
812 
813 void libworker_alloc_cleanup(void* arg)
814 {
815 	struct libworker* w = (struct libworker*)arg;
816 	slabhash_clear(&w->env->rrset_cache->table);
817         slabhash_clear(w->env->msg_cache);
818 }
819 
/** module_env send_query hook: hand a query to the outside network.
 * @return outbound entry allocated in q's region (freed with the query
 * state), or NULL when allocation or sending fails. */
struct outbound_entry* libworker_send_query(uint8_t* qname, size_t qnamelen,
        uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
	int want_dnssec, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, struct module_qstate* q)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	/* allocate in the qstate region so no separate free is needed */
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	/* replies are routed to libworker_handle_service_reply with e */
	e->qsent = outnet_serviced_query(w->back, qname,
		qnamelen, qtype, qclass, flags, dnssec, want_dnssec,
		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
		addrlen, zone, zonelen, libworker_handle_service_reply, e,
		w->back->udp_buff);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}
841 
/** comm point callback for a raw reply; reports the outcome to the
 * mesh.  Always returns 0 (no reply is sent back on the comm point). */
int
libworker_handle_reply(struct comm_point* c, void* arg, int error,
        struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct libworker* lw = (struct libworker*)q->env->worker;
	/* stack-local entry: this path has no serviced outbound entry */
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
		return 0;
	}
	/* sanity check: must be a response (QR set) to a QUERY opcode
	 * with at most one question */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
	return 0;
}
870 
871 int
872 libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
873         struct comm_reply* reply_info)
874 {
875 	struct outbound_entry* e = (struct outbound_entry*)arg;
876 	struct libworker* lw = (struct libworker*)e->qstate->env->worker;
877 
878 	if(error != 0) {
879 		mesh_report_reply(lw->env->mesh, e, reply_info, error);
880 		return 0;
881 	}
882 	/* sanity check. */
883 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
884 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
885 			LDNS_PACKET_QUERY
886 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
887 		/* error becomes timeout for the module as if this reply
888 		 * never arrived. */
889 		mesh_report_reply(lw->env->mesh, e, reply_info,
890 			NETEVENT_TIMEOUT);
891 		return 0;
892 	}
893 	mesh_report_reply(lw->env->mesh,  e, reply_info, NETEVENT_NOERROR);
894 	return 0;
895 }
896 
/* --- fake callbacks for fptr_wlist to work --- */
/** stub: the library build has no daemon worker; this symbol exists
 * only so the fptr_wlist function-pointer whitelist links, and must
 * never be called (asserts if it is) */
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
904 
/** stub for fptr_wlist; must never be called in the library build */
int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}
912 
/** stub for fptr_wlist; must never be called in the library build */
int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}
920 
/** stub for fptr_wlist; must never be called in the library build */
int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}
928 
/** stub for fptr_wlist; must never be called in the library build */
int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}
936 
/** stub for fptr_wlist; must never be called in the library build */
int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}
944 
/** stub for fptr_wlist; must never be called in the library build */
void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
949 
/** stub for fptr_wlist; the library uses libworker_send_query instead,
 * this daemon variant must never be called here */
struct outbound_entry* worker_send_query(uint8_t* ATTR_UNUSED(qname),
	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
	struct sockaddr_storage* ATTR_UNUSED(addr),
	socklen_t ATTR_UNUSED(addrlen), struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}
960 
/** stub for fptr_wlist; must never be called in the library build */
void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
966 
/** stub for fptr_wlist; must never be called in the library build */
void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
971 
/** stub for fptr_wlist; must never be called in the library build */
void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
976 
/** stub for fptr_wlist; must never be called in the library build */
void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
981 
/** stub for fptr_wlist; must never be called in the library build */
void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
986 
/** stub for fptr_wlist; must never be called in the library build */
int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}
992 
/** stub for fptr_wlist; must never be called in the library build */
int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}
999 
1000 int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1001 {
1002         log_assert(0);
1003         return 0;
1004 }
1005 
1006 void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
1007 {
1008         log_assert(0);
1009 }
1010 
#ifdef UB_ON_WINDOWS
/** stub for fptr_wlist; must never be called in the library build */
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
        ATTR_UNUSED(arg)) {
        log_assert(0);
}

/** stub for fptr_wlist; must never be called in the library build */
void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
        log_assert(0);
}
#endif /* UB_ON_WINDOWS */
1024