/*
 * edns-subnet/subnetmod.c - edns subnet module. Must be called before validator
 * and iterator.
 *
 * Copyright (c) 2013, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * \file
 * subnet module for unbound.
 */

#include "config.h"

#ifdef CLIENT_SUBNET /* keeps splint happy */

#include "edns-subnet/subnetmod.h"
#include "edns-subnet/edns-subnet.h"
#include "edns-subnet/addrtree.h"
#include "edns-subnet/subnet-whitelist.h"

#include "services/mesh.h"
#include "services/cache/dns.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/storage/slabhash.h"
#include "util/config_file.h"
#include "util/data/msgreply.h"
#include "sldns/sbuffer.h"
#include "iterator/iter_utils.h"

/** externally called */
void
subnet_data_delete(void *d, void *ATTR_UNUSED(arg))
{
	struct subnet_msg_cache_data *r;
	r = (struct subnet_msg_cache_data*)d;
	addrtree_delete(r->tree4);
	addrtree_delete(r->tree6);
	free(r);
}

/** externally called */
size_t
msg_cache_sizefunc(void *k, void *d)
{
	struct msgreply_entry *q = (struct msgreply_entry*)k;
	struct subnet_msg_cache_data *r = (struct subnet_msg_cache_data*)d;
	size_t s = sizeof(struct msgreply_entry)
		+ sizeof(struct subnet_msg_cache_data)
		+ q->key.qname_len + lock_get_mem(&q->entry.lock);
	s += addrtree_size(r->tree4);
	s += addrtree_size(r->tree6);
	return s;
}

/** new query for ecs module */
static int
subnet_new_qstate(struct module_qstate *qstate, int id)
{
	struct subnet_qstate *sq = (struct subnet_qstate*)regional_alloc(
		qstate->region, sizeof(struct subnet_qstate));
	if(!sq)
		return 0;
	qstate->minfo[id] = sq;
	memset(sq, 0, sizeof(*sq));
	sq->started_no_cache_store = qstate->no_cache_store;
	return 1;
}

/** Add ecs struct to edns list, after rendering it to wire format. */
static void
ecs_opt_list_append(struct ecs_data* ecs, struct edns_option** list,
	struct module_qstate *qstate)
{
	size_t sn_octs, sn_octs_remainder;
	sldns_buffer* buf = qstate->env->scratch_buffer;

	if(ecs->subnet_validdata) {
		log_assert(ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4 ||
			ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP6);
		log_assert(ecs->subnet_addr_fam != EDNSSUBNET_ADDRFAM_IP4 ||
			ecs->subnet_source_mask <= INET_SIZE*8);
		log_assert(ecs->subnet_addr_fam != EDNSSUBNET_ADDRFAM_IP6 ||
			ecs->subnet_source_mask <= INET6_SIZE*8);

		sn_octs = ecs->subnet_source_mask / 8;
		sn_octs_remainder =
			(size_t)((ecs->subnet_source_mask % 8)>0?1:0);

		log_assert(sn_octs + sn_octs_remainder <= INET6_SIZE);

		sldns_buffer_clear(buf);
		sldns_buffer_write_u16(buf, ecs->subnet_addr_fam);
		sldns_buffer_write_u8(buf, ecs->subnet_source_mask);
		sldns_buffer_write_u8(buf, ecs->subnet_scope_mask);
		sldns_buffer_write(buf, ecs->subnet_addr, sn_octs);
		if(sn_octs_remainder)
			sldns_buffer_write_u8(buf, ecs->subnet_addr[sn_octs] &
				~(0xFF >> (ecs->subnet_source_mask % 8)));
		sldns_buffer_flip(buf);

		edns_opt_list_append(list,
				qstate->env->cfg->client_subnet_opcode,
				sn_octs + sn_octs_remainder + 4,
				sldns_buffer_begin(buf), qstate->region);
	}
}

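/*
 * Illustrative note (added, not in the original source): the option body
 * rendered above follows the ECS wire format of RFC 7871:
 *
 *     FAMILY (2 octets) | SOURCE PREFIX-LENGTH (1) | SCOPE PREFIX-LENGTH (1)
 *     | ADDRESS, truncated to the source prefix length
 *
 * For example, a 192.0.2.0/24 source would be rendered as the 7 octets
 * 00 01 18 00 c0 00 02: family 1 (IPv4), source prefix 24, scope 0, and the
 * first 3 address octets. Masking the final partial octet above keeps any
 * host bits zero, as the RFC requires.
 */
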
int ecs_whitelist_check(struct query_info* qinfo,
	uint16_t ATTR_UNUSED(flags), struct module_qstate* qstate,
	struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
	struct regional* ATTR_UNUSED(region), int id, void* ATTR_UNUSED(cbargs))
{
	struct subnet_qstate *sq;
	struct subnet_env *sn_env;

	if(!(sq=(struct subnet_qstate*)qstate->minfo[id]))
		return 1;
	sn_env = (struct subnet_env*)qstate->env->modinfo[id];

	/* Cache by default, might be disabled after parsing EDNS option
	 * received from nameserver. */
	if(!iter_stub_fwd_no_cache(qstate, &qstate->qinfo)) {
		qstate->no_cache_store = 0;
	}

	if(sq->ecs_server_out.subnet_validdata && ((sq->subnet_downstream &&
		qstate->env->cfg->client_subnet_always_forward) ||
		ecs_is_whitelisted(sn_env->whitelist,
		addr, addrlen, qinfo->qname, qinfo->qname_len,
		qinfo->qclass))) {
		/* Address is on the whitelist or the client query contains an
		 * ECS option, so we want to send out ECS. Only add the option
		 * if it is not already set. */
		if(!(sq->subnet_sent)) {
			ecs_opt_list_append(&sq->ecs_server_out,
				&qstate->edns_opts_back_out, qstate);
			sq->subnet_sent = 1;
		}
	}
	else if(sq->subnet_sent) {
		/* The outgoing ECS option is set, but we don't want to send it
		 * to this address; remove the option. */
		edns_opt_list_remove(&qstate->edns_opts_back_out,
			qstate->env->cfg->client_subnet_opcode);
		sq->subnet_sent = 0;
	}
	return 1;
}


void
subnet_markdel(void* key)
{
	struct msgreply_entry *e = (struct msgreply_entry*)key;
	e->key.qtype = 0;
	e->key.qclass = 0;
}

int
subnetmod_init(struct module_env *env, int id)
{
	struct subnet_env *sn_env = (struct subnet_env*)calloc(1,
		sizeof(struct subnet_env));
	if(!sn_env) {
		log_err("malloc failure");
		return 0;
	}
	alloc_init(&sn_env->alloc, NULL, 0);
	env->modinfo[id] = (void*)sn_env;
	/* Copy msg_cache settings */
	sn_env->subnet_msg_cache = slabhash_create(env->cfg->msg_cache_slabs,
		HASH_DEFAULT_STARTARRAY, env->cfg->msg_cache_size,
		msg_cache_sizefunc, query_info_compare, query_entry_delete,
		subnet_data_delete, NULL);
	if(!sn_env->subnet_msg_cache) {
		log_err("subnet: could not create cache");
		free(sn_env);
		env->modinfo[id] = NULL;
		return 0;
	}
	slabhash_setmarkdel(sn_env->subnet_msg_cache, &subnet_markdel);
	/* whitelist for edns subnet capable servers */
	sn_env->whitelist = ecs_whitelist_create();
	if(!sn_env->whitelist ||
		!ecs_whitelist_apply_cfg(sn_env->whitelist, env->cfg)) {
		log_err("subnet: could not create ECS whitelist");
		slabhash_delete(sn_env->subnet_msg_cache);
		free(sn_env);
		env->modinfo[id] = NULL;
		return 0;
	}

	verbose(VERB_QUERY, "subnet: option registered (%d)",
		env->cfg->client_subnet_opcode);
	/* Create new mesh state for all queries. */
	env->unique_mesh = 1;
	if(!edns_register_option(env->cfg->client_subnet_opcode,
		env->cfg->client_subnet_always_forward /* bypass cache */,
		0 /* no aggregation */, env)) {
		log_err("subnet: could not register opcode");
		ecs_whitelist_delete(sn_env->whitelist);
		slabhash_delete(sn_env->subnet_msg_cache);
		free(sn_env);
		env->modinfo[id] = NULL;
		return 0;
	}
	inplace_cb_register((void*)ecs_whitelist_check, inplace_cb_query, NULL,
		env, id);
	inplace_cb_register((void*)ecs_edns_back_parsed,
		inplace_cb_edns_back_parsed, NULL, env, id);
	inplace_cb_register((void*)ecs_query_response,
		inplace_cb_query_response, NULL, env, id);
	lock_rw_init(&sn_env->biglock);
	return 1;
}

void
subnetmod_deinit(struct module_env *env, int id)
{
	struct subnet_env *sn_env;
	if(!env || !env->modinfo[id])
		return;
	sn_env = (struct subnet_env*)env->modinfo[id];
	lock_rw_destroy(&sn_env->biglock);
	inplace_cb_delete(env, inplace_cb_edns_back_parsed, id);
	inplace_cb_delete(env, inplace_cb_query, id);
	inplace_cb_delete(env, inplace_cb_query_response, id);
	ecs_whitelist_delete(sn_env->whitelist);
	slabhash_delete(sn_env->subnet_msg_cache);
	alloc_clear(&sn_env->alloc);
	free(sn_env);
	env->modinfo[id] = NULL;
}

/** Tells client that upstream has no/improper support */
static void
cp_edns_bad_response(struct ecs_data *target, struct ecs_data *source)
{
	target->subnet_scope_mask  = 0;
	target->subnet_source_mask = source->subnet_source_mask;
	target->subnet_addr_fam    = source->subnet_addr_fam;
	memcpy(target->subnet_addr, source->subnet_addr, INET6_SIZE);
	target->subnet_validdata = 1;
}

static void
delfunc(void *envptr, void *elemptr) {
	struct reply_info *elem = (struct reply_info *)elemptr;
	struct subnet_env *env = (struct subnet_env *)envptr;
	reply_info_parsedelete(elem, &env->alloc);
}

static size_t
sizefunc(void *elemptr) {
	struct reply_info *elem  = (struct reply_info *)elemptr;
	return sizeof (struct reply_info) - sizeof (struct rrset_ref)
		+ elem->rrset_count * sizeof (struct rrset_ref)
		+ elem->rrset_count * sizeof (struct ub_packed_rrset_key *);
}

/**
 * Select tree from cache entry based on edns data.
 * If no tree exists for the address family, a new one is created.
 * Returns NULL on failure to create. */
static struct addrtree*
get_tree(struct subnet_msg_cache_data *data, struct ecs_data *edns,
	struct subnet_env *env, struct config_file* cfg)
{
	struct addrtree *tree;
	if (edns->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4) {
		if (!data->tree4)
			data->tree4 = addrtree_create(
				cfg->max_client_subnet_ipv4, &delfunc,
				&sizefunc, env, cfg->max_ecs_tree_size_ipv4);
		tree = data->tree4;
	} else {
		if (!data->tree6)
			data->tree6 = addrtree_create(
				cfg->max_client_subnet_ipv6, &delfunc,
				&sizefunc, env, cfg->max_ecs_tree_size_ipv6);
		tree = data->tree6;
	}
	return tree;
}

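/*
 * Explanatory note (added, not in the original source): the subnet cache is
 * a two-level structure. The outer slabhash is keyed by query info (qname,
 * qtype, qclass and flags), like the regular message cache; each entry holds
 * a struct subnet_msg_cache_data with one addrtree per address family. The
 * addrtree is keyed by the client address prefix, so a single qinfo entry can
 * hold several answers, each valid for a different subnet scope.
 */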
static void
update_cache(struct module_qstate *qstate, int id)
{
	struct msgreply_entry *mrep_entry;
	struct addrtree *tree;
	struct reply_info *rep;
	struct query_info qinf;
	struct subnet_env *sne = qstate->env->modinfo[id];
	struct subnet_qstate *sq = (struct subnet_qstate*)qstate->minfo[id];
	struct slabhash *subnet_msg_cache = sne->subnet_msg_cache;
	struct ecs_data *edns = &sq->ecs_client_in;
	size_t i;

	/* We already calculated the hash upon lookup */
	hashvalue_type h = qstate->minfo[id] ?
		((struct subnet_qstate*)qstate->minfo[id])->qinfo_hash :
		query_info_hash(&qstate->qinfo, qstate->query_flags);
	/* Step 1, general qinfo lookup */
	struct lruhash_entry *lru_entry = slabhash_lookup(subnet_msg_cache, h,
		&qstate->qinfo, 1);
	int acquired_lock = (lru_entry != NULL);
	if (!lru_entry) {
		qinf = qstate->qinfo;
		qinf.qname = memdup(qstate->qinfo.qname,
			qstate->qinfo.qname_len);
		if(!qinf.qname) {
			log_err("memdup failed");
			return;
		}
		mrep_entry = query_info_entrysetup(&qinf, NULL, h);
		free(qinf.qname); /* if qname 'consumed', it is set to NULL */
		if (!mrep_entry) {
			log_err("query_info_entrysetup failed");
			return;
		}
		lru_entry = &mrep_entry->entry;
		lock_rw_wrlock(&lru_entry->lock);
		lru_entry->data = calloc(1,
			sizeof(struct subnet_msg_cache_data));
		if (!lru_entry->data) {
			log_err("malloc failed");
			lock_rw_unlock(&lru_entry->lock);
			return;
		}
	}
	/* Step 2, find the correct tree */
	if (!(tree = get_tree(lru_entry->data, edns, sne, qstate->env->cfg))) {
		if (acquired_lock) lock_rw_unlock(&lru_entry->lock);
		log_err("Subnet cache insertion failed");
		return;
	}
	lock_quick_lock(&sne->alloc.lock);
	rep = reply_info_copy(qstate->return_msg->rep, &sne->alloc, NULL);
	lock_quick_unlock(&sne->alloc.lock);
	if (!rep) {
		if (acquired_lock) lock_rw_unlock(&lru_entry->lock);
		log_err("Subnet cache insertion failed");
		return;
	}

	/* store RRsets */
	for(i=0; i<rep->rrset_count; i++) {
		rep->ref[i].key = rep->rrsets[i];
		rep->ref[i].id = rep->rrsets[i]->id;
	}
	reply_info_set_ttls(rep, *qstate->env->now);
	rep->flags |= (BIT_RA | BIT_QR); /* fix flags to be sensible for */
	rep->flags &= ~(BIT_AA | BIT_CD);/* a reply based on the cache   */
	addrtree_insert(tree, (addrkey_t*)edns->subnet_addr,
		edns->subnet_source_mask,
		sq->ecs_server_in.subnet_scope_mask, rep,
		rep->ttl, *qstate->env->now);
	if (acquired_lock) {
		lock_rw_unlock(&lru_entry->lock);
	} else {
		lock_rw_unlock(&lru_entry->lock);
		slabhash_insert(subnet_msg_cache, h, lru_entry, lru_entry->data,
			NULL);
	}
}

/** Lookup in cache; return true iff a reply could be taken from cache. */
static int
lookup_and_reply(struct module_qstate *qstate, int id, struct subnet_qstate *sq)
{
	struct lruhash_entry *e;
	struct module_env *env = qstate->env;
	struct subnet_env *sne = (struct subnet_env*)env->modinfo[id];
	hashvalue_type h = query_info_hash(&qstate->qinfo, qstate->query_flags);
	struct subnet_msg_cache_data *data;
	struct ecs_data *ecs = &sq->ecs_client_in;
	struct addrtree *tree;
	struct addrnode *node;
	uint8_t scope;

	memset(&sq->ecs_client_out, 0, sizeof(sq->ecs_client_out));

	sq->qinfo_hash = h; /* Might be useful on cache miss */
	e = slabhash_lookup(sne->subnet_msg_cache, h, &qstate->qinfo, 1);
	if (!e) return 0; /* qinfo not in cache */
	data = e->data;
	tree = (ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4)?
		data->tree4 : data->tree6;
	if (!tree) { /* qinfo in cache but not for this family */
		lock_rw_unlock(&e->lock);
		return 0;
	}
	node = addrtree_find(tree, (addrkey_t*)ecs->subnet_addr,
		ecs->subnet_source_mask, *env->now);
	if (!node) { /* plain old cache miss */
		lock_rw_unlock(&e->lock);
		return 0;
	}

	qstate->return_msg = tomsg(NULL, &qstate->qinfo,
		(struct reply_info *)node->elem, qstate->region, *env->now,
		env->scratch);
	scope = (uint8_t)node->scope;
	lock_rw_unlock(&e->lock);

	if (!qstate->return_msg) { /* Failed allocation or expired TTL */
		return 0;
	}

	if (sq->subnet_downstream) { /* relay to interested client */
		sq->ecs_client_out.subnet_scope_mask = scope;
		sq->ecs_client_out.subnet_addr_fam = ecs->subnet_addr_fam;
		sq->ecs_client_out.subnet_source_mask = ecs->subnet_source_mask;
		memcpy(&sq->ecs_client_out.subnet_addr, &ecs->subnet_addr,
			INET6_SIZE);
		sq->ecs_client_out.subnet_validdata = 1;
	}
	return 1;
}

/**
 * Test first bits of addresses for equality. Caller is responsible
 * for making sure that both a and b are at least net/8 octets long.
 * @param a: first address.
 * @param b: second address.
 * @param net: Number of bits to test.
 * @return: 1 if equal, 0 otherwise.
 */
static int
common_prefix(uint8_t *a, uint8_t *b, uint8_t net)
{
	size_t n = (size_t)net / 8;
	return !memcmp(a, b, n) && ((net % 8) == 0 || a[n] == b[n]);
}

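/*
 * Illustrative example (added, not in the original source): with net = 25,
 * a = 192.0.2.0 (c0 00 02 00) and b = 192.0.2.128 (c0 00 02 80), the first
 * 3 octets compare equal but the fourth octet differs, so the function
 * returns 0; with net = 24 only the memcmp of 3 octets matters and it
 * returns 1. Note that for a partial last octet the entire octet is
 * compared, so the result also depends on any bits beyond the prefix.
 */
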
static enum module_ext_state
eval_response(struct module_qstate *qstate, int id, struct subnet_qstate *sq)
{
	struct subnet_env *sne = qstate->env->modinfo[id];

	struct ecs_data *c_in  = &sq->ecs_client_in; /* rcvd from client */
	struct ecs_data *c_out = &sq->ecs_client_out;/* will send to client */
	struct ecs_data *s_in  = &sq->ecs_server_in; /* rcvd from auth */
	struct ecs_data *s_out = &sq->ecs_server_out;/* sent to auth */

	memset(c_out, 0, sizeof(*c_out));

	if (!qstate->return_msg) {
		/* There is already an answer and it is not a message; retain
		 * the actual rcode, instead of module_error, so send
		 * module_finished */
		return module_finished;
	}

	/* We have not asked for subnet data */
	if (!sq->subnet_sent) {
		if (s_in->subnet_validdata)
			verbose(VERB_QUERY, "subnet: received spurious data");
		if (sq->subnet_downstream) /* Copy back to client */
			cp_edns_bad_response(c_out, c_in);
		return module_finished;
	}

	/* subnet sent but nothing came back */
	if (!s_in->subnet_validdata) {
		/* The authority indicated no support for edns subnet. As a
		 * consequence the answer ended up in the regular cache. It
		 * is still useful to put it in the edns subnet cache for
		 * when a client explicitly asks for a subnet specific answer. */
		verbose(VERB_QUERY, "subnet: Authority indicates no support");
		if(!sq->started_no_cache_store) {
			lock_rw_wrlock(&sne->biglock);
			update_cache(qstate, id);
			lock_rw_unlock(&sne->biglock);
		}
		if (sq->subnet_downstream)
			cp_edns_bad_response(c_out, c_in);
		return module_finished;
	}

	/* Being here means we have asked for and got a subnet specific
	 * answer. Also, the answer from the authority is not yet cached
	 * anywhere. */

	/* can we accept the response? */
	if(s_out->subnet_addr_fam != s_in->subnet_addr_fam ||
		s_out->subnet_source_mask != s_in->subnet_source_mask ||
		!common_prefix(s_out->subnet_addr, s_in->subnet_addr,
			s_out->subnet_source_mask))
	{
		/* we cannot accept, restart query without option */
		verbose(VERB_QUERY, "subnet: forged data");
		s_out->subnet_validdata = 0;
		(void)edns_opt_list_remove(&qstate->edns_opts_back_out,
			qstate->env->cfg->client_subnet_opcode);
		sq->subnet_sent = 0;
		return module_restart_next;
	}

	lock_rw_wrlock(&sne->biglock);
	if(!sq->started_no_cache_store) {
		update_cache(qstate, id);
	}
	sne->num_msg_nocache++;
	lock_rw_unlock(&sne->biglock);

	if (sq->subnet_downstream) {
		/* Client wants to see the answer, echo option back
		 * and adjust the scope. */
		c_out->subnet_addr_fam = c_in->subnet_addr_fam;
		c_out->subnet_source_mask = c_in->subnet_source_mask;
		memcpy(&c_out->subnet_addr, &c_in->subnet_addr, INET6_SIZE);
		c_out->subnet_scope_mask = s_in->subnet_scope_mask;
		/* Limit scope returned to client to scope used for caching. */
		if(c_out->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4) {
			if(c_out->subnet_scope_mask >
				qstate->env->cfg->max_client_subnet_ipv4) {
				c_out->subnet_scope_mask =
					qstate->env->cfg->max_client_subnet_ipv4;
			}
		}
		else if(c_out->subnet_scope_mask >
				qstate->env->cfg->max_client_subnet_ipv6) {
				c_out->subnet_scope_mask =
					qstate->env->cfg->max_client_subnet_ipv6;
		}
		c_out->subnet_validdata = 1;
	}
	return module_finished;
}

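/*
 * Illustrative example (added, not in the original source): if a client
 * sends ECS 192.0.2.0/24, the authority answers with scope /26 and
 * max-client-subnet-ipv4 is 24, then the scope echoed back to the client is
 * capped at 24, the same limit used for caching, so the client is never told
 * a scope finer than what the cache can distinguish.
 */
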
/** Parse EDNS opt data containing ECS */
static int
parse_subnet_option(struct edns_option* ecs_option, struct ecs_data* ecs)
{
	memset(ecs, 0, sizeof(*ecs));
	if (ecs_option->opt_len < 4)
		return 0;

	ecs->subnet_addr_fam = sldns_read_uint16(ecs_option->opt_data);
	ecs->subnet_source_mask = ecs_option->opt_data[2];
	ecs->subnet_scope_mask = ecs_option->opt_data[3];
	/* remaining bytes indicate address */

	/* validate input */
	/* option length matches calculated length? */
	if (ecs_option->opt_len != (size_t)((ecs->subnet_source_mask+7)/8 + 4))
		return 0;
	if (ecs_option->opt_len - 4 > INET6_SIZE || ecs_option->opt_len == 0)
		return 0;
	if (ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4) {
		if (ecs->subnet_source_mask > 32 || ecs->subnet_scope_mask > 32)
			return 0;
	} else if (ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP6) {
		if (ecs->subnet_source_mask > 128 ||
			ecs->subnet_scope_mask > 128)
			return 0;
	} else
		return 0;

	/* valid ECS data, write to ecs_data */
	if (copy_clear(ecs->subnet_addr, INET6_SIZE, ecs_option->opt_data + 4,
		ecs_option->opt_len - 4, ecs->subnet_source_mask))
		return 0;
	ecs->subnet_validdata = 1;
	return 1;
}

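/*
 * Illustrative example (added, not in the original source): an option body
 * of 00 01 18 00 c0 00 02 parses as family IPv4, source prefix 24, scope 0,
 * address 192.0.2.0, and passes the length check since (24+7)/8 + 4 = 7.
 * The same 7 octets with a declared source prefix of 32 are rejected,
 * because that prefix would require 4 + 4 = 8 octets.
 */
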
static void
subnet_option_from_ss(struct sockaddr_storage *ss, struct ecs_data* ecs,
	struct config_file* cfg)
{
	void* sinaddr;

	/* Construct subnet option from original query */
	if(((struct sockaddr_in*)ss)->sin_family == AF_INET) {
		ecs->subnet_source_mask = cfg->max_client_subnet_ipv4;
		ecs->subnet_addr_fam = EDNSSUBNET_ADDRFAM_IP4;
		sinaddr = &((struct sockaddr_in*)ss)->sin_addr;
		if (!copy_clear( ecs->subnet_addr, INET6_SIZE,
			(uint8_t *)sinaddr, INET_SIZE,
			ecs->subnet_source_mask)) {
			ecs->subnet_validdata = 1;
		}
	}
#ifdef INET6
	else {
		ecs->subnet_source_mask = cfg->max_client_subnet_ipv6;
		ecs->subnet_addr_fam = EDNSSUBNET_ADDRFAM_IP6;
		sinaddr = &((struct sockaddr_in6*)ss)->sin6_addr;
		if (!copy_clear( ecs->subnet_addr, INET6_SIZE,
			(uint8_t *)sinaddr, INET6_SIZE,
			ecs->subnet_source_mask)) {
			ecs->subnet_validdata = 1;
		}
	}
#else
	/* We don't know how to handle ip6, just pass */
#endif /* INET6 */
}

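/*
 * Illustrative example (added, not in the original source): with
 * max-client-subnet-ipv4 set to 24, a query arriving from 203.0.113.45
 * yields the option 203.0.113.0/24; the bits beyond the source prefix are
 * cleared here and again when the option is rendered, so the host part of
 * the client address is not sent upstream.
 */
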
int
ecs_query_response(struct module_qstate* qstate, struct dns_msg* response,
	int id, void* ATTR_UNUSED(cbargs))
{
	struct subnet_qstate *sq;

	if(!response || !(sq=(struct subnet_qstate*)qstate->minfo[id]))
		return 1;

	if(sq->subnet_sent &&
		FLAGS_GET_RCODE(response->rep->flags) == LDNS_RCODE_REFUSED) {
		/* REFUSED response to ECS query, remove ECS option. */
		edns_opt_list_remove(&qstate->edns_opts_back_out,
			qstate->env->cfg->client_subnet_opcode);
		sq->subnet_sent = 0;
		memset(&sq->ecs_server_out, 0, sizeof(sq->ecs_server_out));
	}
	return 1;
}

int
ecs_edns_back_parsed(struct module_qstate* qstate, int id,
	void* ATTR_UNUSED(cbargs))
{
	struct subnet_qstate *sq;
	struct edns_option* ecs_opt;

	if(!(sq=(struct subnet_qstate*)qstate->minfo[id]))
		return 1;
	if((ecs_opt = edns_opt_list_find(
		qstate->edns_opts_back_in,
		qstate->env->cfg->client_subnet_opcode))) {
		if(parse_subnet_option(ecs_opt, &sq->ecs_server_in) &&
			sq->subnet_sent &&
			sq->ecs_server_in.subnet_validdata)
			/* Only skip global cache store if we sent an ECS option
			 * and received one back. Answers from non-whitelisted
			 * servers will end up in global cache. Answers for
			 * queries with 0 source will not (unless nameserver
			 * does not support ECS). */
			qstate->no_cache_store = 1;
	}

	return 1;
}

void
subnetmod_operate(struct module_qstate *qstate, enum module_ev event,
	int id, struct outbound_entry* outbound)
{
	struct subnet_env *sne = qstate->env->modinfo[id];
	struct subnet_qstate *sq = (struct subnet_qstate*)qstate->minfo[id];

	verbose(VERB_QUERY, "subnet[module %d] operate: extstate:%s "
		"event:%s", id, strextstate(qstate->ext_state[id]),
		strmodulevent(event));
	log_query_info(VERB_QUERY, "subnet operate: query", &qstate->qinfo);

	if((event == module_event_new || event == module_event_pass) &&
		sq == NULL) {
		struct edns_option* ecs_opt;
		if(!subnet_new_qstate(qstate, id)) {
			qstate->return_msg = NULL;
			qstate->ext_state[id] = module_finished;
			return;
		}

		sq = (struct subnet_qstate*)qstate->minfo[id];

		if((ecs_opt = edns_opt_list_find(
			qstate->edns_opts_front_in,
			qstate->env->cfg->client_subnet_opcode))) {
			if(!parse_subnet_option(ecs_opt, &sq->ecs_client_in)) {
				/* Wrongly formatted ECS option. The RFC
				 * mandates returning FORMERR. */
				qstate->return_rcode = LDNS_RCODE_FORMERR;
				qstate->ext_state[id] = module_finished;
				return;
			}
			sq->subnet_downstream = 1;
		}
		else if(qstate->mesh_info->reply_list) {
			subnet_option_from_ss(
				&qstate->mesh_info->reply_list->query_reply.addr,
				&sq->ecs_client_in, qstate->env->cfg);
		}

		if(sq->ecs_client_in.subnet_validdata == 0) {
			/* No client is interested in the result, or we could
			 * not parse it; we don't do client subnet */
			sq->ecs_server_out.subnet_validdata = 0;
			verbose(VERB_ALGO, "subnet: pass to next module");
			qstate->ext_state[id] = module_wait_module;
			return;
		}

		/* Limit to minimum allowed source mask */
		if(sq->ecs_client_in.subnet_source_mask != 0 && (
			(sq->ecs_client_in.subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4 &&
			 sq->ecs_client_in.subnet_source_mask < qstate->env->cfg->min_client_subnet_ipv4) ||
			(sq->ecs_client_in.subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP6 &&
			 sq->ecs_client_in.subnet_source_mask < qstate->env->cfg->min_client_subnet_ipv6))) {
				qstate->return_rcode = LDNS_RCODE_REFUSED;
				qstate->ext_state[id] = module_finished;
				return;
		}

		lock_rw_wrlock(&sne->biglock);
		if (lookup_and_reply(qstate, id, sq)) {
			sne->num_msg_cache++;
			lock_rw_unlock(&sne->biglock);
			verbose(VERB_QUERY, "subnet: answered from cache");
			qstate->ext_state[id] = module_finished;

			ecs_opt_list_append(&sq->ecs_client_out,
				&qstate->edns_opts_front_out, qstate);
			return;
		}
		lock_rw_unlock(&sne->biglock);

		sq->ecs_server_out.subnet_addr_fam =
			sq->ecs_client_in.subnet_addr_fam;
		sq->ecs_server_out.subnet_source_mask =
			sq->ecs_client_in.subnet_source_mask;
		/* Limit source prefix to configured maximum */
		if(sq->ecs_server_out.subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4
			&& sq->ecs_server_out.subnet_source_mask >
			qstate->env->cfg->max_client_subnet_ipv4)
			sq->ecs_server_out.subnet_source_mask =
				qstate->env->cfg->max_client_subnet_ipv4;
		else if(sq->ecs_server_out.subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP6
			&& sq->ecs_server_out.subnet_source_mask >
			qstate->env->cfg->max_client_subnet_ipv6)
			sq->ecs_server_out.subnet_source_mask =
				qstate->env->cfg->max_client_subnet_ipv6;
		/* Safe to copy completely, even if the source is limited by
		 * the configuration; ecs_opt_list_append() will limit the
		 * address. */
		memcpy(&sq->ecs_server_out.subnet_addr,
			sq->ecs_client_in.subnet_addr, INET6_SIZE);
		sq->ecs_server_out.subnet_scope_mask = 0;
		sq->ecs_server_out.subnet_validdata = 1;
		if(sq->ecs_server_out.subnet_source_mask != 0 &&
			qstate->env->cfg->client_subnet_always_forward &&
			sq->subnet_downstream)
			/* ECS specific data required, do not look at the global
			 * cache in other modules. */
			qstate->no_cache_lookup = 1;

		/* pass request to next module */
		verbose(VERB_ALGO,
			"subnet: not found in cache. pass to next module");
		qstate->ext_state[id] = module_wait_module;
		return;
	}
	/* Query handed back by next module, we have a 'final' answer */
	if(sq && event == module_event_moddone) {
		qstate->ext_state[id] = eval_response(qstate, id, sq);
		if(qstate->ext_state[id] == module_finished &&
			qstate->return_msg) {
			ecs_opt_list_append(&sq->ecs_client_out,
				&qstate->edns_opts_front_out, qstate);
		}
		qstate->no_cache_store = sq->started_no_cache_store;
		return;
	}
	if(sq && outbound) {
		return;
	}
	/* We are being revisited */
	if(event == module_event_pass || event == module_event_new) {
		/* Just pass it on, we already did the work */
		verbose(VERB_ALGO, "subnet: pass to next module");
		qstate->ext_state[id] = module_wait_module;
		return;
	}
	if(!sq && (event == module_event_moddone)) {
		/* during priming, module done but we never started */
		qstate->ext_state[id] = module_finished;
		return;
	}
	log_err("subnet: bad event %s", strmodulevent(event));
	qstate->ext_state[id] = module_error;
	return;
}

void
subnetmod_clear(struct module_qstate *ATTR_UNUSED(qstate),
	int ATTR_UNUSED(id))
{
	/* qstate has no data outside region */
}

void
subnetmod_inform_super(struct module_qstate *ATTR_UNUSED(qstate),
	int ATTR_UNUSED(id), struct module_qstate *ATTR_UNUSED(super))
{
	/* Not used */
}

size_t
subnetmod_get_mem(struct module_env *env, int id)
{
	struct subnet_env *sn_env = env->modinfo[id];
	if (!sn_env) return 0;
	return sizeof(*sn_env) +
		slabhash_get_mem(sn_env->subnet_msg_cache) +
		ecs_whitelist_get_mem(sn_env->whitelist);
}

/**
 * The module function block
 */
static struct module_func_block subnetmod_block = {
	"subnet", &subnetmod_init, &subnetmod_deinit, &subnetmod_operate,
	&subnetmod_inform_super, &subnetmod_clear, &subnetmod_get_mem
};

struct module_func_block*
subnetmod_get_funcblock(void)
{
	return &subnetmod_block;
}

/** Wrappers for static functions to unit test */
size_t
unittest_wrapper_subnetmod_sizefunc(void *elemptr)
{
	return sizefunc(elemptr);
}

#endif  /* CLIENT_SUBNET */