/*	$OpenBSD: if_etherbridge.c,v 1.1 2021/02/21 03:26:46 dlg Exp $ */

/*
 * Copyright (c) 2018, 2021 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/tree.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/rtable.h>
#include <net/toeplitz.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

/* for bridge stuff */
#include <net/if_bridge.h>

#include <net/if_etherbridge.h>

static inline void	ebe_take(struct eb_entry *);
static inline void	ebe_rele(struct eb_entry *);
static void		ebe_free(void *);

static void		etherbridge_age(void *);

RBT_PROTOTYPE(eb_tree, eb_entry, ebe_tentry, ebt_cmp);

static struct pool	eb_entry_pool;

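/*
 * The eb_port_* helpers below dispatch to the etherbridge_ops callbacks
 * that the owning driver passed to etherbridge_init().  The "port"
 * pointers are opaque to this code; only the driver knows what they
 * reference.  A driver is expected to provide something along these
 * lines (a hypothetical sketch: the function names are illustrative,
 * only the eb_op_* member names are taken from this file):
 *
 *	static const struct etherbridge_ops example_eb_ops = {
 *		.eb_op_port_eq		= example_port_eq,
 *		.eb_op_port_take	= example_port_take,
 *		.eb_op_port_rele	= example_port_rele,
 *		.eb_op_port_ifname	= example_port_ifname,
 *		.eb_op_port_sa		= example_port_sa,
 *	};
 */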
static inline int
eb_port_eq(struct etherbridge *eb, void *a, void *b)
{
	return ((*eb->eb_ops->eb_op_port_eq)(eb->eb_cookie, a, b));
}

static inline void *
eb_port_take(struct etherbridge *eb, void *port)
{
	return ((*eb->eb_ops->eb_op_port_take)(eb->eb_cookie, port));
}

static inline void
eb_port_rele(struct etherbridge *eb, void *port)
{
	return ((*eb->eb_ops->eb_op_port_rele)(eb->eb_cookie, port));
}

static inline size_t
eb_port_ifname(struct etherbridge *eb, char *dst, size_t len, void *port)
{
	return ((*eb->eb_ops->eb_op_port_ifname)(eb->eb_cookie, dst, len,
	    port));
}

static inline void
eb_port_sa(struct etherbridge *eb, struct sockaddr_storage *ss, void *port)
{
	(*eb->eb_ops->eb_op_port_sa)(eb->eb_cookie, ss, port);
}

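/*
 * etherbridge_init() sets up the learning table state for a driver
 * instance: it creates the shared eb_entry pool on first use, allocates
 * the hash bucket array, and initialises the lock, the RB tree, the
 * defaults (100 entries, 240 second timeout), and the aging timeout.
 */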
int
etherbridge_init(struct etherbridge *eb, const char *name,
    const struct etherbridge_ops *ops, void *cookie)
{
	size_t i;

	if (eb_entry_pool.pr_size == 0) {
		pool_init(&eb_entry_pool, sizeof(struct eb_entry),
		    0, IPL_SOFTNET, 0, "ebepl", NULL);
	}

	eb->eb_table = mallocarray(ETHERBRIDGE_TABLE_SIZE,
	    sizeof(*eb->eb_table), M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (eb->eb_table == NULL)
		return (ENOMEM);

	eb->eb_name = name;
	eb->eb_ops = ops;
	eb->eb_cookie = cookie;

	mtx_init(&eb->eb_lock, IPL_SOFTNET);
	RBT_INIT(eb_tree, &eb->eb_tree);

	eb->eb_num = 0;
	eb->eb_max = 100;
	eb->eb_max_age = 240;
	timeout_set(&eb->eb_tmo_age, etherbridge_age, eb);

	for (i = 0; i < ETHERBRIDGE_TABLE_SIZE; i++) {
		struct eb_list *ebl = &eb->eb_table[i];
		SMR_TAILQ_INIT(ebl);
	}

	return (0);
}

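/*
 * etherbridge_up() kicks off aging by running etherbridge_age() once,
 * which also schedules the periodic timeout.  etherbridge_down() only
 * waits for SMR readers to drain so entries the caller is about to tear
 * down are no longer visible to the forwarding path.
 */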
int
etherbridge_up(struct etherbridge *eb)
{
	etherbridge_age(eb);

	return (0);
}

int
etherbridge_down(struct etherbridge *eb)
{
	smr_barrier();

	return (0);
}

void
etherbridge_destroy(struct etherbridge *eb)
{
	struct eb_entry *ebe, *nebe;

	/* XXX assume that nothing will be calling etherbridge_map now */

	timeout_del_barrier(&eb->eb_tmo_age);

	free(eb->eb_table, M_DEVBUF,
	    ETHERBRIDGE_TABLE_SIZE * sizeof(*eb->eb_table));

	RBT_FOREACH_SAFE(ebe, eb_tree, &eb->eb_tree, nebe) {
		RBT_REMOVE(eb_tree, &eb->eb_tree, ebe);
		ebe_free(ebe);
	}
}

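/*
 * etherbridge_list() maps an Ethernet address to its hash bucket using
 * the stoeplitz hash of the address, masked down to the table size.
 */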
static struct eb_list *
etherbridge_list(struct etherbridge *eb, const struct ether_addr *ea)
{
	uint16_t hash = stoeplitz_eaddr(ea->ether_addr_octet);
	hash &= ETHERBRIDGE_TABLE_MASK;
	return (&eb->eb_table[hash]);
}

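/*
 * ebl_find() walks a bucket looking for a matching address.  Callers
 * must either be in an SMR read section or hold eb_lock; ebl_insert()
 * and ebl_remove() require eb_lock.
 */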
static struct eb_entry *
ebl_find(struct eb_list *ebl, const struct ether_addr *ea)
{
	struct eb_entry *ebe;

	SMR_TAILQ_FOREACH(ebe, ebl, ebe_lentry) {
		if (ETHER_IS_EQ(ea, &ebe->ebe_addr))
			return (ebe);
	}

	return (NULL);
}

static inline void
ebl_insert(struct eb_list *ebl, struct eb_entry *ebe)
{
	SMR_TAILQ_INSERT_TAIL_LOCKED(ebl, ebe, ebe_lentry);
}

static inline void
ebl_remove(struct eb_list *ebl, struct eb_entry *ebe)
{
	SMR_TAILQ_REMOVE_LOCKED(ebl, ebe, ebe_lentry);
}

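/*
 * In addition to the hash buckets, entries are kept in an RB tree keyed
 * by Ethernet address.  The tree is used under eb_lock to detect
 * duplicates on insert and to provide a sorted walk for the rtfind
 * ioctl.
 */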
static inline int
ebt_cmp(const struct eb_entry *aebe, const struct eb_entry *bebe)
{
	return (memcmp(&aebe->ebe_addr, &bebe->ebe_addr,
	    sizeof(aebe->ebe_addr)));
}

RBT_GENERATE(eb_tree, eb_entry, ebe_tentry, ebt_cmp);

static inline struct eb_entry *
ebt_insert(struct etherbridge *eb, struct eb_entry *ebe)
{
	return (RBT_INSERT(eb_tree, &eb->eb_tree, ebe));
}

static inline void
ebt_replace(struct etherbridge *eb, struct eb_entry *oebe,
    struct eb_entry *nebe)
{
	struct eb_entry *rvebe;

	RBT_REMOVE(eb_tree, &eb->eb_tree, oebe);
	rvebe = RBT_INSERT(eb_tree, &eb->eb_tree, nebe);
	KASSERTMSG(rvebe == NULL, "ebt_replace eb %p nebe %p rvebe %p",
	    eb, nebe, rvebe);
}

static inline void
ebt_remove(struct etherbridge *eb, struct eb_entry *ebe)
{
	RBT_REMOVE(eb_tree, &eb->eb_tree, ebe);
}

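/*
 * Entries are reference counted; the table itself holds one reference.
 * ebe_rele() defers the actual free with smr_call() so that readers
 * still traversing a bucket in an SMR read section never see a freed
 * entry.
 */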
static inline void
ebe_take(struct eb_entry *ebe)
{
	refcnt_take(&ebe->ebe_refs);
}

static void
ebe_rele(struct eb_entry *ebe)
{
	if (refcnt_rele(&ebe->ebe_refs))
		smr_call(&ebe->ebe_smr_entry, ebe_free, ebe);
}

static void
ebe_free(void *arg)
{
	struct eb_entry *ebe = arg;
	struct etherbridge *eb = ebe->ebe_etherbridge;

	eb_port_rele(eb, ebe->ebe_port);
	pool_put(&eb_entry_pool, ebe);
}

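/*
 * etherbridge_resolve() is the lookup used in the forwarding path.  It
 * runs entirely within an SMR critical section and takes no locks;
 * dynamic entries older than eb_max_age are treated as misses.
 */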
void *
etherbridge_resolve(struct etherbridge *eb, const struct ether_addr *ea)
{
	struct eb_list *ebl = etherbridge_list(eb, ea);
	struct eb_entry *ebe;

	SMR_ASSERT_CRITICAL();

	ebe = ebl_find(ebl, ea);
	if (ebe != NULL) {
		if (ebe->ebe_type == EBE_DYNAMIC) {
			int diff = getuptime() - ebe->ebe_age;
			if (diff > eb->eb_max_age)
				return (NULL);
		}

		return (ebe->ebe_port);
	}

	return (NULL);
}

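/*
 * etherbridge_map() is the learning path.  In the common case, where
 * the address is already mapped to the same port, it only refreshes the
 * age under the SMR read lock.  Otherwise a replacement entry is
 * prepared outside eb_lock and inserted under it; if the insert loses a
 * race or would exceed eb_max, the new entry is thrown away.
 */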
void
etherbridge_map(struct etherbridge *eb, void *port,
    const struct ether_addr *ea)
{
	struct eb_list *ebl;
	struct eb_entry *oebe, *nebe;
	unsigned int num;
	void *nport;
	int new = 0;

	if (ETHER_IS_MULTICAST(ea->ether_addr_octet) ||
	    ETHER_IS_EQ(ea->ether_addr_octet, etheranyaddr))
		return;

	ebl = etherbridge_list(eb, ea);

	smr_read_enter();
	oebe = ebl_find(ebl, ea);
	if (oebe == NULL)
		new = 1;
	else {
		oebe->ebe_age = getuptime();

		/* does this entry need to be replaced? */
		if (oebe->ebe_type == EBE_DYNAMIC &&
		    !eb_port_eq(eb, oebe->ebe_port, port)) {
			new = 1;
			ebe_take(oebe);
		} else
			oebe = NULL;
	}
	smr_read_leave();

	if (!new)
		return;

	nport = eb_port_take(eb, port);
	if (nport == NULL) {
		/* XXX should we remove the old one and flood? */
		return;
	}

	nebe = pool_get(&eb_entry_pool, PR_NOWAIT);
	if (nebe == NULL) {
		/* XXX should we remove the old one and flood? */
		eb_port_rele(eb, nport);
		return;
	}

	smr_init(&nebe->ebe_smr_entry);
	refcnt_init(&nebe->ebe_refs);
	nebe->ebe_etherbridge = eb;

	nebe->ebe_addr = *ea;
	nebe->ebe_port = nport;
	nebe->ebe_type = EBE_DYNAMIC;
	nebe->ebe_age = getuptime();

	mtx_enter(&eb->eb_lock);
	num = eb->eb_num + (oebe == NULL);
	if (num <= eb->eb_max && ebt_insert(eb, nebe) == oebe) {
		/* we won, do the update */
		ebl_insert(ebl, nebe);

		if (oebe != NULL) {
			ebl_remove(ebl, oebe);
			ebt_replace(eb, oebe, nebe);

			/* take the table reference away */
			if (refcnt_rele(&oebe->ebe_refs)) {
				panic("%s: eb %p oebe %p refcnt",
				    __func__, eb, oebe);
			}
		}

		nebe = NULL;
		eb->eb_num = num;
	}
	mtx_leave(&eb->eb_lock);

	if (nebe != NULL) {
		/*
		 * the new entry didn't make it into the
		 * table, so it can be freed directly.
		 */
		ebe_free(nebe);
	}

	if (oebe != NULL) {
		/*
		 * the old entry could be referenced in
		 * multiple places, including an smr read
		 * section, so release it properly.
		 */
		ebe_rele(oebe);
	}
}

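/*
 * etherbridge_age() runs periodically from a timeout.  It walks every
 * bucket under eb_lock, unhooks dynamic entries that have not been seen
 * for eb_max_age seconds, and releases the collected entries after the
 * lock has been dropped.
 */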
static void
etherbridge_age(void *arg)
{
	struct etherbridge *eb = arg;
	struct eb_entry *ebe, *nebe;
	struct eb_queue ebq = TAILQ_HEAD_INITIALIZER(ebq);
	int diff;
	unsigned int now = getuptime();
	size_t i;

	timeout_add_sec(&eb->eb_tmo_age, 100);

	for (i = 0; i < ETHERBRIDGE_TABLE_SIZE; i++) {
		struct eb_list *ebl = &eb->eb_table[i];
#if 0
		if (SMR_TAILQ_EMPTY(ebl))
			continue;
#endif

		mtx_enter(&eb->eb_lock); /* don't block map too much */
		SMR_TAILQ_FOREACH_SAFE_LOCKED(ebe, ebl, ebe_lentry, nebe) {
			if (ebe->ebe_type != EBE_DYNAMIC)
				continue;

			diff = now - ebe->ebe_age;
			if (diff < eb->eb_max_age)
				continue;

			ebl_remove(ebl, ebe);
			ebt_remove(eb, ebe);
			eb->eb_num--;

			/* we own the table's ref now */

			TAILQ_INSERT_TAIL(&ebq, ebe, ebe_qentry);
		}
		mtx_leave(&eb->eb_lock);
	}

	TAILQ_FOREACH_SAFE(ebe, &ebq, ebe_qentry, nebe) {
		TAILQ_REMOVE(&ebq, ebe, ebe_qentry);
		ebe_rele(ebe);
	}
}

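/*
 * etherbridge_detach_port() removes every entry that points at the
 * given port, typically when that port is going away.  Entries are
 * unhooked under eb_lock, a single smr_barrier() covers all of them,
 * and then the final references are dropped.
 */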
void
etherbridge_detach_port(struct etherbridge *eb, void *port)
{
	struct eb_entry *ebe, *nebe;
	struct eb_queue ebq = TAILQ_HEAD_INITIALIZER(ebq);
	size_t i;

	for (i = 0; i < ETHERBRIDGE_TABLE_SIZE; i++) {
		struct eb_list *ebl = &eb->eb_table[i];

		mtx_enter(&eb->eb_lock); /* don't block map too much */
		SMR_TAILQ_FOREACH_SAFE_LOCKED(ebe, ebl, ebe_lentry, nebe) {
			if (!eb_port_eq(eb, ebe->ebe_port, port))
				continue;

			ebl_remove(ebl, ebe);
			ebt_remove(eb, ebe);
			eb->eb_num--;

			/* we own the table's ref now */

			TAILQ_INSERT_TAIL(&ebq, ebe, ebe_qentry);
		}
		mtx_leave(&eb->eb_lock);
	}

	smr_barrier(); /* try and do it once for all the entries */

	TAILQ_FOREACH_SAFE(ebe, &ebq, ebe_qentry, nebe) {
		TAILQ_REMOVE(&ebq, ebe, ebe_qentry);
		if (refcnt_rele(&ebe->ebe_refs))
			ebe_free(ebe);
	}
}

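/*
 * etherbridge_flush() empties the table.  With IFBF_FLUSHDYN only
 * dynamic entries are removed; any other flag value drops static
 * entries as well.
 */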
void
etherbridge_flush(struct etherbridge *eb, uint32_t flags)
{
	struct eb_entry *ebe, *nebe;
	struct eb_queue ebq = TAILQ_HEAD_INITIALIZER(ebq);
	size_t i;

	for (i = 0; i < ETHERBRIDGE_TABLE_SIZE; i++) {
		struct eb_list *ebl = &eb->eb_table[i];

		mtx_enter(&eb->eb_lock); /* don't block map too much */
		SMR_TAILQ_FOREACH_SAFE_LOCKED(ebe, ebl, ebe_lentry, nebe) {
			if (flags == IFBF_FLUSHDYN &&
			    ebe->ebe_type != EBE_DYNAMIC)
				continue;

			ebl_remove(ebl, ebe);
			ebt_remove(eb, ebe);
			eb->eb_num--;

			/* we own the table's ref now */

			TAILQ_INSERT_TAIL(&ebq, ebe, ebe_qentry);
		}
		mtx_leave(&eb->eb_lock);
	}

	smr_barrier(); /* try and do it once for all the entries */

	TAILQ_FOREACH_SAFE(ebe, &ebq, ebe_qentry, nebe) {
		TAILQ_REMOVE(&ebq, ebe, ebe_qentry);
		if (refcnt_rele(&ebe->ebe_refs))
			ebe_free(ebe);
	}
}

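/*
 * etherbridge_rtfind() fills in the ifbaconf structure for the bridge
 * address list ioctl.  A zero-length request just reports the space
 * needed; otherwise the tree is walked under eb_lock into a temporary
 * buffer, which is copied out after the lock is released.
 */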
int
etherbridge_rtfind(struct etherbridge *eb, struct ifbaconf *baconf)
{
	struct eb_entry *ebe;
	struct ifbareq bareq;
	caddr_t buf;
	size_t len, nlen;
	time_t age, now = getuptime();
	int error;

	if (baconf->ifbac_len == 0) {
		/* single read is atomic */
		baconf->ifbac_len = eb->eb_num * sizeof(bareq);
		return (0);
	}

	buf = malloc(baconf->ifbac_len, M_TEMP, M_WAITOK|M_CANFAIL);
	if (buf == NULL)
		return (ENOMEM);
	len = 0;

	mtx_enter(&eb->eb_lock);
	RBT_FOREACH(ebe, eb_tree, &eb->eb_tree) {
		nlen = len + sizeof(bareq);
		if (nlen > baconf->ifbac_len)
			break;

		strlcpy(bareq.ifba_name, eb->eb_name,
		    sizeof(bareq.ifba_name));
		eb_port_ifname(eb,
		    bareq.ifba_ifsname, sizeof(bareq.ifba_ifsname),
		    ebe->ebe_port);
		memcpy(&bareq.ifba_dst, &ebe->ebe_addr,
		    sizeof(bareq.ifba_dst));

		memset(&bareq.ifba_dstsa, 0, sizeof(bareq.ifba_dstsa));
		eb_port_sa(eb, &bareq.ifba_dstsa, ebe->ebe_port);

		switch (ebe->ebe_type) {
		case EBE_DYNAMIC:
			age = now - ebe->ebe_age;
			bareq.ifba_age = MIN(age, 0xff);
			bareq.ifba_flags = IFBAF_DYNAMIC;
			break;
		case EBE_STATIC:
			bareq.ifba_age = 0;
			bareq.ifba_flags = IFBAF_STATIC;
			break;
		}

		memcpy(buf + len, &bareq, sizeof(bareq));
		len = nlen;
	}
	nlen = baconf->ifbac_len;
	baconf->ifbac_len = eb->eb_num * sizeof(bareq);
	mtx_leave(&eb->eb_lock);

	error = copyout(buf, baconf->ifbac_buf, len);
	free(buf, M_TEMP, nlen);

	return (error);
}

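/*
 * The remaining helpers get and set the two tunables: the maximum
 * number of cached addresses (eb_max, via ifbrp_csize) and the dynamic
 * entry timeout in seconds (eb_max_age, via ifbrp_ctime).  Drivers
 * presumably wire these up to the usual bridge(4) cache ioctls.
 */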
int
etherbridge_set_max(struct etherbridge *eb, struct ifbrparam *bparam)
{
	if (bparam->ifbrp_csize < 1 ||
	    bparam->ifbrp_csize > 4096) /* XXX */
		return (EINVAL);

	/* commit */
	eb->eb_max = bparam->ifbrp_csize;

	return (0);
}

int
etherbridge_get_max(struct etherbridge *eb, struct ifbrparam *bparam)
{
	bparam->ifbrp_csize = eb->eb_max;

	return (0);
}

int
etherbridge_set_tmo(struct etherbridge *eb, struct ifbrparam *bparam)
{
	if (bparam->ifbrp_ctime < 8 ||
	    bparam->ifbrp_ctime > 3600)
		return (EINVAL);

	/* commit */
	eb->eb_max_age = bparam->ifbrp_ctime;

	return (0);
}

int
etherbridge_get_tmo(struct etherbridge *eb, struct ifbrparam *bparam)
{
	bparam->ifbrp_ctime = eb->eb_max_age;

	return (0);
}
587