1 /*	$OpenBSD: if_wg.c,v 1.40 2025/01/25 14:51:34 mvs Exp $ */
2 
3 /*
4  * Copyright (C) 2015-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
5  * Copyright (C) 2019-2020 Matt Dunwoodie <ncon@noconroy.net>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bpfilter.h"
21 #include "pf.h"
22 
23 #include <sys/types.h>
24 #include <sys/systm.h>
25 #include <sys/param.h>
26 #include <sys/pool.h>
27 
28 #include <sys/socket.h>
29 #include <sys/socketvar.h>
30 #include <sys/percpu.h>
31 #include <sys/ioctl.h>
32 #include <sys/mbuf.h>
33 #include <sys/syslog.h>
34 
35 #include <net/if.h>
36 #include <net/if_var.h>
37 #include <net/if_types.h>
38 #include <net/if_wg.h>
39 
40 #include <net/wg_noise.h>
41 #include <net/wg_cookie.h>
42 
43 #include <net/pfvar.h>
44 #include <net/route.h>
45 #include <net/bpf.h>
46 #include <net/art.h>
47 
48 #include <netinet/ip.h>
49 #include <netinet/ip6.h>
50 #include <netinet/udp.h>
51 #include <netinet/in_pcb.h>
52 
53 #include <crypto/siphash.h>
54 
55 #define DEFAULT_MTU		1420
56 
57 #define MAX_STAGED_PKT		128
58 #define MAX_QUEUED_PKT		1024
59 #define MAX_QUEUED_PKT_MASK	(MAX_QUEUED_PKT - 1)
60 
61 #define MAX_QUEUED_HANDSHAKES	4096
62 
63 #define HASHTABLE_PEER_SIZE	(1 << 11)
64 #define HASHTABLE_INDEX_SIZE	(1 << 13)
65 #define MAX_PEERS_PER_IFACE	(1 << 20)
66 
67 #define REKEY_TIMEOUT		5
68 #define REKEY_TIMEOUT_JITTER	334 /* 1/3 sec, round for arc4random_uniform */
69 #define KEEPALIVE_TIMEOUT	10
70 #define MAX_TIMER_HANDSHAKES	(90 / REKEY_TIMEOUT)
71 #define NEW_HANDSHAKE_TIMEOUT	(REKEY_TIMEOUT + KEEPALIVE_TIMEOUT)
72 #define UNDERLOAD_TIMEOUT	1
73 
74 #define WGPRINTF(loglevel, sc, mtx, fmt, ...) do {		\
75 	if (ISSET((sc)->sc_if.if_flags, IFF_DEBUG)) {		\
76 		if (mtx)					\
77 			mtx_enter(mtx);				\
78 		log(loglevel, "%s: " fmt, (sc)->sc_if.if_xname, \
79 		    ##__VA_ARGS__);				\
80 		if (mtx)					\
81 			mtx_leave(mtx);				\
82 	}							\
83 } while (0)
84 
85 #define CONTAINER_OF(ptr, type, member) ({			\
86 	const __typeof( ((type *)0)->member ) *__mptr = (ptr);	\
87 	(type *)( (char *)__mptr - offsetof(type,member) );})
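/*
 * Illustrative use (this mirrors the timer callbacks below): recover the
 * enclosing structure from a pointer to one of its members, e.g.
 *
 *	struct wg_peer *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
 */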
88 
89 /* Packet type, occupying the first 4 bytes on the wire (little endian) */
90 #define WG_PKT_INITIATION htole32(1)
91 #define WG_PKT_RESPONSE htole32(2)
92 #define WG_PKT_COOKIE htole32(3)
93 #define WG_PKT_DATA htole32(4)
94 
95 #define WG_PKT_WITH_PADDING(n)	(((n) + (16-1)) & (~(16-1)))
96 #define WG_KEY_SIZE		WG_KEY_LEN
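/*
 * WG_PKT_WITH_PADDING rounds a plaintext length up to the next multiple
 * of 16 bytes, e.g. 0 -> 0, 1 -> 16, 1420 -> 1424.
 */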
97 
98 struct wg_pkt_initiation {
99 	uint32_t		t;
100 	uint32_t		s_idx;
101 	uint8_t			ue[NOISE_PUBLIC_KEY_LEN];
102 	uint8_t			es[NOISE_PUBLIC_KEY_LEN + NOISE_AUTHTAG_LEN];
103 	uint8_t			ets[NOISE_TIMESTAMP_LEN + NOISE_AUTHTAG_LEN];
104 	struct cookie_macs	m;
105 };
106 
107 struct wg_pkt_response {
108 	uint32_t		t;
109 	uint32_t		s_idx;
110 	uint32_t		r_idx;
111 	uint8_t			ue[NOISE_PUBLIC_KEY_LEN];
112 	uint8_t			en[0 + NOISE_AUTHTAG_LEN];
113 	struct cookie_macs	m;
114 };
115 
116 struct wg_pkt_cookie {
117 	uint32_t		t;
118 	uint32_t		r_idx;
119 	uint8_t			nonce[COOKIE_NONCE_SIZE];
120 	uint8_t			ec[COOKIE_ENCRYPTED_SIZE];
121 };
122 
123 struct wg_pkt_data {
124 	uint32_t		t;
125 	uint32_t		r_idx;
126 	uint8_t			nonce[sizeof(uint64_t)];
127 	uint8_t			buf[];
128 };
129 
130 struct wg_endpoint {
131 	union {
132 		struct sockaddr		r_sa;
133 		struct sockaddr_in	r_sin;
134 #ifdef INET6
135 		struct sockaddr_in6	r_sin6;
136 #endif
137 	} e_remote;
138 	union {
139 		struct in_addr		l_in;
140 #ifdef INET6
141 		struct in6_pktinfo	l_pktinfo6;
142 #define l_in6 l_pktinfo6.ipi6_addr
143 #endif
144 	} e_local;
145 };
146 
147 struct wg_tag {
148 	struct wg_endpoint	 t_endpoint;
149 	struct wg_peer		*t_peer;
150 	struct mbuf		*t_mbuf;
151 	int			 t_done;
152 	int			 t_mtu;
153 };
154 
155 struct wg_index {
156 	LIST_ENTRY(wg_index)	 i_entry;
157 	SLIST_ENTRY(wg_index)	 i_unused_entry;
158 	uint32_t		 i_key;
159 	struct noise_remote	*i_value;
160 };
161 
162 struct wg_timers {
163 	/* t_mtx is for blocking wg_timers_event_* when setting t_disabled. */
164 	struct mutex		 t_mtx;
165 
166 	int			 t_disabled;
167 	int			 t_need_another_keepalive;
168 	uint16_t		 t_persistent_keepalive_interval;
169 	struct timeout		 t_new_handshake;
170 	struct timeout		 t_send_keepalive;
171 	struct timeout		 t_retry_handshake;
172 	struct timeout		 t_zero_key_material;
173 	struct timeout		 t_persistent_keepalive;
174 
175 	struct mutex		 t_handshake_mtx;
176 	struct timespec		 t_handshake_last_sent;	/* nanouptime */
177 	struct timespec		 t_handshake_complete;	/* nanotime */
178 	int			 t_handshake_retries;
179 };
180 
181 struct wg_aip {
182 	struct art_node		 a_node;
183 	LIST_ENTRY(wg_aip)	 a_entry;
184 	struct wg_peer		*a_peer;
185 	struct wg_aip_io	 a_data;
186 };
187 
188 struct wg_queue {
189 	struct mutex		 q_mtx;
190 	struct mbuf_list	 q_list;
191 };
192 
193 struct wg_ring {
194 	struct mutex	 r_mtx;
195 	uint32_t	 r_head;
196 	uint32_t	 r_tail;
197 	struct mbuf	*r_buf[MAX_QUEUED_PKT];
198 };
199 
200 struct wg_peer {
201 	LIST_ENTRY(wg_peer)	 p_pubkey_entry;
202 	TAILQ_ENTRY(wg_peer)	 p_seq_entry;
203 	uint64_t		 p_id;
204 	struct wg_softc		*p_sc;
205 
206 	struct noise_remote	 p_remote;
207 	struct cookie_maker	 p_cookie;
208 	struct wg_timers	 p_timers;
209 
210 	struct mutex		 p_counters_mtx;
211 	uint64_t		 p_counters_tx;
212 	uint64_t		 p_counters_rx;
213 
214 	struct mutex		 p_endpoint_mtx;
215 	struct wg_endpoint	 p_endpoint;
216 
217 	struct task		 p_send_initiation;
218 	struct task		 p_send_keepalive;
219 	struct task		 p_clear_secrets;
220 	struct task		 p_deliver_out;
221 	struct task		 p_deliver_in;
222 
223 	struct mbuf_queue	 p_stage_queue;
224 	struct wg_queue		 p_encap_queue;
225 	struct wg_queue		 p_decap_queue;
226 
227 	SLIST_HEAD(,wg_index)	 p_unused_index;
228 	struct wg_index		 p_index[3];
229 
230 	LIST_HEAD(,wg_aip)	 p_aip;
231 
232 	SLIST_ENTRY(wg_peer)	 p_start_list;
233 	int			 p_start_onlist;
234 
235 	char			 p_description[IFDESCRSIZE];
236 };
237 
238 struct wg_softc {
239 	struct ifnet		 sc_if;
240 	SIPHASH_KEY		 sc_secret;
241 
242 	struct rwlock		 sc_lock;
243 	struct noise_local	 sc_local;
244 	struct cookie_checker	 sc_cookie;
245 	in_port_t		 sc_udp_port;
246 	int			 sc_udp_rtable;
247 
248 	struct rwlock		 sc_so_lock;
249 	struct socket		*sc_so4;
250 #ifdef INET6
251 	struct socket		*sc_so6;
252 #endif
253 
254 	size_t			 sc_aip_num;
255 	struct art_root		*sc_aip4;
256 #ifdef INET6
257 	struct art_root		*sc_aip6;
258 #endif
259 
260 	struct rwlock		 sc_peer_lock;
261 	size_t			 sc_peer_num;
262 	LIST_HEAD(,wg_peer)	*sc_peer;
263 	TAILQ_HEAD(,wg_peer)	 sc_peer_seq;
264 	u_long			 sc_peer_mask;
265 
266 	struct mutex		 sc_index_mtx;
267 	LIST_HEAD(,wg_index)	*sc_index;
268 	u_long			 sc_index_mask;
269 
270 	struct task		 sc_handshake;
271 	struct mbuf_queue	 sc_handshake_queue;
272 
273 	struct task		 sc_encap;
274 	struct task		 sc_decap;
275 	struct wg_ring		 sc_encap_ring;
276 	struct wg_ring		 sc_decap_ring;
277 };
278 
279 struct wg_peer *
280 	wg_peer_create(struct wg_softc *, uint8_t[WG_KEY_SIZE]);
281 struct wg_peer *
282 	wg_peer_lookup(struct wg_softc *, const uint8_t[WG_KEY_SIZE]);
283 void	wg_peer_destroy(struct wg_peer *);
284 void	wg_peer_set_endpoint_from_tag(struct wg_peer *, struct wg_tag *);
285 void	wg_peer_set_sockaddr(struct wg_peer *, struct sockaddr *);
286 int	wg_peer_get_sockaddr(struct wg_peer *, struct sockaddr *);
287 void	wg_peer_clear_src(struct wg_peer *);
288 void	wg_peer_get_endpoint(struct wg_peer *, struct wg_endpoint *);
289 void	wg_peer_counters_add(struct wg_peer *, uint64_t, uint64_t);
290 
291 int	wg_aip_add(struct wg_softc *, struct wg_peer *, struct wg_aip_io *);
292 struct wg_peer *
293 	wg_aip_lookup(struct art_root *, void *);
294 int	wg_aip_remove(struct wg_softc *, struct wg_peer *,
295 	    struct wg_aip_io *);
296 
297 int	wg_socket_open(struct socket **, int, in_port_t *, int *, void *);
298 void	wg_socket_close(struct socket **);
299 int	wg_bind(struct wg_softc *, in_port_t *, int *);
300 void	wg_unbind(struct wg_softc *);
301 int	wg_send(struct wg_softc *, struct wg_endpoint *, struct mbuf *);
302 void	wg_send_buf(struct wg_softc *, struct wg_endpoint *, uint8_t *,
303 	    size_t);
304 
305 struct wg_tag *
306 	wg_tag_get(struct mbuf *);
307 
308 void	wg_timers_init(struct wg_timers *);
309 void	wg_timers_enable(struct wg_timers *);
310 void	wg_timers_disable(struct wg_timers *);
311 void	wg_timers_set_persistent_keepalive(struct wg_timers *, uint16_t);
312 int	wg_timers_get_persistent_keepalive(struct wg_timers *, uint16_t *);
313 void	wg_timers_get_last_handshake(struct wg_timers *, struct timespec *);
314 int	wg_timers_expired_handshake_last_sent(struct wg_timers *);
315 int	wg_timers_check_handshake_last_sent(struct wg_timers *);
316 
317 void	wg_timers_event_data_sent(struct wg_timers *);
318 void	wg_timers_event_data_received(struct wg_timers *);
319 void	wg_timers_event_any_authenticated_packet_sent(struct wg_timers *);
320 void	wg_timers_event_any_authenticated_packet_received(struct wg_timers *);
321 void	wg_timers_event_handshake_initiated(struct wg_timers *);
322 void	wg_timers_event_handshake_responded(struct wg_timers *);
323 void	wg_timers_event_handshake_complete(struct wg_timers *);
324 void	wg_timers_event_session_derived(struct wg_timers *);
325 void	wg_timers_event_any_authenticated_packet_traversal(struct wg_timers *);
326 void	wg_timers_event_want_initiation(struct wg_timers *);
327 void	wg_timers_event_reset_handshake_last_sent(struct wg_timers *);
328 
329 void	wg_timers_run_send_initiation(void *, int);
330 void	wg_timers_run_retry_handshake(void *);
331 void	wg_timers_run_send_keepalive(void *);
332 void	wg_timers_run_new_handshake(void *);
333 void	wg_timers_run_zero_key_material(void *);
334 void	wg_timers_run_persistent_keepalive(void *);
335 
336 void	wg_peer_send_buf(struct wg_peer *, uint8_t *, size_t);
337 void	wg_send_initiation(void *);
338 void	wg_send_response(struct wg_peer *);
339 void	wg_send_cookie(struct wg_softc *, struct cookie_macs *, uint32_t,
340 	    struct wg_endpoint *);
341 void	wg_send_keepalive(void *);
342 void	wg_peer_clear_secrets(void *);
343 void	wg_handshake(struct wg_softc *, struct mbuf *);
344 void	wg_handshake_worker(void *);
345 
346 void	wg_encap(struct wg_softc *, struct mbuf *);
347 void	wg_decap(struct wg_softc *, struct mbuf *);
348 void	wg_encap_worker(void *);
349 void	wg_decap_worker(void *);
350 void	wg_deliver_out(void *);
351 void	wg_deliver_in(void *);
352 
353 int	wg_queue_in(struct wg_softc *, struct wg_peer *, struct mbuf *);
354 void	wg_queue_out(struct wg_softc *, struct wg_peer *);
355 struct mbuf *
356 	wg_ring_dequeue(struct wg_ring *);
357 struct mbuf *
358 	wg_queue_dequeue(struct wg_queue *, struct wg_tag **);
359 size_t	wg_queue_len(struct wg_queue *);
360 
361 struct noise_remote *
362 	wg_remote_get(void *, uint8_t[NOISE_PUBLIC_KEY_LEN]);
363 uint32_t
364 	wg_index_set(void *, struct noise_remote *);
365 struct noise_remote *
366 	wg_index_get(void *, uint32_t);
367 void	wg_index_drop(void *, uint32_t);
368 
369 struct mbuf *
370 	wg_input(void *, struct mbuf *, struct ip *, struct ip6_hdr *, void *,
371 	    int);
372 int	wg_output(struct ifnet *, struct mbuf *, struct sockaddr *,
373 	    struct rtentry *);
374 int	wg_ioctl_set(struct wg_softc *, struct wg_data_io *);
375 int	wg_ioctl_get(struct wg_softc *, struct wg_data_io *);
376 int	wg_ioctl(struct ifnet *, u_long, caddr_t);
377 int	wg_up(struct wg_softc *);
378 void	wg_down(struct wg_softc *);
379 
380 int	wg_clone_create(struct if_clone *, int);
381 int	wg_clone_destroy(struct ifnet *);
382 void	wgattach(int);
383 
384 uint64_t	peer_counter = 0;
385 struct pool	wg_aip_pool;
386 struct pool	wg_peer_pool;
387 struct pool	wg_ratelimit_pool;
388 struct timeval	underload_interval = { UNDERLOAD_TIMEOUT, 0 };
389 
390 size_t		 wg_counter = 0;
391 struct taskq	*wg_handshake_taskq;
392 struct taskq	*wg_crypt_taskq;
393 
394 struct if_clone	wg_cloner =
395     IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);
396 
397 struct wg_peer *
398 wg_peer_create(struct wg_softc *sc, uint8_t public[WG_KEY_SIZE])
399 {
400 	struct wg_peer	*peer;
401 	uint64_t	 idx;
402 
403 	rw_assert_wrlock(&sc->sc_lock);
404 
405 	if (sc->sc_peer_num >= MAX_PEERS_PER_IFACE)
406 		return NULL;
407 
408 	if ((peer = pool_get(&wg_peer_pool, PR_NOWAIT)) == NULL)
409 		return NULL;
410 
411 	peer->p_id = peer_counter++;
412 	peer->p_sc = sc;
413 
414 	noise_remote_init(&peer->p_remote, public, &sc->sc_local);
415 	cookie_maker_init(&peer->p_cookie, public);
416 	wg_timers_init(&peer->p_timers);
417 
418 	mtx_init(&peer->p_counters_mtx, IPL_NET);
419 	peer->p_counters_tx = 0;
420 	peer->p_counters_rx = 0;
421 
422 	strlcpy(peer->p_description, "", IFDESCRSIZE);
423 
424 	mtx_init(&peer->p_endpoint_mtx, IPL_NET);
425 	bzero(&peer->p_endpoint, sizeof(peer->p_endpoint));
426 
427 	task_set(&peer->p_send_initiation, wg_send_initiation, peer);
428 	task_set(&peer->p_send_keepalive, wg_send_keepalive, peer);
429 	task_set(&peer->p_clear_secrets, wg_peer_clear_secrets, peer);
430 	task_set(&peer->p_deliver_out, wg_deliver_out, peer);
431 	task_set(&peer->p_deliver_in, wg_deliver_in, peer);
432 
433 	mq_init(&peer->p_stage_queue, MAX_STAGED_PKT, IPL_NET);
434 	mtx_init(&peer->p_encap_queue.q_mtx, IPL_NET);
435 	ml_init(&peer->p_encap_queue.q_list);
436 	mtx_init(&peer->p_decap_queue.q_mtx, IPL_NET);
437 	ml_init(&peer->p_decap_queue.q_list);
438 
439 	SLIST_INIT(&peer->p_unused_index);
440 	SLIST_INSERT_HEAD(&peer->p_unused_index, &peer->p_index[0],
441 	    i_unused_entry);
442 	SLIST_INSERT_HEAD(&peer->p_unused_index, &peer->p_index[1],
443 	    i_unused_entry);
444 	SLIST_INSERT_HEAD(&peer->p_unused_index, &peer->p_index[2],
445 	    i_unused_entry);
446 
447 	LIST_INIT(&peer->p_aip);
448 
449 	peer->p_start_onlist = 0;
450 
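	/* Bucket the peer by a SipHash of its public key, keyed with the
	 * per-interface secret so bucket placement is not predictable. */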
451 	idx = SipHash24(&sc->sc_secret, public, WG_KEY_SIZE);
452 	idx &= sc->sc_peer_mask;
453 
454 	rw_enter_write(&sc->sc_peer_lock);
455 	LIST_INSERT_HEAD(&sc->sc_peer[idx], peer, p_pubkey_entry);
456 	TAILQ_INSERT_TAIL(&sc->sc_peer_seq, peer, p_seq_entry);
457 	sc->sc_peer_num++;
458 	rw_exit_write(&sc->sc_peer_lock);
459 
460 	WGPRINTF(LOG_INFO, sc, NULL, "Peer %llu created\n", peer->p_id);
461 	return peer;
462 }
463 
464 struct wg_peer *
465 wg_peer_lookup(struct wg_softc *sc, const uint8_t public[WG_KEY_SIZE])
466 {
467 	uint8_t		 peer_key[WG_KEY_SIZE];
468 	struct wg_peer	*peer;
469 	uint64_t	 idx;
470 
471 	idx = SipHash24(&sc->sc_secret, public, WG_KEY_SIZE);
472 	idx &= sc->sc_peer_mask;
473 
474 	rw_enter_read(&sc->sc_peer_lock);
475 	LIST_FOREACH(peer, &sc->sc_peer[idx], p_pubkey_entry) {
476 		noise_remote_keys(&peer->p_remote, peer_key, NULL);
477 		if (timingsafe_bcmp(peer_key, public, WG_KEY_SIZE) == 0)
478 			goto done;
479 	}
480 	peer = NULL;
481 done:
482 	rw_exit_read(&sc->sc_peer_lock);
483 	return peer;
484 }
485 
486 void
487 wg_peer_destroy(struct wg_peer *peer)
488 {
489 	struct wg_softc	*sc = peer->p_sc;
490 	struct wg_aip *aip, *taip;
491 
492 	rw_assert_wrlock(&sc->sc_lock);
493 
494 	/*
495 	 * Remove peer from the pubkey hashtable and disable all timeouts.
496 	 * After this, once wg_handshake_taskq has been flushed, no more
497 	 * handshakes can be started.
498 	 */
499 	rw_enter_write(&sc->sc_peer_lock);
500 	LIST_REMOVE(peer, p_pubkey_entry);
501 	TAILQ_REMOVE(&sc->sc_peer_seq, peer, p_seq_entry);
502 	sc->sc_peer_num--;
503 	rw_exit_write(&sc->sc_peer_lock);
504 
505 	wg_timers_disable(&peer->p_timers);
506 
507 	taskq_barrier(wg_handshake_taskq);
508 
509 	/*
510 	 * Now remove all allowed IPs so that no more outgoing packets are
511 	 * routed to the peer, then drop all indexes so that no more incoming
512 	 * packets match the peer. Finally, flush if_snd, wg_crypt_taskq and
513 	 * then nettq to ensure no more references to the peer exist.
514 	 */
515 	LIST_FOREACH_SAFE(aip, &peer->p_aip, a_entry, taip)
516 		wg_aip_remove(sc, peer, &aip->a_data);
517 
518 	noise_remote_clear(&peer->p_remote);
519 
520 	NET_LOCK();
521 	while (!ifq_empty(&sc->sc_if.if_snd)) {
522 		/*
523 		 * XXX: `if_snd' of stopped interface could still
524 		 * contain packets
525 		 */
526 		if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING)) {
527 			ifq_purge(&sc->sc_if.if_snd);
528 			continue;
529 		}
530 		NET_UNLOCK();
531 		tsleep_nsec(&nowake, PWAIT, "wg_ifq", 1000);
532 		NET_LOCK();
533 	}
534 	NET_UNLOCK();
535 
536 	taskq_barrier(wg_crypt_taskq);
537 	taskq_barrier(net_tq(sc->sc_if.if_index));
538 
539 	if (!mq_empty(&peer->p_stage_queue))
540 		mq_purge(&peer->p_stage_queue);
541 
542 	WGPRINTF(LOG_INFO, sc, NULL, "Peer %llu destroyed\n", peer->p_id);
543 	explicit_bzero(peer, sizeof(*peer));
544 	pool_put(&wg_peer_pool, peer);
545 }
546 
547 void
548 wg_peer_set_endpoint_from_tag(struct wg_peer *peer, struct wg_tag *t)
549 {
550 	if (memcmp(&t->t_endpoint, &peer->p_endpoint,
551 	    sizeof(t->t_endpoint)) == 0)
552 		return;
553 
554 	mtx_enter(&peer->p_endpoint_mtx);
555 	peer->p_endpoint = t->t_endpoint;
556 	mtx_leave(&peer->p_endpoint_mtx);
557 }
558 
559 void
560 wg_peer_set_sockaddr(struct wg_peer *peer, struct sockaddr *remote)
561 {
562 	mtx_enter(&peer->p_endpoint_mtx);
563 	memcpy(&peer->p_endpoint.e_remote, remote,
564 	       sizeof(peer->p_endpoint.e_remote));
565 	bzero(&peer->p_endpoint.e_local, sizeof(peer->p_endpoint.e_local));
566 	mtx_leave(&peer->p_endpoint_mtx);
567 }
568 
569 int
570 wg_peer_get_sockaddr(struct wg_peer *peer, struct sockaddr *remote)
571 {
572 	int	ret = 0;
573 
574 	mtx_enter(&peer->p_endpoint_mtx);
575 	if (peer->p_endpoint.e_remote.r_sa.sa_family != AF_UNSPEC)
576 		memcpy(remote, &peer->p_endpoint.e_remote,
577 		       sizeof(peer->p_endpoint.e_remote));
578 	else
579 		ret = ENOENT;
580 	mtx_leave(&peer->p_endpoint_mtx);
581 	return ret;
582 }
583 
584 void
585 wg_peer_clear_src(struct wg_peer *peer)
586 {
587 	mtx_enter(&peer->p_endpoint_mtx);
588 	bzero(&peer->p_endpoint.e_local, sizeof(peer->p_endpoint.e_local));
589 	mtx_leave(&peer->p_endpoint_mtx);
590 }
591 
592 void
593 wg_peer_get_endpoint(struct wg_peer *peer, struct wg_endpoint *endpoint)
594 {
595 	mtx_enter(&peer->p_endpoint_mtx);
596 	memcpy(endpoint, &peer->p_endpoint, sizeof(*endpoint));
597 	mtx_leave(&peer->p_endpoint_mtx);
598 }
599 
600 void
601 wg_peer_counters_add(struct wg_peer *peer, uint64_t tx, uint64_t rx)
602 {
603 	mtx_enter(&peer->p_counters_mtx);
604 	peer->p_counters_tx += tx;
605 	peer->p_counters_rx += rx;
606 	mtx_leave(&peer->p_counters_mtx);
607 }
608 
609 int
610 wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, struct wg_aip_io *d)
611 {
612 	struct art_root	*root;
613 	struct art_node	*node;
614 	struct wg_aip	*aip;
615 	int		 ret = 0;
616 
617 	switch (d->a_af) {
618 	case AF_INET:	root = sc->sc_aip4; break;
619 #ifdef INET6
620 	case AF_INET6:	root = sc->sc_aip6; break;
621 #endif
622 	default: return EAFNOSUPPORT;
623 	}
624 
625 	if ((aip = pool_get(&wg_aip_pool, PR_NOWAIT|PR_ZERO)) == NULL)
626 		return ENOBUFS;
627 
628 	rw_enter_write(&root->ar_lock);
629 	node = art_insert(root, &aip->a_node, &d->a_addr, d->a_cidr);
630 
631 	if (node == &aip->a_node) {
632 		aip->a_peer = peer;
633 		aip->a_data = *d;
634 		LIST_INSERT_HEAD(&peer->p_aip, aip, a_entry);
635 		sc->sc_aip_num++;
636 	} else {
637 		pool_put(&wg_aip_pool, aip);
638 		aip = (struct wg_aip *) node;
639 		if (aip->a_peer != peer) {
640 			LIST_REMOVE(aip, a_entry);
641 			LIST_INSERT_HEAD(&peer->p_aip, aip, a_entry);
642 			aip->a_peer = peer;
643 		}
644 	}
645 	rw_exit_write(&root->ar_lock);
646 	return ret;
647 }
648 
649 struct wg_peer *
650 wg_aip_lookup(struct art_root *root, void *addr)
651 {
652 	struct srp_ref	 sr;
653 	struct art_node	*node;
654 
655 	node = art_match(root, addr, &sr);
656 	srp_leave(&sr);
657 
658 	return node == NULL ? NULL : ((struct wg_aip *) node)->a_peer;
659 }
660 
661 int
662 wg_aip_remove(struct wg_softc *sc, struct wg_peer *peer, struct wg_aip_io *d)
663 {
664 	struct srp_ref	 sr;
665 	struct art_root	*root;
666 	struct art_node	*node;
667 	struct wg_aip	*aip;
668 	int		 ret = 0;
669 
670 	switch (d->a_af) {
671 	case AF_INET:	root = sc->sc_aip4; break;
672 #ifdef INET6
673 	case AF_INET6:	root = sc->sc_aip6; break;
674 #endif
675 	default: return EAFNOSUPPORT;
676 	}
677 
678 	rw_enter_write(&root->ar_lock);
679 	if ((node = art_lookup(root, &d->a_addr, d->a_cidr, &sr)) == NULL) {
680 		ret = ENOENT;
681 	} else if (((struct wg_aip *) node)->a_peer != peer) {
682 		ret = EXDEV;
683 	} else {
684 		aip = (struct wg_aip *)node;
685 		if (art_delete(root, node, &d->a_addr, d->a_cidr) == NULL)
686 			panic("art_delete failed to delete node %p", node);
687 
688 		sc->sc_aip_num--;
689 		LIST_REMOVE(aip, a_entry);
690 		pool_put(&wg_aip_pool, aip);
691 	}
692 
693 	srp_leave(&sr);
694 	rw_exit_write(&root->ar_lock);
695 	return ret;
696 }
697 
698 int
699 wg_socket_open(struct socket **so, int af, in_port_t *port,
700     int *rtable, void *upcall_arg)
701 {
702 	struct mbuf		 mhostnam, mrtable;
703 #ifdef INET6
704 	struct sockaddr_in6	*sin6;
705 #endif
706 	struct sockaddr_in	*sin;
707 	int			 ret;
708 
709 	m_inithdr(&mhostnam);
710 	m_inithdr(&mrtable);
711 
712 	bzero(mtod(&mrtable, u_int *), sizeof(u_int));
713 	*mtod(&mrtable, u_int *) = *rtable;
714 	mrtable.m_len = sizeof(u_int);
715 
716 	if (af == AF_INET) {
717 		sin = mtod(&mhostnam, struct sockaddr_in *);
718 		bzero(sin, sizeof(*sin));
719 		sin->sin_len = sizeof(*sin);
720 		sin->sin_family = AF_INET;
721 		sin->sin_port = *port;
722 		sin->sin_addr.s_addr = INADDR_ANY;
723 		mhostnam.m_len = sin->sin_len;
724 #ifdef INET6
725 	} else if (af == AF_INET6) {
726 		sin6 = mtod(&mhostnam, struct sockaddr_in6 *);
727 		bzero(sin6, sizeof(*sin6));
728 		sin6->sin6_len = sizeof(*sin6);
729 		sin6->sin6_family = AF_INET6;
730 		sin6->sin6_port = *port;
731 		sin6->sin6_addr = (struct in6_addr) { .s6_addr = { 0 } };
732 		mhostnam.m_len = sin6->sin6_len;
733 #endif
734 	} else {
735 		return EAFNOSUPPORT;
736 	}
737 
738 	if ((ret = socreate(af, so, SOCK_DGRAM, 0)) != 0)
739 		return ret;
740 
741 	solock(*so);
742 	sotoinpcb(*so)->inp_upcall = wg_input;
743 	sotoinpcb(*so)->inp_upcall_arg = upcall_arg;
744 	sounlock(*so);
745 
746 	if ((ret = sosetopt(*so, SOL_SOCKET, SO_RTABLE, &mrtable)) == 0) {
747 		solock(*so);
748 		if ((ret = sobind(*so, &mhostnam, curproc)) == 0) {
749 			*port = sotoinpcb(*so)->inp_lport;
750 			*rtable = sotoinpcb(*so)->inp_rtableid;
751 		}
752 		sounlock(*so);
753 	}
754 
755 	if (ret != 0)
756 		wg_socket_close(so);
757 
758 	return ret;
759 }
760 
761 void
762 wg_socket_close(struct socket **so)
763 {
764 	if (*so != NULL && soclose(*so, 0) != 0)
765 		panic("Unable to close wg socket");
766 	*so = NULL;
767 }
768 
769 int
770 wg_bind(struct wg_softc *sc, in_port_t *portp, int *rtablep)
771 {
772 	int		 ret = 0, rtable = *rtablep;
773 	in_port_t	 port = *portp;
774 	struct socket	*so4;
775 #ifdef INET6
776 	struct socket	*so6;
777 	int		 retries = 0;
778 retry:
779 #endif
780 	if ((ret = wg_socket_open(&so4, AF_INET, &port, &rtable, sc)) != 0)
781 		return ret;
782 
783 #ifdef INET6
784 	if ((ret = wg_socket_open(&so6, AF_INET6, &port, &rtable, sc)) != 0) {
785 		if (ret == EADDRINUSE && *portp == 0 && retries++ < 100)
786 			goto retry;
787 		wg_socket_close(&so4);
788 		return ret;
789 	}
790 #endif
791 
792 	rw_enter_write(&sc->sc_so_lock);
793 	wg_socket_close(&sc->sc_so4);
794 	sc->sc_so4 = so4;
795 #ifdef INET6
796 	wg_socket_close(&sc->sc_so6);
797 	sc->sc_so6 = so6;
798 #endif
799 	rw_exit_write(&sc->sc_so_lock);
800 
801 	*portp = port;
802 	*rtablep = rtable;
803 	return 0;
804 }
805 
806 void
807 wg_unbind(struct wg_softc *sc)
808 {
809 	rw_enter_write(&sc->sc_so_lock);
810 	wg_socket_close(&sc->sc_so4);
811 #ifdef INET6
812 	wg_socket_close(&sc->sc_so6);
813 #endif
814 	rw_exit_write(&sc->sc_so_lock);
815 }
816 
817 int
818 wg_send(struct wg_softc *sc, struct wg_endpoint *e, struct mbuf *m)
819 {
820 	struct mbuf	 peernam, *control = NULL;
821 	int		 ret;
822 
823 	/* Get local control address before locking */
824 	if (e->e_remote.r_sa.sa_family == AF_INET) {
825 		if (e->e_local.l_in.s_addr != INADDR_ANY)
826 			control = sbcreatecontrol(&e->e_local.l_in,
827 			    sizeof(struct in_addr), IP_SENDSRCADDR,
828 			    IPPROTO_IP);
829 #ifdef INET6
830 	} else if (e->e_remote.r_sa.sa_family == AF_INET6) {
831 		if (!IN6_IS_ADDR_UNSPECIFIED(&e->e_local.l_in6))
832 			control = sbcreatecontrol(&e->e_local.l_pktinfo6,
833 			    sizeof(struct in6_pktinfo), IPV6_PKTINFO,
834 			    IPPROTO_IPV6);
835 #endif
836 	} else {
837 		m_freem(m);
838 		return EAFNOSUPPORT;
839 	}
840 
841 	/* Get remote address */
842 	peernam.m_type = MT_SONAME;
843 	peernam.m_next = NULL;
844 	peernam.m_nextpkt = NULL;
845 	peernam.m_data = (void *)&e->e_remote.r_sa;
846 	peernam.m_len = e->e_remote.r_sa.sa_len;
847 	peernam.m_flags = 0;
848 
849 	rw_enter_read(&sc->sc_so_lock);
850 	if (e->e_remote.r_sa.sa_family == AF_INET && sc->sc_so4 != NULL)
851 		ret = sosend(sc->sc_so4, &peernam, NULL, m, control, 0);
852 #ifdef INET6
853 	else if (e->e_remote.r_sa.sa_family == AF_INET6 && sc->sc_so6 != NULL)
854 		ret = sosend(sc->sc_so6, &peernam, NULL, m, control, 0);
855 #endif
856 	else {
857 		ret = ENOTCONN;
858 		m_freem(control);
859 		m_freem(m);
860 	}
861 	rw_exit_read(&sc->sc_so_lock);
862 
863 	return ret;
864 }
865 
866 void
867 wg_send_buf(struct wg_softc *sc, struct wg_endpoint *e, uint8_t *buf,
868     size_t len)
869 {
870 	struct mbuf	*m;
871 	int		 ret = 0;
872 	size_t		 mlen = len + max_hdr;
873 
874 retry:
875 	m = m_gethdr(M_WAIT, MT_DATA);
876 	if (mlen > MHLEN)
877 		MCLGETL(m, M_WAIT, mlen);
878 	m_align(m, len);
879 	m->m_pkthdr.len = m->m_len = len;
880 	memcpy(mtod(m, void *), buf, len);
881 
882 	/* As we're sending a handshake packet here, we want high priority */
883 	m->m_pkthdr.pf.prio = IFQ_MAXPRIO;
884 
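	/*
	 * On the first pass ret is 0: send, and if we could not bind to
	 * e->e_local, clear it and retry. On the retry ret is nonzero, so
	 * the else branch sends once more and only logs a failure instead
	 * of retrying forever.
	 */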
885 	if (ret == 0) {
886 		ret = wg_send(sc, e, m);
887 		/* Retry if we couldn't bind to e->e_local */
888 		if (ret == EADDRNOTAVAIL) {
889 			bzero(&e->e_local, sizeof(e->e_local));
890 			goto retry;
891 		}
892 	} else {
893 		ret = wg_send(sc, e, m);
894 		if (ret != 0)
895 			WGPRINTF(LOG_DEBUG, sc, NULL,
896 			    "Unable to send packet\n");
897 	}
898 }
899 
900 struct wg_tag *
901 wg_tag_get(struct mbuf *m)
902 {
903 	struct m_tag	*mtag;
904 
905 	if ((mtag = m_tag_find(m, PACKET_TAG_WIREGUARD, NULL)) == NULL) {
906 		mtag = m_tag_get(PACKET_TAG_WIREGUARD, sizeof(struct wg_tag),
907 		    M_NOWAIT);
908 		if (mtag == NULL)
909 			return (NULL);
910 		bzero(mtag + 1, sizeof(struct wg_tag));
911 		m_tag_prepend(m, mtag);
912 	}
913 	return ((struct wg_tag *)(mtag + 1));
914 }
915 
916 /*
917  * The following section handles the timeout callbacks for a WireGuard session.
918  * These functions provide an "event based" model for controlling wg(8) session
919  * timers. All function calls occur after the specified event below.
920  *
921  * wg_timers_event_data_sent:
922  *	tx: data
923  * wg_timers_event_data_received:
924  *	rx: data
925  * wg_timers_event_any_authenticated_packet_sent:
926  *	tx: keepalive, data, handshake
927  * wg_timers_event_any_authenticated_packet_received:
928  *	rx: keepalive, data, handshake
929  * wg_timers_event_any_authenticated_packet_traversal:
930  *	tx, rx: keepalive, data, handshake
931  * wg_timers_event_handshake_initiated:
932  *	tx: initiation
933  * wg_timers_event_handshake_responded:
934  *	tx: response
935  * wg_timers_event_handshake_complete:
936  *	rx: response, confirmation data
937  * wg_timers_event_session_derived:
938  *	tx: response, rx: response
939  * wg_timers_event_want_initiation:
940  *	tx: data failed, old keys expiring
941  * wg_timers_event_reset_handshake_last_sent:
942  * 	anytime we may immediately want a new handshake
943  */
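/*
 * Illustrative flow: sending data arms t_new_handshake via
 * wg_timers_event_data_sent(); receiving any authenticated packet cancels
 * it again in wg_timers_event_any_authenticated_packet_received(). If the
 * timeout fires first, wg_timers_run_new_handshake() queues a fresh
 * initiation.
 */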
944 void
945 wg_timers_init(struct wg_timers *t)
946 {
947 	bzero(t, sizeof(*t));
948 	mtx_init_flags(&t->t_mtx, IPL_NET, "wg_timers", 0);
949 	mtx_init(&t->t_handshake_mtx, IPL_NET);
950 
951 	timeout_set(&t->t_new_handshake, wg_timers_run_new_handshake, t);
952 	timeout_set(&t->t_send_keepalive, wg_timers_run_send_keepalive, t);
953 	timeout_set(&t->t_retry_handshake, wg_timers_run_retry_handshake, t);
954 	timeout_set(&t->t_persistent_keepalive,
955 	    wg_timers_run_persistent_keepalive, t);
956 	timeout_set(&t->t_zero_key_material,
957 	    wg_timers_run_zero_key_material, t);
958 }
959 
960 void
961 wg_timers_enable(struct wg_timers *t)
962 {
963 	mtx_enter(&t->t_mtx);
964 	t->t_disabled = 0;
965 	mtx_leave(&t->t_mtx);
966 	wg_timers_run_persistent_keepalive(t);
967 }
968 
969 void
970 wg_timers_disable(struct wg_timers *t)
971 {
972 	mtx_enter(&t->t_mtx);
973 	t->t_disabled = 1;
974 	t->t_need_another_keepalive = 0;
975 	mtx_leave(&t->t_mtx);
976 
977 	timeout_del_barrier(&t->t_new_handshake);
978 	timeout_del_barrier(&t->t_send_keepalive);
979 	timeout_del_barrier(&t->t_retry_handshake);
980 	timeout_del_barrier(&t->t_persistent_keepalive);
981 	timeout_del_barrier(&t->t_zero_key_material);
982 }
983 
984 void
985 wg_timers_set_persistent_keepalive(struct wg_timers *t, uint16_t interval)
986 {
987 	mtx_enter(&t->t_mtx);
988 	if (!t->t_disabled) {
989 		t->t_persistent_keepalive_interval = interval;
990 		wg_timers_run_persistent_keepalive(t);
991 	}
992 	mtx_leave(&t->t_mtx);
993 }
994 
995 int
996 wg_timers_get_persistent_keepalive(struct wg_timers *t, uint16_t *interval)
997 {
998 	*interval = t->t_persistent_keepalive_interval;
999 	return *interval > 0 ? 0 : ENOENT;
1000 }
1001 
1002 void
1003 wg_timers_get_last_handshake(struct wg_timers *t, struct timespec *time)
1004 {
1005 	mtx_enter(&t->t_handshake_mtx);
1006 	*time = t->t_handshake_complete;
1007 	mtx_leave(&t->t_handshake_mtx);
1008 }
1009 
1010 int
1011 wg_timers_expired_handshake_last_sent(struct wg_timers *t)
1012 {
1013 	struct timespec uptime;
1014 	struct timespec expire = { .tv_sec = REKEY_TIMEOUT, .tv_nsec = 0 };
1015 
1016 	getnanouptime(&uptime);
1017 	timespecadd(&t->t_handshake_last_sent, &expire, &expire);
1018 	return timespeccmp(&uptime, &expire, >) ? ETIMEDOUT : 0;
1019 }
1020 
1021 int
1022 wg_timers_check_handshake_last_sent(struct wg_timers *t)
1023 {
1024 	int ret;
1025 	mtx_enter(&t->t_handshake_mtx);
1026 	if ((ret = wg_timers_expired_handshake_last_sent(t)) == ETIMEDOUT)
1027 		getnanouptime(&t->t_handshake_last_sent);
1028 	mtx_leave(&t->t_handshake_mtx);
1029 	return ret;
1030 }
1031 
1032 void
1033 wg_timers_event_data_sent(struct wg_timers *t)
1034 {
1035 	int	msecs = NEW_HANDSHAKE_TIMEOUT * 1000;
1036 	msecs += arc4random_uniform(REKEY_TIMEOUT_JITTER);
1037 
1038 	mtx_enter(&t->t_mtx);
1039 	if (!t->t_disabled && !timeout_pending(&t->t_new_handshake))
1040 		timeout_add_msec(&t->t_new_handshake, msecs);
1041 	mtx_leave(&t->t_mtx);
1042 }
1043 
1044 void
1045 wg_timers_event_data_received(struct wg_timers *t)
1046 {
1047 	mtx_enter(&t->t_mtx);
1048 	if (!t->t_disabled) {
1049 		if (!timeout_pending(&t->t_send_keepalive))
1050 			timeout_add_sec(&t->t_send_keepalive,
1051 			    KEEPALIVE_TIMEOUT);
1052 		else
1053 			t->t_need_another_keepalive = 1;
1054 	}
1055 	mtx_leave(&t->t_mtx);
1056 }
1057 
1058 void
1059 wg_timers_event_any_authenticated_packet_sent(struct wg_timers *t)
1060 {
1061 	timeout_del(&t->t_send_keepalive);
1062 }
1063 
1064 void
1065 wg_timers_event_any_authenticated_packet_received(struct wg_timers *t)
1066 {
1067 	timeout_del(&t->t_new_handshake);
1068 }
1069 
1070 void
1071 wg_timers_event_any_authenticated_packet_traversal(struct wg_timers *t)
1072 {
1073 	mtx_enter(&t->t_mtx);
1074 	if (!t->t_disabled && t->t_persistent_keepalive_interval > 0)
1075 		timeout_add_sec(&t->t_persistent_keepalive,
1076 		    t->t_persistent_keepalive_interval);
1077 	mtx_leave(&t->t_mtx);
1078 }
1079 
1080 void
1081 wg_timers_event_handshake_initiated(struct wg_timers *t)
1082 {
1083 	int	msecs = REKEY_TIMEOUT * 1000;
1084 	msecs += arc4random_uniform(REKEY_TIMEOUT_JITTER);
1085 
1086 	mtx_enter(&t->t_mtx);
1087 	if (!t->t_disabled)
1088 		timeout_add_msec(&t->t_retry_handshake, msecs);
1089 	mtx_leave(&t->t_mtx);
1090 }
1091 
1092 void
1093 wg_timers_event_handshake_responded(struct wg_timers *t)
1094 {
1095 	mtx_enter(&t->t_handshake_mtx);
1096 	getnanouptime(&t->t_handshake_last_sent);
1097 	mtx_leave(&t->t_handshake_mtx);
1098 }
1099 
1100 void
1101 wg_timers_event_handshake_complete(struct wg_timers *t)
1102 {
1103 	mtx_enter(&t->t_mtx);
1104 	if (!t->t_disabled) {
1105 		mtx_enter(&t->t_handshake_mtx);
1106 		timeout_del(&t->t_retry_handshake);
1107 		t->t_handshake_retries = 0;
1108 		getnanotime(&t->t_handshake_complete);
1109 		mtx_leave(&t->t_handshake_mtx);
1110 		wg_timers_run_send_keepalive(t);
1111 	}
1112 	mtx_leave(&t->t_mtx);
1113 }
1114 
1115 void
1116 wg_timers_event_session_derived(struct wg_timers *t)
1117 {
1118 	mtx_enter(&t->t_mtx);
1119 	if (!t->t_disabled)
1120 		timeout_add_sec(&t->t_zero_key_material, REJECT_AFTER_TIME * 3);
1121 	mtx_leave(&t->t_mtx);
1122 }
1123 
1124 void
1125 wg_timers_event_want_initiation(struct wg_timers *t)
1126 {
1127 	mtx_enter(&t->t_mtx);
1128 	if (!t->t_disabled)
1129 		wg_timers_run_send_initiation(t, 0);
1130 	mtx_leave(&t->t_mtx);
1131 }
1132 
1133 void
1134 wg_timers_event_reset_handshake_last_sent(struct wg_timers *t)
1135 {
1136 	mtx_enter(&t->t_handshake_mtx);
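	/* Backdate the timestamp so the REKEY_TIMEOUT check expires at once. */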
1137 	t->t_handshake_last_sent.tv_sec -= (REKEY_TIMEOUT + 1);
1138 	mtx_leave(&t->t_handshake_mtx);
1139 }
1140 
1141 void
1142 wg_timers_run_send_initiation(void *_t, int is_retry)
1143 {
1144 	struct wg_timers *t = _t;
1145 	struct wg_peer	 *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1146 	if (!is_retry)
1147 		t->t_handshake_retries = 0;
1148 	if (wg_timers_expired_handshake_last_sent(t) == ETIMEDOUT)
1149 		task_add(wg_handshake_taskq, &peer->p_send_initiation);
1150 }
1151 
1152 void
1153 wg_timers_run_retry_handshake(void *_t)
1154 {
1155 	struct wg_timers *t = _t;
1156 	struct wg_peer	 *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1157 	char		  ipaddr[INET6_ADDRSTRLEN];
1158 
1159 	mtx_enter(&t->t_handshake_mtx);
1160 	if (t->t_handshake_retries <= MAX_TIMER_HANDSHAKES) {
1161 		t->t_handshake_retries++;
1162 		mtx_leave(&t->t_handshake_mtx);
1163 
1164 		WGPRINTF(LOG_INFO, peer->p_sc, &peer->p_endpoint_mtx,
1165 		    "Handshake for peer %llu (%s) did not complete after %d "
1166 		    "seconds, retrying (try %d)\n", peer->p_id,
1167 		    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa, ipaddr,
1168 		        sizeof(ipaddr)),
1169 		    REKEY_TIMEOUT, t->t_handshake_retries + 1);
1170 		wg_peer_clear_src(peer);
1171 		wg_timers_run_send_initiation(t, 1);
1172 	} else {
1173 		mtx_leave(&t->t_handshake_mtx);
1174 
1175 		WGPRINTF(LOG_INFO, peer->p_sc, &peer->p_endpoint_mtx,
1176 		    "Handshake for peer %llu (%s) did not complete after %d "
1177 		    "retries, giving up\n", peer->p_id,
1178 		    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa, ipaddr,
1179 		        sizeof(ipaddr)), MAX_TIMER_HANDSHAKES + 2);
1180 
1181 		timeout_del(&t->t_send_keepalive);
1182 		mq_purge(&peer->p_stage_queue);
1183 		if (!timeout_pending(&t->t_zero_key_material))
1184 			timeout_add_sec(&t->t_zero_key_material,
1185 			    REJECT_AFTER_TIME * 3);
1186 	}
1187 }
1188 
1189 void
1190 wg_timers_run_send_keepalive(void *_t)
1191 {
1192 	struct wg_timers *t = _t;
1193 	struct wg_peer	 *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1194 
1195 	task_add(wg_crypt_taskq, &peer->p_send_keepalive);
1196 	if (t->t_need_another_keepalive) {
1197 		t->t_need_another_keepalive = 0;
1198 		timeout_add_sec(&t->t_send_keepalive, KEEPALIVE_TIMEOUT);
1199 	}
1200 }
1201 
1202 void
1203 wg_timers_run_new_handshake(void *_t)
1204 {
1205 	struct wg_timers *t = _t;
1206 	struct wg_peer	 *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1207 	char		  ipaddr[INET6_ADDRSTRLEN];
1208 
1209 	WGPRINTF(LOG_INFO, peer->p_sc, &peer->p_endpoint_mtx,
1210 	    "Retrying handshake with peer %llu (%s) because we "
1211 	    "stopped hearing back after %d seconds\n", peer->p_id,
1212 	    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa, ipaddr,
1213 	        sizeof(ipaddr)), NEW_HANDSHAKE_TIMEOUT);
1214 	wg_peer_clear_src(peer);
1215 
1216 	wg_timers_run_send_initiation(t, 0);
1217 }
1218 
1219 void
1220 wg_timers_run_zero_key_material(void *_t)
1221 {
1222 	struct wg_timers *t = _t;
1223 	struct wg_peer	 *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1224 	char		  ipaddr[INET6_ADDRSTRLEN];
1225 
1226 	WGPRINTF(LOG_INFO, peer->p_sc, &peer->p_endpoint_mtx, "Zeroing out "
1227 	    "keys for peer %llu (%s)\n", peer->p_id,
1228 	    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa, ipaddr,
1229 	        sizeof(ipaddr)));
1230 	task_add(wg_handshake_taskq, &peer->p_clear_secrets);
1231 }
1232 
1233 void
1234 wg_timers_run_persistent_keepalive(void *_t)
1235 {
1236 	struct wg_timers *t = _t;
1237 	struct wg_peer	 *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1238 	if (t->t_persistent_keepalive_interval != 0)
1239 		task_add(wg_crypt_taskq, &peer->p_send_keepalive);
1240 }
1241 
1242 /* The following functions handle handshakes */
1243 void
1244 wg_peer_send_buf(struct wg_peer *peer, uint8_t *buf, size_t len)
1245 {
1246 	struct wg_endpoint	 endpoint;
1247 
1248 	wg_peer_counters_add(peer, len, 0);
1249 	wg_timers_event_any_authenticated_packet_traversal(&peer->p_timers);
1250 	wg_timers_event_any_authenticated_packet_sent(&peer->p_timers);
1251 	wg_peer_get_endpoint(peer, &endpoint);
1252 	wg_send_buf(peer->p_sc, &endpoint, buf, len);
1253 }
1254 
1255 void
1256 wg_send_initiation(void *_peer)
1257 {
1258 	struct wg_peer			*peer = _peer;
1259 	struct wg_pkt_initiation	 pkt;
1260 	char				 ipaddr[INET6_ADDRSTRLEN];
1261 
1262 	if (wg_timers_check_handshake_last_sent(&peer->p_timers) != ETIMEDOUT)
1263 		return;
1264 
1265 	WGPRINTF(LOG_INFO, peer->p_sc, &peer->p_endpoint_mtx, "Sending "
1266 	    "handshake initiation to peer %llu (%s)\n", peer->p_id,
1267 	    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa, ipaddr,
1268 	        sizeof(ipaddr)));
1269 
1270 	if (noise_create_initiation(&peer->p_remote, &pkt.s_idx, pkt.ue, pkt.es,
1271 				    pkt.ets) != 0)
1272 		return;
1273 	pkt.t = WG_PKT_INITIATION;
1274 	cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt,
1275 	    sizeof(pkt)-sizeof(pkt.m));
1276 	wg_peer_send_buf(peer, (uint8_t *)&pkt, sizeof(pkt));
1277 	wg_timers_event_handshake_initiated(&peer->p_timers);
1278 }
1279 
1280 void
1281 wg_send_response(struct wg_peer *peer)
1282 {
1283 	struct wg_pkt_response	 pkt;
1284 	char			 ipaddr[INET6_ADDRSTRLEN];
1285 
1286 	WGPRINTF(LOG_INFO, peer->p_sc, &peer->p_endpoint_mtx, "Sending "
1287 	    "handshake response to peer %llu (%s)\n", peer->p_id,
1288 	    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa, ipaddr,
1289 	        sizeof(ipaddr)));
1290 
1291 	if (noise_create_response(&peer->p_remote, &pkt.s_idx, &pkt.r_idx,
1292 				  pkt.ue, pkt.en) != 0)
1293 		return;
1294 	if (noise_remote_begin_session(&peer->p_remote) != 0)
1295 		return;
1296 	wg_timers_event_session_derived(&peer->p_timers);
1297 	pkt.t = WG_PKT_RESPONSE;
1298 	cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt,
1299 	    sizeof(pkt)-sizeof(pkt.m));
1300 	wg_timers_event_handshake_responded(&peer->p_timers);
1301 	wg_peer_send_buf(peer, (uint8_t *)&pkt, sizeof(pkt));
1302 }
1303 
1304 void
1305 wg_send_cookie(struct wg_softc *sc, struct cookie_macs *cm, uint32_t idx,
1306     struct wg_endpoint *e)
1307 {
1308 	struct wg_pkt_cookie	pkt;
1309 
1310 	WGPRINTF(LOG_DEBUG, sc, NULL, "Sending cookie response for denied "
1311 	    "handshake message\n");
1312 
1313 	pkt.t = WG_PKT_COOKIE;
1314 	pkt.r_idx = idx;
1315 
1316 	cookie_checker_create_payload(&sc->sc_cookie, cm, pkt.nonce,
1317 	    pkt.ec, &e->e_remote.r_sa);
1318 
1319 	wg_send_buf(sc, e, (uint8_t *)&pkt, sizeof(pkt));
1320 }
1321 
1322 void
1323 wg_send_keepalive(void *_peer)
1324 {
1325 	struct wg_peer	*peer = _peer;
1326 	struct wg_softc	*sc = peer->p_sc;
1327 	struct wg_tag	*t;
1328 	struct mbuf	*m;
1329 
1330 	if (!mq_empty(&peer->p_stage_queue))
1331 		goto send;
1332 
1333 	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1334 		return;
1335 
1336 	if ((t = wg_tag_get(m)) == NULL) {
1337 		m_freem(m);
1338 		return;
1339 	}
1340 
1341 	t->t_peer = peer;
1342 	t->t_mbuf = NULL;
1343 	t->t_done = 0;
1344 	t->t_mtu = 0; /* MTU == 0 OK for keepalive */
1345 
1346 	mq_push(&peer->p_stage_queue, m);
1347 send:
1348 	if (noise_remote_ready(&peer->p_remote) == 0) {
1349 		wg_queue_out(sc, peer);
1350 		task_add(wg_crypt_taskq, &sc->sc_encap);
1351 	} else {
1352 		wg_timers_event_want_initiation(&peer->p_timers);
1353 	}
1354 }
1355 
1356 void
1357 wg_peer_clear_secrets(void *_peer)
1358 {
1359 	struct wg_peer *peer = _peer;
1360 	noise_remote_clear(&peer->p_remote);
1361 }
1362 
1363 void
1364 wg_handshake(struct wg_softc *sc, struct mbuf *m)
1365 {
1366 	struct wg_tag			*t;
1367 	struct wg_pkt_initiation	*init;
1368 	struct wg_pkt_response		*resp;
1369 	struct wg_pkt_cookie		*cook;
1370 	struct wg_peer			*peer;
1371 	struct noise_remote		*remote;
1372 	int				 res, underload = 0;
1373 	static struct timeval		 wg_last_underload; /* microuptime */
1374 	char				 ipaddr[INET6_ADDRSTRLEN];
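	/*
	 * Treat the interface as under load while the handshake queue is
	 * more than 1/8 full, and for UNDERLOAD_TIMEOUT seconds after the
	 * backlog was last seen; under load, initiations must carry a valid
	 * cookie reply or they are answered with a cookie instead.
	 */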
1375 
1376 	if (mq_len(&sc->sc_handshake_queue) >= MAX_QUEUED_HANDSHAKES/8) {
1377 		getmicrouptime(&wg_last_underload);
1378 		underload = 1;
1379 	} else if (wg_last_underload.tv_sec != 0) {
1380 		if (!ratecheck(&wg_last_underload, &underload_interval))
1381 			underload = 1;
1382 		else
1383 			bzero(&wg_last_underload, sizeof(wg_last_underload));
1384 	}
1385 
1386 	t = wg_tag_get(m);
1387 
1388 	switch (*mtod(m, uint32_t *)) {
1389 	case WG_PKT_INITIATION:
1390 		init = mtod(m, struct wg_pkt_initiation *);
1391 
1392 		res = cookie_checker_validate_macs(&sc->sc_cookie, &init->m,
1393 				init, sizeof(*init) - sizeof(init->m),
1394 				underload, &t->t_endpoint.e_remote.r_sa);
1395 
1396 		if (res == EINVAL) {
1397 			WGPRINTF(LOG_INFO, sc, NULL, "Invalid initiation "
1398 			    "MAC from %s\n",
1399 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1400 			        sizeof(ipaddr)));
1401 			goto error;
1402 		} else if (res == ECONNREFUSED) {
1403 			WGPRINTF(LOG_DEBUG, sc, NULL, "Handshake "
1404 			    "ratelimited from %s\n",
1405 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1406 			        sizeof(ipaddr)));
1407 			goto error;
1408 		} else if (res == EAGAIN) {
1409 			wg_send_cookie(sc, &init->m, init->s_idx,
1410 			    &t->t_endpoint);
1411 			goto error;
1412 		} else if (res != 0) {
1413 			panic("unexpected response: %d", res);
1414 		}
1415 
1416 		if (noise_consume_initiation(&sc->sc_local, &remote,
1417 		    init->s_idx, init->ue, init->es, init->ets) != 0) {
1418 			WGPRINTF(LOG_INFO, sc, NULL, "Invalid handshake "
1419 			    "initiation from %s\n",
1420 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1421 			        sizeof(ipaddr)));
1422 			goto error;
1423 		}
1424 
1425 		peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
1426 
1427 		WGPRINTF(LOG_INFO, sc, NULL, "Receiving handshake initiation "
1428 		    "from peer %llu (%s)\n", peer->p_id,
1429 		    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1430 		        sizeof(ipaddr)));
1431 
1432 		wg_peer_counters_add(peer, 0, sizeof(*init));
1433 		wg_peer_set_endpoint_from_tag(peer, t);
1434 		wg_send_response(peer);
1435 		break;
1436 	case WG_PKT_RESPONSE:
1437 		resp = mtod(m, struct wg_pkt_response *);
1438 
1439 		res = cookie_checker_validate_macs(&sc->sc_cookie, &resp->m,
1440 				resp, sizeof(*resp) - sizeof(resp->m),
1441 				underload, &t->t_endpoint.e_remote.r_sa);
1442 
1443 		if (res == EINVAL) {
1444 			WGPRINTF(LOG_INFO, sc, NULL, "Invalid response "
1445 			    "MAC from %s\n",
1446 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1447 			        sizeof(ipaddr)));
1448 			goto error;
1449 		} else if (res == ECONNREFUSED) {
1450 			WGPRINTF(LOG_DEBUG, sc, NULL, "Handshake "
1451 			    "ratelimited from %s\n",
1452 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1453 			        sizeof(ipaddr)));
1454 			goto error;
1455 		} else if (res == EAGAIN) {
1456 			wg_send_cookie(sc, &resp->m, resp->s_idx,
1457 			    &t->t_endpoint);
1458 			goto error;
1459 		} else if (res != 0) {
1460 			panic("unexpected response: %d", res);
1461 		}
1462 
1463 		if ((remote = wg_index_get(sc, resp->r_idx)) == NULL) {
1464 			WGPRINTF(LOG_INFO, sc, NULL, "Unknown "
1465 			    "handshake response from %s\n",
1466 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1467 			        sizeof(ipaddr)));
1468 			goto error;
1469 		}
1470 
1471 		peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
1472 
1473 		if (noise_consume_response(remote, resp->s_idx, resp->r_idx,
1474 					   resp->ue, resp->en) != 0) {
1475 			WGPRINTF(LOG_INFO, sc, NULL, "Invalid handshake "
1476 			    "response from %s\n",
1477 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1478 			        sizeof(ipaddr)));
1479 			goto error;
1480 		}
1481 
1482 		WGPRINTF(LOG_INFO, sc, NULL, "Receiving handshake response "
1483 		    "from peer %llu (%s)\n", peer->p_id,
1484 		    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1485 		        sizeof(ipaddr)));
1486 
1487 		wg_peer_counters_add(peer, 0, sizeof(*resp));
1488 		wg_peer_set_endpoint_from_tag(peer, t);
1489 		if (noise_remote_begin_session(&peer->p_remote) == 0) {
1490 			wg_timers_event_session_derived(&peer->p_timers);
1491 			wg_timers_event_handshake_complete(&peer->p_timers);
1492 		}
1493 		break;
1494 	case WG_PKT_COOKIE:
1495 		cook = mtod(m, struct wg_pkt_cookie *);
1496 
1497 		if ((remote = wg_index_get(sc, cook->r_idx)) == NULL) {
1498 			WGPRINTF(LOG_DEBUG, sc, NULL, "Unknown cookie "
1499 			    "index from %s\n",
1500 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1501 			        sizeof(ipaddr)));
1502 			goto error;
1503 		}
1504 
1505 		peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
1506 
1507 		if (cookie_maker_consume_payload(&peer->p_cookie,
1508 		    cook->nonce, cook->ec) != 0) {
1509 			WGPRINTF(LOG_DEBUG, sc, NULL, "Could not decrypt "
1510 			    "cookie response from %s\n",
1511 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1512 			        sizeof(ipaddr)));
1513 			goto error;
1514 		}
1515 
1516 		WGPRINTF(LOG_DEBUG, sc, NULL, "Receiving cookie response "
1517 		    "from %s\n",
1518 		    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
1519 		        sizeof(ipaddr)));
1520 		goto error;
1521 	default:
1522 		panic("invalid packet in handshake queue");
1523 	}
1524 
1525 	wg_timers_event_any_authenticated_packet_received(&peer->p_timers);
1526 	wg_timers_event_any_authenticated_packet_traversal(&peer->p_timers);
1527 error:
1528 	m_freem(m);
1529 }
1530 
1531 void
1532 wg_handshake_worker(void *_sc)
1533 {
1534 	struct mbuf *m;
1535 	struct wg_softc *sc = _sc;
1536 	while ((m = mq_dequeue(&sc->sc_handshake_queue)) != NULL)
1537 		wg_handshake(sc, m);
1538 }
1539 
1540 /*
1541  * The following functions handle encapsulation (encryption) and
1542  * decapsulation (decryption). The wg_{en,de}cap functions will run in the
1543  * wg_crypt_taskq, while wg_deliver_{in,out} must be serialised and will run
1544  * in nettq.
1545  *
1546  * The packets are tracked in two queues, a serial queue and a parallel queue.
1547  *  - The parallel queue is used to distribute the encryption across multiple
1548  *    threads.
1549  *  - The serial queue ensures that packets are not reordered and are
1550  *    delivered in sequence.
1551  * The wg_tag attached to the packet contains two flags to help the two queues
1552  * interact.
1553  *  - t_done: The parallel queue has finished with the packet, now the serial
1554  *            queue can do its work.
1555  *  - t_mbuf: Used to store the *crypted packet. In the case of encryption,
1556  *            this is a newly allocated packet, and in the case of decryption,
1557  *            it is a pointer to the same packet, that has been decrypted and
1558  *            truncated. If t_mbuf is NULL, then *cryption failed and this
1559  *            packet should not be passed.
1560  * wg_{en,de}cap work on the parallel queue, while wg_deliver_{in,out} work
1561  * on the serial queue.
1562  */
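/*
 * Sketch of the hand-off (illustrative): a packet is placed on both the
 * shared parallel ring and its peer's serial queue; a crypt worker later
 * fills in t_mbuf and sets t_done, and the deliver task only dequeues
 * from the serial queue once the tag at its head is done, preserving
 * order.
 */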
1563 void
1564 wg_encap(struct wg_softc *sc, struct mbuf *m)
1565 {
1566 	int res = 0;
1567 	struct wg_pkt_data	*data;
1568 	struct wg_peer		*peer;
1569 	struct wg_tag		*t;
1570 	struct mbuf		*mc;
1571 	size_t			 padding_len, plaintext_len, out_len;
1572 	uint64_t		 nonce;
1573 	char			 ipaddr[INET6_ADDRSTRLEN];
1574 
1575 	t = wg_tag_get(m);
1576 	peer = t->t_peer;
1577 
1578 	plaintext_len = WG_PKT_WITH_PADDING(m->m_pkthdr.len);
1579 	padding_len = plaintext_len - m->m_pkthdr.len;
1580 	out_len = sizeof(struct wg_pkt_data) + plaintext_len +
1581 	    NOISE_AUTHTAG_LEN;
1582 
1583 	/*
1584 	 * For the time being we allocate a new packet with sufficient size to
1585 	 * hold the encrypted data and headers. It would be difficult to
1586 	 * overcome as p_encap_queue (mbuf_list) holds a reference to the mbuf.
1587 	 * If we m_makespace or similar, we risk corrupting that list.
1588 	 * Additionally, we only pass a buf and buf length to
1589 	 * noise_remote_encrypt. Technically it would be possible to teach
1590 	 * noise_remote_encrypt about mbufs, but we would need to sort out the
1591 	 * p_encap_queue situation first.
1592 	 */
1593 	if ((mc = m_clget(NULL, M_NOWAIT, out_len + max_hdr)) == NULL)
1594 		goto error;
1595 	m_align(mc, out_len);
1596 
1597 	data = mtod(mc, struct wg_pkt_data *);
1598 	m_copydata(m, 0, m->m_pkthdr.len, data->buf);
1599 	bzero(data->buf + m->m_pkthdr.len, padding_len);
1600 	data->t = WG_PKT_DATA;
1601 
1602 	/*
1603 	 * Copy the flow hash from the inner packet to the outer packet, so
1604 	 * that fq_codel can properly separate streams, rather than falling
1605 	 * back to random buckets.
1606 	 */
1607 	mc->m_pkthdr.ph_flowid = m->m_pkthdr.ph_flowid;
1608 
1609 	mc->m_pkthdr.pf.prio = m->m_pkthdr.pf.prio;
1610 
1611 	res = noise_remote_encrypt(&peer->p_remote, &data->r_idx, &nonce,
1612 				   data->buf, plaintext_len);
1613 	nonce = htole64(nonce); /* Wire format is little endian. */
1614 	memcpy(data->nonce, &nonce, sizeof(data->nonce));
1615 
1616 	if (__predict_false(res == EINVAL)) {
1617 		m_freem(mc);
1618 		goto error;
1619 	} else if (__predict_false(res == ESTALE)) {
1620 		wg_timers_event_want_initiation(&peer->p_timers);
1621 	} else if (__predict_false(res != 0)) {
1622 		panic("unexpected result: %d", res);
1623 	}
1624 
1625 	/* A packet with length 0 is a keepalive packet */
1626 	if (__predict_false(m->m_pkthdr.len == 0))
1627 		WGPRINTF(LOG_DEBUG, sc, &peer->p_endpoint_mtx, "Sending "
1628 		    "keepalive packet to peer %llu (%s)\n", peer->p_id,
1629 		    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa, ipaddr,
1630 		        sizeof(ipaddr)));
1631 
1632 	mc->m_pkthdr.ph_loopcnt = m->m_pkthdr.ph_loopcnt;
1633 	mc->m_flags &= ~(M_MCAST | M_BCAST);
1634 	mc->m_pkthdr.len = mc->m_len = out_len;
1635 
1636 	/*
1637 	 * We would count ifc_opackets, ifc_obytes of m here, except if_snd
1638 	 * already does that for us, so no need to worry about it.
1639 	counters_pkt(sc->sc_if.if_counters, ifc_opackets, ifc_obytes,
1640 	    m->m_pkthdr.len);
1641 	 */
1642 	wg_peer_counters_add(peer, mc->m_pkthdr.len, 0);
1643 
1644 	t->t_mbuf = mc;
1645 error:
1646 	t->t_done = 1;
1647 	task_add(net_tq(sc->sc_if.if_index), &peer->p_deliver_out);
1648 }
1649 
1650 void
1651 wg_decap(struct wg_softc *sc, struct mbuf *m)
1652 {
1653 	int			 res, len;
1654 	struct ip		*ip;
1655 	struct ip6_hdr		*ip6;
1656 	struct wg_pkt_data	*data;
1657 	struct wg_peer		*peer, *allowed_peer;
1658 	struct wg_tag		*t;
1659 	size_t			 payload_len;
1660 	uint64_t		 nonce;
1661 	char			 ipaddr[INET6_ADDRSTRLEN];
1662 
1663 	t = wg_tag_get(m);
1664 	peer = t->t_peer;
1665 
1666 	/*
1667 	 * Likewise to wg_encap, we pass a buf and buf length to
1668 	 * noise_remote_decrypt. Again, possible to teach it about mbufs
1669 	 * but need to get over the p_decap_queue situation first. However,
1670 	 * we do not need to allocate a new mbuf as the decrypted packet is
1671 	 * strictly smaller than encrypted. We just set t_mbuf to m and
1672 	 * wg_deliver_in knows how to deal with that.
1673 	 */
1674 	data = mtod(m, struct wg_pkt_data *);
1675 	payload_len = m->m_pkthdr.len - sizeof(struct wg_pkt_data);
1676 	memcpy(&nonce, data->nonce, sizeof(nonce));
1677 	nonce = le64toh(nonce); /* Wire format is little endian. */
1678 	res = noise_remote_decrypt(&peer->p_remote, data->r_idx, nonce,
1679 				   data->buf, payload_len);
1680 
1681 	if (__predict_false(res == EINVAL)) {
1682 		goto error;
1683 	} else if (__predict_false(res == ECONNRESET)) {
1684 		wg_timers_event_handshake_complete(&peer->p_timers);
1685 	} else if (__predict_false(res == ESTALE)) {
1686 		wg_timers_event_want_initiation(&peer->p_timers);
1687 	} else if (__predict_false(res != 0)) {
1688 		panic("unexpected response: %d", res);
1689 	}
1690 
1691 	wg_peer_set_endpoint_from_tag(peer, t);
1692 
1693 	wg_peer_counters_add(peer, 0, m->m_pkthdr.len);
1694 
1695 	m_adj(m, sizeof(struct wg_pkt_data));
1696 	m_adj(m, -NOISE_AUTHTAG_LEN);
1697 
1698 	counters_pkt(sc->sc_if.if_counters, ifc_ipackets, ifc_ibytes,
1699 	    m->m_pkthdr.len);
1700 
1701 	/* A packet with length 0 is a keepalive packet */
1702 	if (__predict_false(m->m_pkthdr.len == 0)) {
1703 		WGPRINTF(LOG_DEBUG, sc, &peer->p_endpoint_mtx, "Receiving "
1704 		    "keepalive packet from peer %llu (%s)\n", peer->p_id,
1705 		    sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa,
1706 		        ipaddr, sizeof(ipaddr)));
1707 		goto done;
1708 	}
1709 
1710 	/*
1711 	 * We can let the network stack handle the intricate validation of the
1712 	 * IP header; we just check the length and the version, so we can
1713 	 * read the source address in wg_aip_lookup.
1714 	 *
1715 	 * We also need to trim the packet, as it was likely padded before
1716 	 * encryption. While we could drop it here, it will be more helpful to
1717 	 * pass it to bpf_mtap and use the counters that people are expecting
1718 	 * in ipv4_input and ipv6_input. We can rely on ipv4_input and
1719 	 * ipv6_input to properly validate the headers.
1720 	 */
1721 	ip = mtod(m, struct ip *);
1722 	ip6 = mtod(m, struct ip6_hdr *);
1723 
1724 	if (m->m_pkthdr.len >= sizeof(struct ip) && ip->ip_v == IPVERSION) {
1725 		m->m_pkthdr.ph_family = AF_INET;
1726 
1727 		len = ntohs(ip->ip_len);
1728 		if (len >= sizeof(struct ip) && len < m->m_pkthdr.len)
1729 			m_adj(m, len - m->m_pkthdr.len);
1730 
1731 		allowed_peer = wg_aip_lookup(sc->sc_aip4, &ip->ip_src);
1732 #ifdef INET6
1733 	} else if (m->m_pkthdr.len >= sizeof(struct ip6_hdr) &&
1734 	    (ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION) {
1735 		m->m_pkthdr.ph_family = AF_INET6;
1736 
1737 		len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);
1738 		if (len < m->m_pkthdr.len)
1739 			m_adj(m, len - m->m_pkthdr.len);
1740 
1741 		allowed_peer = wg_aip_lookup(sc->sc_aip6, &ip6->ip6_src);
1742 #endif
1743 	} else {
1744 		WGPRINTF(LOG_WARNING, sc, &peer->p_endpoint_mtx, "Packet "
1745 		    "is neither IPv4 nor IPv6 from peer %llu (%s)\n",
1746 		    peer->p_id, sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa,
1747 		        ipaddr, sizeof(ipaddr)));
1748 		goto error;
1749 	}
1750 
1751 	if (__predict_false(peer != allowed_peer)) {
1752 		WGPRINTF(LOG_WARNING, sc, &peer->p_endpoint_mtx, "Packet "
1753 		    "has unallowed source IP from peer %llu (%s)\n",
1754 		    peer->p_id, sockaddr_ntop(&peer->p_endpoint.e_remote.r_sa,
1755 		        ipaddr, sizeof(ipaddr)));
1756 		goto error;
1757 	}
1758 
1759 	/* tunneled packet was not offloaded */
1760 	m->m_pkthdr.csum_flags = 0;
1761 
1762 	m->m_pkthdr.ph_ifidx = sc->sc_if.if_index;
1763 	m->m_pkthdr.ph_rtableid = sc->sc_if.if_rdomain;
1764 	m->m_flags &= ~(M_MCAST | M_BCAST);
1765 #if NPF > 0
1766 	pf_pkt_addr_changed(m);
1767 #endif /* NPF > 0 */
1768 
1769 done:
1770 	t->t_mbuf = m;
1771 error:
1772 	t->t_done = 1;
1773 	task_add(net_tq(sc->sc_if.if_index), &peer->p_deliver_in);
1774 }
1775 
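/*
 * The encap/decap workers run on the shared wg_crypt taskq and drain the
 * softc-wide parallel rings, doing the actual packet encryption and
 * decryption for every mbuf they dequeue.
 */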
1776 void
1777 wg_encap_worker(void *_sc)
1778 {
1779 	struct mbuf *m;
1780 	struct wg_softc *sc = _sc;
1781 	while ((m = wg_ring_dequeue(&sc->sc_encap_ring)) != NULL)
1782 		wg_encap(sc, m);
1783 }
1784 
1785 void
1786 wg_decap_worker(void *_sc)
1787 {
1788 	struct mbuf *m;
1789 	struct wg_softc *sc = _sc;
1790 	while ((m = wg_ring_dequeue(&sc->sc_decap_ring)) != NULL)
1791 		wg_decap(sc, m);
1792 }
1793 
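/*
 * wg_deliver_out is a per-peer serialised task. It drains the peer's serial
 * encap queue in order, so encrypted packets (t_mbuf) are sent to the
 * endpoint in the same order they were queued for encryption.
 */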
1794 void
1795 wg_deliver_out(void *_peer)
1796 {
1797 	struct wg_peer		*peer = _peer;
1798 	struct wg_softc		*sc = peer->p_sc;
1799 	struct wg_endpoint	 endpoint;
1800 	struct wg_tag		*t;
1801 	struct mbuf		*m;
1802 	int			 ret;
1803 
1804 	wg_peer_get_endpoint(peer, &endpoint);
1805 
1806 	while ((m = wg_queue_dequeue(&peer->p_encap_queue, &t)) != NULL) {
1807 		/* t_mbuf will contain the encrypted packet */
1808 		if (t->t_mbuf == NULL) {
1809 			counters_inc(sc->sc_if.if_counters, ifc_oerrors);
1810 			m_freem(m);
1811 			continue;
1812 		}
1813 
1814 		ret = wg_send(sc, &endpoint, t->t_mbuf);
1815 
1816 		if (ret == 0) {
1817 			wg_timers_event_any_authenticated_packet_traversal(
1818 			    &peer->p_timers);
1819 			wg_timers_event_any_authenticated_packet_sent(
1820 			    &peer->p_timers);
1821 
1822 			if (m->m_pkthdr.len != 0)
1823 				wg_timers_event_data_sent(&peer->p_timers);
1824 		} else if (ret == EADDRNOTAVAIL) {
1825 			wg_peer_clear_src(peer);
1826 			wg_peer_get_endpoint(peer, &endpoint);
1827 		}
1828 
1829 		m_freem(m);
1830 	}
1831 }
1832 
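/*
 * wg_deliver_in is the receive-side counterpart: it drains the peer's serial
 * decap queue in order and hands decrypted packets to ipv4_input/ipv6_input
 * under NET_LOCK, after updating the peer's timer state.
 */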
1833 void
1834 wg_deliver_in(void *_peer)
1835 {
1836 	struct wg_peer	*peer = _peer;
1837 	struct wg_softc	*sc = peer->p_sc;
1838 	struct wg_tag	*t;
1839 	struct mbuf	*m;
1840 
1841 	while ((m = wg_queue_dequeue(&peer->p_decap_queue, &t)) != NULL) {
1842 		/* t_mbuf will contain the decrypted packet */
1843 		if (t->t_mbuf == NULL) {
1844 			counters_inc(sc->sc_if.if_counters, ifc_ierrors);
1845 			m_freem(m);
1846 			continue;
1847 		}
1848 
1849 		/* From here on m == t->t_mbuf */
1850 		KASSERT(m == t->t_mbuf);
1851 
1852 		wg_timers_event_any_authenticated_packet_received(
1853 		    &peer->p_timers);
1854 		wg_timers_event_any_authenticated_packet_traversal(
1855 		    &peer->p_timers);
1856 
1857 		if (m->m_pkthdr.len == 0) {
1858 			m_freem(m);
1859 			continue;
1860 		}
1861 
1862 #if NBPFILTER > 0
1863 		if (sc->sc_if.if_bpf != NULL)
1864 			bpf_mtap_af(sc->sc_if.if_bpf,
1865 			    m->m_pkthdr.ph_family, m, BPF_DIRECTION_IN);
1866 #endif
1867 
1868 		NET_LOCK();
1869 		if (m->m_pkthdr.ph_family == AF_INET)
1870 			ipv4_input(&sc->sc_if, m);
1871 #ifdef INET6
1872 		else if (m->m_pkthdr.ph_family == AF_INET6)
1873 			ipv6_input(&sc->sc_if, m);
1874 #endif
1875 		else
1876 			panic("invalid ph_family");
1877 		NET_UNLOCK();
1878 
1879 		wg_timers_event_data_received(&peer->p_timers);
1880 	}
1881 }
1882 
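/*
 * Transport packets are enqueued on two structures at once: the per-peer
 * serial queue, which preserves ordering, and the softc's parallel ring,
 * which spreads the crypto work across the taskq threads. The tag's t_done
 * flag marks when the parallel (crypto) stage has finished with the mbuf.
 */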
1883 int
1884 wg_queue_in(struct wg_softc *sc, struct wg_peer *peer, struct mbuf *m)
1885 {
1886 	struct wg_ring		*parallel = &sc->sc_decap_ring;
1887 	struct wg_queue		*serial = &peer->p_decap_queue;
1888 	struct wg_tag		*t;
1889 
1890 	mtx_enter(&serial->q_mtx);
1891 	if (serial->q_list.ml_len < MAX_QUEUED_PKT) {
1892 		ml_enqueue(&serial->q_list, m);
1893 		mtx_leave(&serial->q_mtx);
1894 	} else {
1895 		mtx_leave(&serial->q_mtx);
1896 		m_freem(m);
1897 		return ENOBUFS;
1898 	}
1899 
1900 	mtx_enter(&parallel->r_mtx);
1901 	if (parallel->r_tail - parallel->r_head < MAX_QUEUED_PKT) {
1902 		parallel->r_buf[parallel->r_tail & MAX_QUEUED_PKT_MASK] = m;
1903 		parallel->r_tail++;
1904 		mtx_leave(&parallel->r_mtx);
1905 	} else {
1906 		mtx_leave(&parallel->r_mtx);
1907 		t = wg_tag_get(m);
1908 		t->t_done = 1;
1909 		return ENOBUFS;
1910 	}
1911 
1912 	return 0;
1913 }
1914 
1915 void
1916 wg_queue_out(struct wg_softc *sc, struct wg_peer *peer)
1917 {
1918 	struct wg_ring		*parallel = &sc->sc_encap_ring;
1919 	struct wg_queue		*serial = &peer->p_encap_queue;
1920 	struct mbuf_list 	 ml, ml_free;
1921 	struct mbuf		*m;
1922 	struct wg_tag		*t;
1923 	int			 dropped;
1924 
1925 	/*
1926 	 * We delist all staged packets and then add them to the queues. This
1927 	 * can race with wg_qstart when called from wg_send_keepalive; however,
1928 	 * wg_qstart will not race, as it is serialised.
1929 	 */
1930 	mq_delist(&peer->p_stage_queue, &ml);
1931 	ml_init(&ml_free);
1932 
1933 	while ((m = ml_dequeue(&ml)) != NULL) {
1934 		mtx_enter(&serial->q_mtx);
1935 		if (serial->q_list.ml_len < MAX_QUEUED_PKT) {
1936 			ml_enqueue(&serial->q_list, m);
1937 			mtx_leave(&serial->q_mtx);
1938 		} else {
1939 			mtx_leave(&serial->q_mtx);
1940 			ml_enqueue(&ml_free, m);
1941 			continue;
1942 		}
1943 
1944 		mtx_enter(&parallel->r_mtx);
1945 		if (parallel->r_tail - parallel->r_head < MAX_QUEUED_PKT) {
1946 			parallel->r_buf[parallel->r_tail & MAX_QUEUED_PKT_MASK] = m;
1947 			parallel->r_tail++;
1948 			mtx_leave(&parallel->r_mtx);
1949 		} else {
1950 			mtx_leave(&parallel->r_mtx);
1951 			t = wg_tag_get(m);
1952 			t->t_done = 1;
1953 		}
1954 	}
1955 
1956 	if ((dropped = ml_purge(&ml_free)) > 0)
1957 		counters_add(sc->sc_if.if_counters, ifc_oqdrops, dropped);
1958 }
1959 
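/*
 * The parallel ring is a fixed-size circular buffer: r_head and r_tail are
 * free-running counters, so occupancy is r_tail - r_head and slots are
 * selected with the power-of-two mask MAX_QUEUED_PKT_MASK.
 */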
1960 struct mbuf *
1961 wg_ring_dequeue(struct wg_ring *r)
1962 {
1963 	struct mbuf *m = NULL;
1964 	mtx_enter(&r->r_mtx);
1965 	if (r->r_head != r->r_tail) {
1966 		m = r->r_buf[r->r_head & MAX_QUEUED_PKT_MASK];
1967 		r->r_head++;
1968 	}
1969 	mtx_leave(&r->r_mtx);
1970 	return m;
1971 }
1972 
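/*
 * A serial queue only yields its head once that mbuf's crypto stage has
 * completed (t_done), which is how per-peer ordering survives the parallel
 * encryption/decryption step.
 */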
1973 struct mbuf *
1974 wg_queue_dequeue(struct wg_queue *q, struct wg_tag **t)
1975 {
1976 	struct mbuf *m;
1977 	mtx_enter(&q->q_mtx);
1978 	if ((m = q->q_list.ml_head) != NULL && (*t = wg_tag_get(m))->t_done)
1979 		ml_dequeue(&q->q_list);
1980 	else
1981 		m = NULL;
1982 	mtx_leave(&q->q_mtx);
1983 	return m;
1984 }
1985 
1986 size_t
1987 wg_queue_len(struct wg_queue *q)
1988 {
1989 	size_t len;
1990 	mtx_enter(&q->q_mtx);
1991 	len = q->q_list.ml_len;
1992 	mtx_leave(&q->q_mtx);
1993 	return len;
1994 }
1995 
1996 struct noise_remote *
1997 wg_remote_get(void *_sc, uint8_t public[NOISE_PUBLIC_KEY_LEN])
1998 {
1999 	struct wg_peer	*peer;
2000 	struct wg_softc	*sc = _sc;
2001 	if ((peer = wg_peer_lookup(sc, public)) == NULL)
2002 		return NULL;
2003 	return &peer->p_remote;
2004 }
2005 
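/*
 * wg_index_set assigns a random 32-bit session index to a remote. The low
 * bits select a hash bucket; since colliding keys would share a bucket,
 * scanning that one bucket is enough to detect a duplicate and retry.
 */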
2006 uint32_t
2007 wg_index_set(void *_sc, struct noise_remote *remote)
2008 {
2009 	struct wg_peer	*peer;
2010 	struct wg_softc	*sc = _sc;
2011 	struct wg_index *index, *iter;
2012 	uint32_t	 key;
2013 
2014 	/*
2015 	 * We can modify this without a lock, as wg_index_set and wg_index_drop
2016 	 * are guaranteed to be serialised (per remote).
2017 	 */
2018 	peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
2019 	index = SLIST_FIRST(&peer->p_unused_index);
2020 	KASSERT(index != NULL);
2021 	SLIST_REMOVE_HEAD(&peer->p_unused_index, i_unused_entry);
2022 
2023 	index->i_value = remote;
2024 
2025 	mtx_enter(&sc->sc_index_mtx);
2026 assign_id:
2027 	key = index->i_key = arc4random();
2028 	key &= sc->sc_index_mask;
2029 	LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
2030 		if (iter->i_key == index->i_key)
2031 			goto assign_id;
2032 
2033 	LIST_INSERT_HEAD(&sc->sc_index[key], index, i_entry);
2034 
2035 	mtx_leave(&sc->sc_index_mtx);
2036 
2037 	/* Likewise, no need to lock for index here. */
2038 	return index->i_key;
2039 }
2040 
2041 struct noise_remote *
2042 wg_index_get(void *_sc, uint32_t key0)
2043 {
2044 	struct wg_softc		*sc = _sc;
2045 	struct wg_index		*iter;
2046 	struct noise_remote	*remote = NULL;
2047 	uint32_t		 key = key0 & sc->sc_index_mask;
2048 
2049 	mtx_enter(&sc->sc_index_mtx);
2050 	LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
2051 		if (iter->i_key == key0) {
2052 			remote = iter->i_value;
2053 			break;
2054 		}
2055 	mtx_leave(&sc->sc_index_mtx);
2056 	return remote;
2057 }
2058 
2059 void
2060 wg_index_drop(void *_sc, uint32_t key0)
2061 {
2062 	struct wg_softc	*sc = _sc;
2063 	struct wg_index	*iter;
2064 	struct wg_peer	*peer = NULL;
2065 	uint32_t	 key = key0 & sc->sc_index_mask;
2066 
2067 	mtx_enter(&sc->sc_index_mtx);
2068 	LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
2069 		if (iter->i_key == key0) {
2070 			LIST_REMOVE(iter, i_entry);
2071 			break;
2072 		}
2073 	mtx_leave(&sc->sc_index_mtx);
2074 
2075 	/* We expect to find the index, and with it the owning peer. */
2076 	KASSERT(iter != NULL);
2077 	peer = CONTAINER_OF(iter->i_value, struct wg_peer, p_remote);
2078 	SLIST_INSERT_HEAD(&peer->p_unused_index, iter, i_unused_entry);
2079 }
2080 
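/*
 * wg_input demultiplexes a UDP payload by its first 4 bytes: the three
 * fixed-size handshake message types go to the handshake queue, while data
 * packets are matched to a peer via their receiver index and queued for
 * decryption. The mbuf is always consumed, so NULL is returned.
 */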
2081 struct mbuf *
2082 wg_input(void *_sc, struct mbuf *m, struct ip *ip, struct ip6_hdr *ip6,
2083     void *_uh, int hlen)
2084 {
2085 	struct wg_pkt_data	*data;
2086 	struct noise_remote	*remote;
2087 	struct wg_tag		*t;
2088 	struct wg_softc		*sc = _sc;
2089 	struct udphdr		*uh = _uh;
2090 	char			 ipaddr[INET6_ADDRSTRLEN];
2091 
2092 	NET_ASSERT_LOCKED();
2093 
2094 	if ((t = wg_tag_get(m)) == NULL) {
2095 		m_freem(m);
2096 		return NULL;
2097 	}
2098 
2099 	if (ip != NULL) {
2100 		t->t_endpoint.e_remote.r_sa.sa_len = sizeof(struct sockaddr_in);
2101 		t->t_endpoint.e_remote.r_sa.sa_family = AF_INET;
2102 		t->t_endpoint.e_remote.r_sin.sin_port = uh->uh_sport;
2103 		t->t_endpoint.e_remote.r_sin.sin_addr = ip->ip_src;
2104 		t->t_endpoint.e_local.l_in = ip->ip_dst;
2105 #ifdef INET6
2106 	} else if (ip6 != NULL) {
2107 		t->t_endpoint.e_remote.r_sa.sa_len = sizeof(struct sockaddr_in6);
2108 		t->t_endpoint.e_remote.r_sa.sa_family = AF_INET6;
2109 		t->t_endpoint.e_remote.r_sin6.sin6_port = uh->uh_sport;
2110 		t->t_endpoint.e_remote.r_sin6.sin6_addr = ip6->ip6_src;
2111 		t->t_endpoint.e_local.l_in6 = ip6->ip6_dst;
2112 #endif
2113 	} else {
2114 		m_freem(m);
2115 		return NULL;
2116 	}
2117 
2118 	/* m has an IP/IPv6 header of hlen length; we don't need it anymore. */
2119 	m_adj(m, hlen);
2120 
2121 	/*
2122 	 * Ensure mbuf is contiguous over full length of packet. This is done
2123 	 * so we can directly read the handshake values in wg_handshake, and so
2124 	 * we can decrypt a transport packet by passing a single buffer to
2125 	 * noise_remote_decrypt in wg_decap.
2126 	 */
2127 	if ((m = m_pullup(m, m->m_pkthdr.len)) == NULL)
2128 		return NULL;
2129 
2130 	if ((m->m_pkthdr.len == sizeof(struct wg_pkt_initiation) &&
2131 		*mtod(m, uint32_t *) == WG_PKT_INITIATION) ||
2132 	    (m->m_pkthdr.len == sizeof(struct wg_pkt_response) &&
2133 		*mtod(m, uint32_t *) == WG_PKT_RESPONSE) ||
2134 	    (m->m_pkthdr.len == sizeof(struct wg_pkt_cookie) &&
2135 		*mtod(m, uint32_t *) == WG_PKT_COOKIE)) {
2136 
2137 		if (mq_enqueue(&sc->sc_handshake_queue, m) != 0)
2138 			WGPRINTF(LOG_DEBUG, sc, NULL, "Dropping handshake "
2139 			    "packet from %s\n",
2140 			    sockaddr_ntop(&t->t_endpoint.e_remote.r_sa, ipaddr,
2141 			        sizeof(ipaddr)));
2142 		task_add(wg_handshake_taskq, &sc->sc_handshake);
2143 
2144 	} else if (m->m_pkthdr.len >= sizeof(struct wg_pkt_data) +
2145 	    NOISE_AUTHTAG_LEN && *mtod(m, uint32_t *) == WG_PKT_DATA) {
2146 
2147 		data = mtod(m, struct wg_pkt_data *);
2148 
2149 		if ((remote = wg_index_get(sc, data->r_idx)) != NULL) {
2150 			t->t_peer = CONTAINER_OF(remote, struct wg_peer,
2151 			    p_remote);
2152 			t->t_mbuf = NULL;
2153 			t->t_done = 0;
2154 
2155 			if (wg_queue_in(sc, t->t_peer, m) != 0)
2156 				counters_inc(sc->sc_if.if_counters,
2157 				    ifc_iqdrops);
2158 			task_add(wg_crypt_taskq, &sc->sc_decap);
2159 		} else {
2160 			counters_inc(sc->sc_if.if_counters, ifc_ierrors);
2161 			m_freem(m);
2162 		}
2163 	} else {
2164 		counters_inc(sc->sc_if.if_counters, ifc_ierrors);
2165 		m_freem(m);
2166 	}
2167 
2168 	return NULL;
2169 }
2170 
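/*
 * wg_qstart moves packets from the interface queue onto the owning peers'
 * stage queues, then flushes each peer: straight to the encryption queues
 * if a session is ready, otherwise by requesting a handshake initiation.
 */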
2171 void
2172 wg_qstart(struct ifqueue *ifq)
2173 {
2174 	struct ifnet		*ifp = ifq->ifq_if;
2175 	struct wg_softc		*sc = ifp->if_softc;
2176 	struct wg_peer		*peer;
2177 	struct wg_tag		*t;
2178 	struct mbuf		*m;
2179 	SLIST_HEAD(,wg_peer)	 start_list;
2180 
2181 	SLIST_INIT(&start_list);
2182 
2183 	/*
2184 	 * We should be OK to modify p_start_list and p_start_onlist in this
2185 	 * function, as there should only be one ifp->if_qstart invoked at a
2186 	 * time.
2187 	 */
2188 	while ((m = ifq_dequeue(ifq)) != NULL) {
2189 		t = wg_tag_get(m);
2190 		peer = t->t_peer;
2191 		if (mq_push(&peer->p_stage_queue, m) != 0)
2192 			counters_inc(ifp->if_counters, ifc_oqdrops);
2193 		if (!peer->p_start_onlist) {
2194 			SLIST_INSERT_HEAD(&start_list, peer, p_start_list);
2195 			peer->p_start_onlist = 1;
2196 		}
2197 	}
2198 	SLIST_FOREACH(peer, &start_list, p_start_list) {
2199 		if (noise_remote_ready(&peer->p_remote) == 0)
2200 			wg_queue_out(sc, peer);
2201 		else
2202 			wg_timers_event_want_initiation(&peer->p_timers);
2203 		peer->p_start_onlist = 0;
2204 	}
2205 	task_add(wg_crypt_taskq, &sc->sc_encap);
2206 }
2207 
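/*
 * wg_output looks up the destination peer in the allowed-IPs table and
 * tags the mbuf with it before handing the packet to the interface queue;
 * the actual staging and encryption happen later in wg_qstart.
 */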
2208 int
2209 wg_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2210     struct rtentry *rt)
2211 {
2212 	struct wg_softc	*sc = ifp->if_softc;
2213 	struct wg_peer	*peer;
2214 	struct wg_tag	*t;
2215 	int		 af, ret = EINVAL;
2216 
2217 	NET_ASSERT_LOCKED();
2218 
2219 	if ((t = wg_tag_get(m)) == NULL) {
2220 		ret = ENOBUFS;
2221 		goto error;
2222 	}
2223 
2224 	m->m_pkthdr.ph_family = sa->sa_family;
2225 	if (sa->sa_family == AF_INET) {
2226 		peer = wg_aip_lookup(sc->sc_aip4,
2227 		    &mtod(m, struct ip *)->ip_dst);
2228 #ifdef INET6
2229 	} else if (sa->sa_family == AF_INET6) {
2230 		peer = wg_aip_lookup(sc->sc_aip6,
2231 		    &mtod(m, struct ip6_hdr *)->ip6_dst);
2232 #endif
2233 	} else {
2234 		ret = EAFNOSUPPORT;
2235 		goto error;
2236 	}
2237 
2238 #if NBPFILTER > 0
2239 	if (sc->sc_if.if_bpf)
2240 		bpf_mtap_af(sc->sc_if.if_bpf, sa->sa_family, m,
2241 		    BPF_DIRECTION_OUT);
2242 #endif
2243 
2244 	if (peer == NULL) {
2245 		ret = ENETUNREACH;
2246 		goto error;
2247 	}
2248 
2249 	af = peer->p_endpoint.e_remote.r_sa.sa_family;
2250 	if (af != AF_INET && af != AF_INET6) {
2251 		WGPRINTF(LOG_DEBUG, sc, NULL, "No valid endpoint has been "
2252 		    "configured or discovered for peer %llu\n", peer->p_id);
2253 		ret = EDESTADDRREQ;
2254 		goto error;
2255 	}
2256 
2257 	if (m->m_pkthdr.ph_loopcnt++ > M_MAXLOOP) {
2258 		WGPRINTF(LOG_DEBUG, sc, NULL, "Packet looped\n");
2259 		ret = ELOOP;
2260 		goto error;
2261 	}
2262 
2263 	/*
2264 	 * As we hold a reference to peer in the mbuf, we can't handle a
2265 	 * delayed packet without doing some refcnting. If a peer is removed
2266 	 * while a delayed packet holds a reference, bad things will happen.
2267 	 * For the time being, delayed packets are unsupported. This may be
2268 	 * fixed with another aip_lookup in wg_qstart, or with refcnting.
2269 	 */
2270 	if (m->m_pkthdr.pf.delay > 0) {
2271 		WGPRINTF(LOG_DEBUG, sc, NULL, "PF delay unsupported\n");
2272 		ret = EOPNOTSUPP;
2273 		goto error;
2274 	}
2275 
2276 	t->t_peer = peer;
2277 	t->t_mbuf = NULL;
2278 	t->t_done = 0;
2279 	t->t_mtu = ifp->if_mtu;
2280 
2281 	/*
2282 	 * We still have an issue with ifq: it counts a packet even if it is
2283 	 * later dropped in wg_qstart or never encrypted. Those drops are also
2284 	 * counted as ofails or oqdrops, so the packet gets counted twice.
2285 	 */
2286 	return if_enqueue(ifp, m);
2287 error:
2288 	counters_inc(ifp->if_counters, ifc_oerrors);
2289 	m_freem(m);
2290 	return ret;
2291 }
2292 
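/*
 * SIOCSWG passes a variable-length blob laid out as a wg_interface_io
 * followed, for each peer, by a wg_peer_io and then that peer's array of
 * wg_aip_io entries. That is why the loop below advances peer_p past each
 * peer's aip array instead of indexing a fixed-size array.
 */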
2293 int
2294 wg_ioctl_set(struct wg_softc *sc, struct wg_data_io *data)
2295 {
2296 	struct wg_interface_io	*iface_p, iface_o;
2297 	struct wg_peer_io	*peer_p, peer_o;
2298 	struct wg_aip_io	*aip_p, aip_o;
2299 
2300 	struct wg_peer		*peer, *tpeer;
2301 	struct wg_aip		*aip, *taip;
2302 
2303 	in_port_t		 port;
2304 	int			 rtable;
2305 
2306 	uint8_t			 public[WG_KEY_SIZE], private[WG_KEY_SIZE];
2307 	size_t			 i, j;
2308 	int			 ret, has_identity;
2309 
2310 	if ((ret = suser(curproc)) != 0)
2311 		return ret;
2312 
2313 	rw_enter_write(&sc->sc_lock);
2314 
2315 	iface_p = data->wgd_interface;
2316 	if ((ret = copyin(iface_p, &iface_o, sizeof(iface_o))) != 0)
2317 		goto error;
2318 
2319 	if (iface_o.i_flags & WG_INTERFACE_REPLACE_PEERS)
2320 		TAILQ_FOREACH_SAFE(peer, &sc->sc_peer_seq, p_seq_entry, tpeer)
2321 			wg_peer_destroy(peer);
2322 
2323 	if (iface_o.i_flags & WG_INTERFACE_HAS_PRIVATE &&
2324 	    (noise_local_keys(&sc->sc_local, NULL, private) ||
2325 	     timingsafe_bcmp(private, iface_o.i_private, WG_KEY_SIZE))) {
2326 		if (curve25519_generate_public(public, iface_o.i_private)) {
2327 			if ((peer = wg_peer_lookup(sc, public)) != NULL)
2328 				wg_peer_destroy(peer);
2329 		}
2330 		noise_local_lock_identity(&sc->sc_local);
2331 		has_identity = noise_local_set_private(&sc->sc_local,
2332 						       iface_o.i_private);
2333 		TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2334 			noise_remote_precompute(&peer->p_remote);
2335 			wg_timers_event_reset_handshake_last_sent(&peer->p_timers);
2336 			noise_remote_expire_current(&peer->p_remote);
2337 		}
2338 		cookie_checker_update(&sc->sc_cookie,
2339 				      has_identity == 0 ? public : NULL);
2340 		noise_local_unlock_identity(&sc->sc_local);
2341 	}
2342 
2343 	if (iface_o.i_flags & WG_INTERFACE_HAS_PORT)
2344 		port = htons(iface_o.i_port);
2345 	else
2346 		port = sc->sc_udp_port;
2347 
2348 	if (iface_o.i_flags & WG_INTERFACE_HAS_RTABLE)
2349 		rtable = iface_o.i_rtable;
2350 	else
2351 		rtable = sc->sc_udp_rtable;
2352 
2353 	if (port != sc->sc_udp_port || rtable != sc->sc_udp_rtable) {
2354 		TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry)
2355 			wg_peer_clear_src(peer);
2356 
2357 		if (sc->sc_if.if_flags & IFF_RUNNING)
2358 			if ((ret = wg_bind(sc, &port, &rtable)) != 0)
2359 				goto error;
2360 
2361 		sc->sc_udp_port = port;
2362 		sc->sc_udp_rtable = rtable;
2363 	}
2364 
2365 	peer_p = &iface_p->i_peers[0];
2366 	for (i = 0; i < iface_o.i_peers_count; i++) {
2367 		if ((ret = copyin(peer_p, &peer_o, sizeof(peer_o))) != 0)
2368 			goto error;
2369 
2370 		/* Peer must have public key */
2371 		if (!(peer_o.p_flags & WG_PEER_HAS_PUBLIC))
2372 			goto next_peer;
2373 
2374 		/* 0 = latest protocol, 1 = this protocol */
2375 		if (peer_o.p_protocol_version != 0) {
2376 			if (peer_o.p_protocol_version > 1) {
2377 				ret = EPFNOSUPPORT;
2378 				goto error;
2379 			}
2380 		}
2381 
2382 		/* Get local public and check that peer key doesn't match */
2383 		if (noise_local_keys(&sc->sc_local, public, NULL) == 0 &&
2384 		    bcmp(public, peer_o.p_public, WG_KEY_SIZE) == 0)
2385 			goto next_peer;
2386 
2387 		/* Lookup peer, or create if it doesn't exist */
2388 		if ((peer = wg_peer_lookup(sc, peer_o.p_public)) == NULL) {
2389 			/* If we want to delete, there is no need to create a
2390 			 * new one. Likewise, don't create a new one if we
2391 			 * only want to update. */
2392 			if (peer_o.p_flags & (WG_PEER_REMOVE|WG_PEER_UPDATE))
2393 				goto next_peer;
2394 
2395 			if ((peer = wg_peer_create(sc,
2396 			    peer_o.p_public)) == NULL) {
2397 				ret = ENOMEM;
2398 				goto error;
2399 			}
2400 		}
2401 
2402 		/* Remove peer and continue if specified */
2403 		if (peer_o.p_flags & WG_PEER_REMOVE) {
2404 			wg_peer_destroy(peer);
2405 			goto next_peer;
2406 		}
2407 
2408 		if (peer_o.p_flags & WG_PEER_HAS_ENDPOINT)
2409 			wg_peer_set_sockaddr(peer, &peer_o.p_sa);
2410 
2411 		if (peer_o.p_flags & WG_PEER_HAS_PSK)
2412 			noise_remote_set_psk(&peer->p_remote, peer_o.p_psk);
2413 
2414 		if (peer_o.p_flags & WG_PEER_HAS_PKA)
2415 			wg_timers_set_persistent_keepalive(&peer->p_timers,
2416 			    peer_o.p_pka);
2417 
2418 		if (peer_o.p_flags & WG_PEER_REPLACE_AIPS) {
2419 			LIST_FOREACH_SAFE(aip, &peer->p_aip, a_entry, taip) {
2420 				wg_aip_remove(sc, peer, &aip->a_data);
2421 			}
2422 		}
2423 
2424 		if (peer_o.p_flags & WG_PEER_SET_DESCRIPTION)
2425 			strlcpy(peer->p_description, peer_o.p_description,
2426 			    IFDESCRSIZE);
2427 
2428 		aip_p = &peer_p->p_aips[0];
2429 		for (j = 0; j < peer_o.p_aips_count; j++) {
2430 			if ((ret = copyin(aip_p, &aip_o, sizeof(aip_o))) != 0)
2431 				goto error;
2432 			ret = wg_aip_add(sc, peer, &aip_o);
2433 			if (ret != 0)
2434 				goto error;
2435 			aip_p++;
2436 		}
2437 
2438 		peer_p = (struct wg_peer_io *)aip_p;
2439 		continue;
2440 next_peer:
2441 		aip_p = &peer_p->p_aips[0];
2442 		aip_p += peer_o.p_aips_count;
2443 		peer_p = (struct wg_peer_io *)aip_p;
2444 	}
2445 
2446 error:
2447 	rw_exit_write(&sc->sc_lock);
2448 	explicit_bzero(&iface_o, sizeof(iface_o));
2449 	explicit_bzero(&peer_o, sizeof(peer_o));
2450 	explicit_bzero(&aip_o, sizeof(aip_o));
2451 	explicit_bzero(public, sizeof(public));
2452 	explicit_bzero(private, sizeof(private));
2453 	return ret;
2454 }
2455 
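/*
 * SIOCGWG writes out the same layout that wg_ioctl_set consumes. Key
 * material is only copied out to the superuser; unprivileged callers get
 * the port/rtable summary. wgd_size is always updated so userland can
 * size its buffer and retry.
 */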
2456 int
2457 wg_ioctl_get(struct wg_softc *sc, struct wg_data_io *data)
2458 {
2459 	struct wg_interface_io	*iface_p, iface_o;
2460 	struct wg_peer_io	*peer_p, peer_o;
2461 	struct wg_aip_io	*aip_p;
2462 
2463 	struct wg_peer		*peer;
2464 	struct wg_aip		*aip;
2465 
2466 	size_t			 size, peer_count, aip_count;
2467 	int			 ret = 0, is_suser = suser(curproc) == 0;
2468 
2469 	size = sizeof(struct wg_interface_io);
2470 	if (data->wgd_size < size && !is_suser)
2471 		goto ret_size;
2472 
2473 	iface_p = data->wgd_interface;
2474 	bzero(&iface_o, sizeof(iface_o));
2475 
2476 	rw_enter_read(&sc->sc_lock);
2477 
2478 	if (sc->sc_udp_port != 0) {
2479 		iface_o.i_port = ntohs(sc->sc_udp_port);
2480 		iface_o.i_flags |= WG_INTERFACE_HAS_PORT;
2481 	}
2482 
2483 	if (sc->sc_udp_rtable != 0) {
2484 		iface_o.i_rtable = sc->sc_udp_rtable;
2485 		iface_o.i_flags |= WG_INTERFACE_HAS_RTABLE;
2486 	}
2487 
2488 	if (!is_suser)
2489 		goto copy_out_iface;
2490 
2491 	if (noise_local_keys(&sc->sc_local, iface_o.i_public,
2492 	    iface_o.i_private) == 0) {
2493 		iface_o.i_flags |= WG_INTERFACE_HAS_PUBLIC;
2494 		iface_o.i_flags |= WG_INTERFACE_HAS_PRIVATE;
2495 	}
2496 
2497 	size += sizeof(struct wg_peer_io) * sc->sc_peer_num;
2498 	size += sizeof(struct wg_aip_io) * sc->sc_aip_num;
2499 	if (data->wgd_size < size)
2500 		goto unlock_and_ret_size;
2501 
2502 	peer_count = 0;
2503 	peer_p = &iface_p->i_peers[0];
2504 	TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2505 		bzero(&peer_o, sizeof(peer_o));
2506 		peer_o.p_flags = WG_PEER_HAS_PUBLIC;
2507 		peer_o.p_protocol_version = 1;
2508 
2509 		if (noise_remote_keys(&peer->p_remote, peer_o.p_public,
2510 		    peer_o.p_psk) == 0)
2511 			peer_o.p_flags |= WG_PEER_HAS_PSK;
2512 
2513 		if (wg_timers_get_persistent_keepalive(&peer->p_timers,
2514 		    &peer_o.p_pka) == 0)
2515 			peer_o.p_flags |= WG_PEER_HAS_PKA;
2516 
2517 		if (wg_peer_get_sockaddr(peer, &peer_o.p_sa) == 0)
2518 			peer_o.p_flags |= WG_PEER_HAS_ENDPOINT;
2519 
2520 		mtx_enter(&peer->p_counters_mtx);
2521 		peer_o.p_txbytes = peer->p_counters_tx;
2522 		peer_o.p_rxbytes = peer->p_counters_rx;
2523 		mtx_leave(&peer->p_counters_mtx);
2524 
2525 		wg_timers_get_last_handshake(&peer->p_timers,
2526 		    &peer_o.p_last_handshake);
2527 
2528 		aip_count = 0;
2529 		aip_p = &peer_p->p_aips[0];
2530 		LIST_FOREACH(aip, &peer->p_aip, a_entry) {
2531 			if ((ret = copyout(&aip->a_data, aip_p, sizeof(*aip_p))) != 0)
2532 				goto unlock_and_ret_size;
2533 			aip_p++;
2534 			aip_count++;
2535 		}
2536 		peer_o.p_aips_count = aip_count;
2537 
2538 		strlcpy(peer_o.p_description, peer->p_description, IFDESCRSIZE);
2539 
2540 		if ((ret = copyout(&peer_o, peer_p, sizeof(peer_o))) != 0)
2541 			goto unlock_and_ret_size;
2542 
2543 		peer_p = (struct wg_peer_io *)aip_p;
2544 		peer_count++;
2545 	}
2546 	iface_o.i_peers_count = peer_count;
2547 
2548 copy_out_iface:
2549 	ret = copyout(&iface_o, iface_p, sizeof(iface_o));
2550 unlock_and_ret_size:
2551 	rw_exit_read(&sc->sc_lock);
2552 	explicit_bzero(&iface_o, sizeof(iface_o));
2553 	explicit_bzero(&peer_o, sizeof(peer_o));
2554 ret_size:
2555 	data->wgd_size = size;
2556 	return ret;
2557 }
2558 
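/*
 * The wg-specific ioctls drop NET_LOCK around their bodies: they take
 * sc_lock and copy data to and from userland, both of which may sleep,
 * so the net lock is released for their duration.
 */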
2559 int
2560 wg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2561 {
2562 	struct ifreq	*ifr = (struct ifreq *) data;
2563 	struct wg_softc	*sc = ifp->if_softc;
2564 	int		 ret = 0;
2565 
2566 	switch (cmd) {
2567 	case SIOCSWG:
2568 		NET_UNLOCK();
2569 		ret = wg_ioctl_set(sc, (struct wg_data_io *) data);
2570 		NET_LOCK();
2571 		break;
2572 	case SIOCGWG:
2573 		NET_UNLOCK();
2574 		ret = wg_ioctl_get(sc, (struct wg_data_io *) data);
2575 		NET_LOCK();
2576 		break;
2577 	/* Interface IOCTLs */
2578 	case SIOCSIFADDR:
2579 		SET(ifp->if_flags, IFF_UP);
2580 		/* FALLTHROUGH */
2581 	case SIOCSIFFLAGS:
2582 		if (ISSET(ifp->if_flags, IFF_UP))
2583 			ret = wg_up(sc);
2584 		else
2585 			wg_down(sc);
2586 		break;
2587 	case SIOCSIFMTU:
2588 		/* Arbitrary limits */
2589 		if (ifr->ifr_mtu <= 0 || ifr->ifr_mtu > 9000)
2590 			ret = EINVAL;
2591 		else
2592 			ifp->if_mtu = ifr->ifr_mtu;
2593 		break;
2594 	case SIOCADDMULTI:
2595 	case SIOCDELMULTI:
2596 		break;
2597 	default:
2598 		ret = ENOTTY;
2599 	}
2600 
2601 	return ret;
2602 }
2603 
2604 int
2605 wg_up(struct wg_softc *sc)
2606 {
2607 	struct wg_peer	*peer;
2608 	int		 ret = 0;
2609 
2610 	NET_ASSERT_LOCKED();
2611 	/*
2612 	 * We use IFF_RUNNING for exclusive access here. We also may want
2613 	 * an exclusive sc_lock, as wg_bind may write to sc_udp_port. We also
2614 	 * want to drop NET_LOCK as we want to call socreate, sobind, etc. Once
2615 	 * solock is no longer === NET_LOCK, we may be able to avoid this.
2616 	 */
2617 	if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING)) {
2618 		SET(sc->sc_if.if_flags, IFF_RUNNING);
2619 		NET_UNLOCK();
2620 
2621 		rw_enter_write(&sc->sc_lock);
2622 		/*
2623 		 * If we successfully bind the socket, then enable the timers
2624 		 * for the peer. This will send all staged packets and a
2625 		 * keepalive if necessary.
2626 		 */
2627 		ret = wg_bind(sc, &sc->sc_udp_port, &sc->sc_udp_rtable);
2628 		if (ret == 0) {
2629 			TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2630 				wg_timers_enable(&peer->p_timers);
2631 				wg_queue_out(sc, peer);
2632 			}
2633 		}
2634 		rw_exit_write(&sc->sc_lock);
2635 
2636 		NET_LOCK();
2637 		if (ret != 0)
2638 			CLR(sc->sc_if.if_flags, IFF_RUNNING);
2639 	}
2640 	return ret;
2641 }
2642 
2643 void
2644 wg_down(struct wg_softc *sc)
2645 {
2646 	struct wg_peer	*peer;
2647 
2648 	NET_ASSERT_LOCKED();
2649 	if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING))
2650 		return;
2651 	CLR(sc->sc_if.if_flags, IFF_RUNNING);
2652 	NET_UNLOCK();
2653 
2654 	/*
2655 	 * We only need a read lock here, as we aren't writing to anything
2656 	 * that isn't granularly locked.
2657 	 */
2658 	rw_enter_read(&sc->sc_lock);
2659 	TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2660 		mq_purge(&peer->p_stage_queue);
2661 		wg_timers_disable(&peer->p_timers);
2662 	}
2663 
2664 	taskq_barrier(wg_handshake_taskq);
2665 	TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2666 		noise_remote_clear(&peer->p_remote);
2667 		wg_timers_event_reset_handshake_last_sent(&peer->p_timers);
2668 	}
2669 
2670 	wg_unbind(sc);
2671 	rw_exit_read(&sc->sc_lock);
2672 	NET_LOCK();
2673 }
2674 
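/*
 * The first clone to be created also creates the shared handshake and
 * crypt taskqs; the last one to be destroyed tears them down again (see
 * wg_clone_destroy). wg_counter tracks how many interfaces exist.
 */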
2675 int
2676 wg_clone_create(struct if_clone *ifc, int unit)
2677 {
2678 	struct ifnet		*ifp;
2679 	struct wg_softc		*sc;
2680 	struct noise_upcall	 local_upcall;
2681 
2682 	KERNEL_ASSERT_LOCKED();
2683 
2684 	if (wg_counter == 0) {
2685 		wg_handshake_taskq = taskq_create("wg_handshake",
2686 		    2, IPL_NET, TASKQ_MPSAFE);
2687 		wg_crypt_taskq = taskq_create("wg_crypt",
2688 		    ncpus, IPL_NET, TASKQ_MPSAFE);
2689 
2690 		if (wg_handshake_taskq == NULL || wg_crypt_taskq == NULL) {
2691 			if (wg_handshake_taskq != NULL)
2692 				taskq_destroy(wg_handshake_taskq);
2693 			if (wg_crypt_taskq != NULL)
2694 				taskq_destroy(wg_crypt_taskq);
2695 			wg_handshake_taskq = NULL;
2696 			wg_crypt_taskq = NULL;
2697 			return ENOTRECOVERABLE;
2698 		}
2699 	}
2700 	wg_counter++;
2701 
2702 	if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL)
2703 		goto ret_00;
2704 
2705 	local_upcall.u_arg = sc;
2706 	local_upcall.u_remote_get = wg_remote_get;
2707 	local_upcall.u_index_set = wg_index_set;
2708 	local_upcall.u_index_drop = wg_index_drop;
2709 
2710 	TAILQ_INIT(&sc->sc_peer_seq);
2711 
2712 	/* sc_if is initialised after everything else */
2713 	arc4random_buf(&sc->sc_secret, sizeof(sc->sc_secret));
2714 
2715 	rw_init(&sc->sc_lock, "wg");
2716 	noise_local_init(&sc->sc_local, &local_upcall);
2717 	if (cookie_checker_init(&sc->sc_cookie, &wg_ratelimit_pool) != 0)
2718 		goto ret_01;
2719 	sc->sc_udp_port = 0;
2720 	sc->sc_udp_rtable = 0;
2721 
2722 	rw_init(&sc->sc_so_lock, "wg_so");
2723 	sc->sc_so4 = NULL;
2724 #ifdef INET6
2725 	sc->sc_so6 = NULL;
2726 #endif
2727 
2728 	sc->sc_aip_num = 0;
2729 	if ((sc->sc_aip4 = art_alloc(0, 32, 0)) == NULL)
2730 		goto ret_02;
2731 #ifdef INET6
2732 	if ((sc->sc_aip6 = art_alloc(0, 128, 0)) == NULL)
2733 		goto ret_03;
2734 #endif
2735 
2736 	rw_init(&sc->sc_peer_lock, "wg_peer");
2737 	sc->sc_peer_num = 0;
2738 	if ((sc->sc_peer = hashinit(HASHTABLE_PEER_SIZE, M_DEVBUF,
2739 	    M_NOWAIT, &sc->sc_peer_mask)) == NULL)
2740 		goto ret_04;
2741 
2742 	mtx_init(&sc->sc_index_mtx, IPL_NET);
2743 	if ((sc->sc_index = hashinit(HASHTABLE_INDEX_SIZE, M_DEVBUF,
2744 	    M_NOWAIT, &sc->sc_index_mask)) == NULL)
2745 		goto ret_05;
2746 
2747 	task_set(&sc->sc_handshake, wg_handshake_worker, sc);
2748 	mq_init(&sc->sc_handshake_queue, MAX_QUEUED_HANDSHAKES, IPL_NET);
2749 
2750 	task_set(&sc->sc_encap, wg_encap_worker, sc);
2751 	task_set(&sc->sc_decap, wg_decap_worker, sc);
2752 
2753 	bzero(&sc->sc_encap_ring, sizeof(sc->sc_encap_ring));
2754 	mtx_init(&sc->sc_encap_ring.r_mtx, IPL_NET);
2755 	bzero(&sc->sc_decap_ring, sizeof(sc->sc_decap_ring));
2756 	mtx_init(&sc->sc_decap_ring.r_mtx, IPL_NET);
2757 
2758 	/* We've set up the softc; now we can set up the ifnet. */
2759 	ifp = &sc->sc_if;
2760 	ifp->if_softc = sc;
2761 
2762 	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "wg%d", unit);
2763 
2764 	ifp->if_mtu = DEFAULT_MTU;
2765 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_NOARP;
2766 	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
2767 	ifp->if_txmit = 64; /* Keep our workers active for longer. */
2768 
2769 	ifp->if_ioctl = wg_ioctl;
2770 	ifp->if_qstart = wg_qstart;
2771 	ifp->if_output = wg_output;
2772 
2773 	ifp->if_type = IFT_WIREGUARD;
2774 	ifp->if_rtrequest = p2p_rtrequest;
2775 
2776 	if_counters_alloc(ifp);
2777 	if_attach(ifp);
2778 	if_alloc_sadl(ifp);
2779 
2780 #if NBPFILTER > 0
2781 	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
2782 #endif
2783 
2784 	WGPRINTF(LOG_INFO, sc, NULL, "Interface created\n");
2785 
2786 	return 0;
2787 ret_05:
2788 	hashfree(sc->sc_peer, HASHTABLE_PEER_SIZE, M_DEVBUF);
2789 ret_04:
2790 #ifdef INET6
2791 	free(sc->sc_aip6, M_RTABLE, sizeof(*sc->sc_aip6));
2792 ret_03:
2793 #endif
2794 	free(sc->sc_aip4, M_RTABLE, sizeof(*sc->sc_aip4));
2795 ret_02:
2796 	cookie_checker_deinit(&sc->sc_cookie);
2797 ret_01:
2798 	free(sc, M_DEVBUF, sizeof(*sc));
2799 ret_00:
2800 	return ENOBUFS;
2801 }

2802 int
2803 wg_clone_destroy(struct ifnet *ifp)
2804 {
2805 	struct wg_softc	*sc = ifp->if_softc;
2806 	struct wg_peer	*peer, *tpeer;
2807 
2808 	KERNEL_ASSERT_LOCKED();
2809 
2810 	rw_enter_write(&sc->sc_lock);
2811 	TAILQ_FOREACH_SAFE(peer, &sc->sc_peer_seq, p_seq_entry, tpeer)
2812 		wg_peer_destroy(peer);
2813 	rw_exit_write(&sc->sc_lock);
2814 
2815 	wg_unbind(sc);
2816 	if_detach(ifp);
2817 
2818 	wg_counter--;
2819 	if (wg_counter == 0) {
2820 		KASSERT(wg_handshake_taskq != NULL && wg_crypt_taskq != NULL);
2821 		taskq_destroy(wg_handshake_taskq);
2822 		taskq_destroy(wg_crypt_taskq);
2823 		wg_handshake_taskq = NULL;
2824 		wg_crypt_taskq = NULL;
2825 	}
2826 
2827 	WGPRINTF(LOG_INFO, sc, NULL, "Interface destroyed\n");
2828 
2829 	hashfree(sc->sc_index, HASHTABLE_INDEX_SIZE, M_DEVBUF);
2830 	hashfree(sc->sc_peer, HASHTABLE_PEER_SIZE, M_DEVBUF);
2831 #ifdef INET6
2832 	free(sc->sc_aip6, M_RTABLE, sizeof(*sc->sc_aip6));
2833 #endif
2834 	free(sc->sc_aip4, M_RTABLE, sizeof(*sc->sc_aip4));
2835 	cookie_checker_deinit(&sc->sc_cookie);
2836 	free(sc, M_DEVBUF, sizeof(*sc));
2837 	return 0;
2838 }
2839 
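/*
 * wgattach runs once at boot: it registers the cloner and initialises the
 * pools shared by all wg interfaces. With WGTEST defined, the cookie and
 * noise self-tests run first.
 */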
2840 void
2841 wgattach(int nwg)
2842 {
2843 #ifdef WGTEST
2844 	cookie_test();
2845 	noise_test();
2846 #endif
2847 	if_clone_attach(&wg_cloner);
2848 
2849 	pool_init(&wg_aip_pool, sizeof(struct wg_aip), 0,
2850 			IPL_NET, 0, "wgaip", NULL);
2851 	pool_init(&wg_peer_pool, sizeof(struct wg_peer), 0,
2852 			IPL_NET, 0, "wgpeer", NULL);
2853 	pool_init(&wg_ratelimit_pool, sizeof(struct ratelimit_entry), 0,
2854 			IPL_NET, 0, "wgratelimit", NULL);
2855 }
2856