1*5663f157Sdlg /* $OpenBSD: if_sec.c,v 1.11 2024/03/19 03:49:11 dlg Exp $ */
2433cd47bSdlg
3433cd47bSdlg /*
4433cd47bSdlg * Copyright (c) 2022 The University of Queensland
5433cd47bSdlg *
6433cd47bSdlg * Permission to use, copy, modify, and distribute this software for any
7433cd47bSdlg * purpose with or without fee is hereby granted, provided that the above
8433cd47bSdlg * copyright notice and this permission notice appear in all copies.
9433cd47bSdlg *
10433cd47bSdlg * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11433cd47bSdlg * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12433cd47bSdlg * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13433cd47bSdlg * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14433cd47bSdlg * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15433cd47bSdlg * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16433cd47bSdlg * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17433cd47bSdlg */
18433cd47bSdlg
19433cd47bSdlg /*
20433cd47bSdlg * This code was written by David Gwynne <dlg@uq.edu.au> as part
21433cd47bSdlg * of the Information Technology Infrastructure Group (ITIG) in the
22433cd47bSdlg * Faculty of Engineering, Architecture and Information Technology
23433cd47bSdlg * (EAIT).
24433cd47bSdlg */
25433cd47bSdlg
26433cd47bSdlg #ifndef IPSEC
27433cd47bSdlg #error sec enabled without IPSEC defined
28433cd47bSdlg #endif
29433cd47bSdlg
30433cd47bSdlg #include "bpfilter.h"
31433cd47bSdlg #include "pf.h"
32433cd47bSdlg
33433cd47bSdlg #include <sys/param.h>
34433cd47bSdlg #include <sys/mbuf.h>
35433cd47bSdlg #include <sys/socket.h>
36433cd47bSdlg #include <sys/sockio.h>
37433cd47bSdlg #include <sys/systm.h>
38433cd47bSdlg #include <sys/errno.h>
39433cd47bSdlg #include <sys/smr.h>
40433cd47bSdlg #include <sys/refcnt.h>
410739faefSjsg #include <sys/task.h>
420739faefSjsg #include <sys/mutex.h>
43433cd47bSdlg
44433cd47bSdlg #include <net/if.h>
45433cd47bSdlg #include <net/if_var.h>
46433cd47bSdlg #include <net/if_types.h>
47433cd47bSdlg #include <net/toeplitz.h>
48433cd47bSdlg
49433cd47bSdlg #include <netinet/in.h>
50433cd47bSdlg #include <netinet/ip.h>
51433cd47bSdlg #include <netinet/ip_ipsp.h>
52433cd47bSdlg
53433cd47bSdlg #ifdef INET6
54433cd47bSdlg #include <netinet/ip6.h>
55433cd47bSdlg #endif
56433cd47bSdlg
57433cd47bSdlg #if NBPFILTER > 0
58433cd47bSdlg #include <net/bpf.h>
59433cd47bSdlg #endif
60433cd47bSdlg
61433cd47bSdlg #if NPF > 0
62433cd47bSdlg #include <net/pfvar.h>
63433cd47bSdlg #endif
64433cd47bSdlg
65433cd47bSdlg #define SEC_MTU 1280
66433cd47bSdlg #define SEC_MTU_MIN 1280
67433cd47bSdlg #define SEC_MTU_MAX 32768 /* could get closer to 64k... */
68433cd47bSdlg
/*
 * Per-interface state for a sec(4) tunnel interface.
 */
struct sec_softc {
	struct ifnet sc_if;		/* the network interface itself */
	unsigned int sc_dead;		/* set (under NET_LOCK) by sec_clone_destroy */
	unsigned int sc_up;		/* serialises sec_up() against a running sec_down() */

	struct task sc_send;		/* runs sec_send() on systq to push if_snd out */
	int sc_txprio;			/* pf prio stamped on transmitted packets */

	unsigned int sc_unit;		/* clone unit; matched against tdb_iface */
	SMR_SLIST_ENTRY(sec_softc) sc_entry;	/* link in a sec_map bucket */
	struct refcnt sc_refs;		/* held by sec_get() callers during input */
};

/* hash bucket of sec interfaces; read side is protected by SMR */
SMR_SLIST_HEAD(sec_bucket, sec_softc);
83433cd47bSdlg
static int	sec_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *);
static int	sec_enqueue(struct ifnet *, struct mbuf *);
static void	sec_send(void *);
static void	sec_start(struct ifqueue *);

static int	sec_ioctl(struct ifnet *, u_long, caddr_t);
static int	sec_up(struct sec_softc *);
static int	sec_down(struct sec_softc *);

static int	sec_clone_create(struct if_clone *, int);
static int	sec_clone_destroy(struct ifnet *);

static struct tdb *
		sec_tdb_get(unsigned int);
static void	sec_tdb_gc(void *);

static struct if_clone sec_cloner =
    IF_CLONE_INITIALIZER("sec", sec_clone_create, sec_clone_destroy);

/* random value mixed into flowids so they differ across boots */
static unsigned int		sec_mix;
/* interfaces hashed by unit for lookup from the ipsec input path */
static struct sec_bucket	sec_map[256] __aligned(CACHELINESIZE);
/* output tdbs hashed by tdb_iface (== sec unit) for the output path */
static struct tdb		*sec_tdbh[256] __aligned(CACHELINESIZE);

/* tdbs unlinked from sec_tdbh wait here for an SMR grace period */
static struct tdb		*sec_tdb_gc_list;
static struct task		sec_tdb_gc_task =
				    TASK_INITIALIZER(sec_tdb_gc, NULL);
static struct mutex		sec_tdb_gc_mtx =
				    MUTEX_INITIALIZER(IPL_MPFLOOR);
113433cd47bSdlg
/*
 * Pseudo-device attach: seed the flowid mixer and register the
 * "sec" interface cloner.  Called once at boot; n is unused.
 */
void
secattach(int n)
{
	sec_mix = arc4random();
	if_clone_attach(&sec_cloner);
}
120433cd47bSdlg
121433cd47bSdlg static int
sec_clone_create(struct if_clone * ifc,int unit)122433cd47bSdlg sec_clone_create(struct if_clone *ifc, int unit)
123433cd47bSdlg {
124433cd47bSdlg struct sec_softc *sc;
125433cd47bSdlg struct ifnet *ifp;
126433cd47bSdlg
127433cd47bSdlg sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
128433cd47bSdlg
129433cd47bSdlg sc->sc_unit = unit;
130433cd47bSdlg
131433cd47bSdlg task_set(&sc->sc_send, sec_send, sc);
132433cd47bSdlg
133433cd47bSdlg snprintf(sc->sc_if.if_xname, sizeof sc->sc_if.if_xname, "%s%d",
134433cd47bSdlg ifc->ifc_name, unit);
135433cd47bSdlg
136433cd47bSdlg ifp = &sc->sc_if;
137433cd47bSdlg ifp->if_softc = sc;
138433cd47bSdlg ifp->if_type = IFT_TUNNEL;
139433cd47bSdlg ifp->if_mtu = SEC_MTU;
140433cd47bSdlg ifp->if_flags = IFF_POINTOPOINT|IFF_MULTICAST;
14105ebbcb3Sdlg ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
142433cd47bSdlg ifp->if_bpf_mtap = p2p_bpf_mtap;
143433cd47bSdlg ifp->if_input = p2p_input;
144433cd47bSdlg ifp->if_output = sec_output;
145433cd47bSdlg ifp->if_enqueue = sec_enqueue;
14605ebbcb3Sdlg ifp->if_qstart = sec_start;
147433cd47bSdlg ifp->if_ioctl = sec_ioctl;
148433cd47bSdlg ifp->if_rtrequest = p2p_rtrequest;
149433cd47bSdlg
150938ff1aeSbluhm if_counters_alloc(ifp);
151433cd47bSdlg if_attach(ifp);
152433cd47bSdlg if_alloc_sadl(ifp);
153433cd47bSdlg
154433cd47bSdlg #if NBPFILTER > 0
155433cd47bSdlg bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
156433cd47bSdlg #endif
157433cd47bSdlg
158433cd47bSdlg return (0);
159433cd47bSdlg }
160433cd47bSdlg
/*
 * Destroy a cloned sec(4) interface.  Marks the softc dead (so a
 * concurrent sec_up() fails with ENXIO), takes the interface down
 * if it is running, then detaches and frees it.
 */
static int
sec_clone_destroy(struct ifnet *ifp)
{
	struct sec_softc *sc = ifp->if_softc;

	NET_LOCK();
	sc->sc_dead = 1;
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		sec_down(sc);
	NET_UNLOCK();

	if_detach(ifp);

	free(sc, M_DEVBUF, sizeof(*sc));

	return (0);
}
178433cd47bSdlg
179433cd47bSdlg static int
sec_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)180433cd47bSdlg sec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
181433cd47bSdlg {
182433cd47bSdlg struct sec_softc *sc = ifp->if_softc;
183433cd47bSdlg struct ifreq *ifr = (struct ifreq *)data;
184433cd47bSdlg int error = 0;
185433cd47bSdlg
186433cd47bSdlg switch (cmd) {
187433cd47bSdlg case SIOCSIFADDR:
188433cd47bSdlg break;
189433cd47bSdlg
190433cd47bSdlg case SIOCSIFFLAGS:
191433cd47bSdlg if (ISSET(ifp->if_flags, IFF_UP)) {
192433cd47bSdlg if (!ISSET(ifp->if_flags, IFF_RUNNING))
193433cd47bSdlg error = sec_up(sc);
194433cd47bSdlg else
195433cd47bSdlg error = 0;
196433cd47bSdlg } else {
197433cd47bSdlg if (ISSET(ifp->if_flags, IFF_RUNNING))
198433cd47bSdlg error = sec_down(sc);
199433cd47bSdlg }
200433cd47bSdlg break;
201433cd47bSdlg
202433cd47bSdlg case SIOCADDMULTI:
203433cd47bSdlg case SIOCDELMULTI:
204433cd47bSdlg break;
205433cd47bSdlg
206433cd47bSdlg case SIOCSIFMTU:
207433cd47bSdlg if (ifr->ifr_mtu < SEC_MTU_MIN ||
208433cd47bSdlg ifr->ifr_mtu > SEC_MTU_MAX) {
209433cd47bSdlg error = EINVAL;
210433cd47bSdlg break;
211433cd47bSdlg }
212433cd47bSdlg
213433cd47bSdlg ifp->if_mtu = ifr->ifr_mtu;
214433cd47bSdlg break;
215433cd47bSdlg
216433cd47bSdlg default:
217433cd47bSdlg error = ENOTTY;
218433cd47bSdlg break;
219433cd47bSdlg }
220433cd47bSdlg
221433cd47bSdlg return (error);
222433cd47bSdlg }
223433cd47bSdlg
/*
 * Bring the interface up: initialise the refcnt, set IFF_RUNNING,
 * and publish the softc in its sec_map bucket so the ipsec input
 * path can find it.  Called with NET_LOCK held.
 */
static int
sec_up(struct sec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	unsigned int idx = stoeplitz_h32(sc->sc_unit) % nitems(sec_map);

	NET_ASSERT_LOCKED();
	KASSERT(!ISSET(ifp->if_flags, IFF_RUNNING));

	/* the clone is being destroyed; refuse to come up */
	if (sc->sc_dead)
		return (ENXIO);

	/*
	 * coordinate with sec_down(). if sc_up is still up and
	 * we're here then something else is running sec_down.
	 */
	if (sc->sc_up)
		return (EBUSY);

	sc->sc_up = 1;

	/* refs must be live before the softc is visible via SMR */
	refcnt_init(&sc->sc_refs);
	SET(ifp->if_flags, IFF_RUNNING);
	SMR_SLIST_INSERT_HEAD_LOCKED(&sec_map[idx], sc, sc_entry);

	return (0);
}
251433cd47bSdlg
/*
 * Take the interface down.  Clears IFF_RUNNING first, then waits for
 * SMR readers, the send task, and sec_get() references to drain
 * before unpublishing the softc.  Called with NET_LOCK held, but
 * drops it around the barriers (see comment below).
 */
static int
sec_down(struct sec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	unsigned int idx = stoeplitz_h32(sc->sc_unit) % nitems(sec_map);

	NET_ASSERT_LOCKED();
	KASSERT(ISSET(ifp->if_flags, IFF_RUNNING));

	/*
	 * taking sec down involves waiting for it to stop running
	 * in various contexts. this thread cannot hold netlock
	 * while waiting for a barrier for a task that could be trying
	 * to take netlock itself. so give up netlock, but don't clear
	 * sc_up to prevent sec_up from running.
	 */

	CLR(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();

	/* wait for SMR readers that may still see IFF_RUNNING state */
	smr_barrier();
	/* wait for any queued/running sec_send() to finish */
	taskq_del_barrier(systq, &sc->sc_send);

	/* wait for sec_get() callers to drop their references */
	refcnt_finalize(&sc->sc_refs, "secdown");

	NET_LOCK();
	SMR_SLIST_REMOVE_LOCKED(&sec_map[idx], sc, sec_softc, sc_entry);
	sc->sc_up = 0;

	return (0);
}
283433cd47bSdlg
/*
 * if_output handler: validate the address family, detect tunnel
 * loops via PACKET_TAG_GRE tags, record our if_index in a new tag,
 * and enqueue the packet for encapsulation by sec_send().
 */
static int
sec_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct m_tag *mtag;
	int error = 0;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		error = ENETDOWN;
		goto drop;
	}

	/* only inet, inet6, and mpls payloads can be tunnelled */
	switch (dst->sa_family) {
	case AF_INET:
#ifdef INET6
	case AF_INET6:
#endif
#ifdef MPLS
	case AF_MPLS:
#endif
		break;
	default:
		error = EAFNOSUPPORT;
		goto drop;
	}

	/*
	 * if this packet already passed through this interface it is
	 * looping through the tunnel; drop it rather than recurse.
	 */
	mtag = NULL;
	while ((mtag = m_tag_find(m, PACKET_TAG_GRE, mtag)) != NULL) {
		if (ifp->if_index == *(int *)(mtag + 1)) {
			error = EIO;
			goto drop;
		}
	}

	/* tag the packet with our if_index for future loop detection */
	mtag = m_tag_get(PACKET_TAG_GRE, sizeof(ifp->if_index), M_NOWAIT);
	if (mtag == NULL) {
		error = ENOBUFS;
		goto drop;
	}
	*(int *)(mtag + 1) = ifp->if_index;
	m_tag_prepend(m, mtag);

	/* remember the payload af for sec_send()/bpf */
	m->m_pkthdr.ph_family = dst->sa_family;

	error = if_enqueue(ifp, m);
	if (error != 0)
		counters_inc(ifp->if_counters, ifc_oqdrops);

	return (error);

drop:
	m_freem(m);
	return (error);
}
338433cd47bSdlg
339433cd47bSdlg static int
sec_enqueue(struct ifnet * ifp,struct mbuf * m)340433cd47bSdlg sec_enqueue(struct ifnet *ifp, struct mbuf *m)
341433cd47bSdlg {
342433cd47bSdlg struct sec_softc *sc = ifp->if_softc;
343433cd47bSdlg struct ifqueue *ifq = &ifp->if_snd;
344433cd47bSdlg int error;
345433cd47bSdlg
346433cd47bSdlg error = ifq_enqueue(ifq, m);
347433cd47bSdlg if (error)
348433cd47bSdlg return (error);
349433cd47bSdlg
350433cd47bSdlg task_add(systq, &sc->sc_send);
351433cd47bSdlg
352433cd47bSdlg return (0);
353433cd47bSdlg }
354433cd47bSdlg
/*
 * systq task: drain if_snd and push each packet into the IPsec
 * stack via the tdb bound to this interface.  If no usable tdb
 * exists the queue is purged and counted as output errors.
 */
static void
sec_send(void *arg)
{
	struct sec_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct ifqueue *ifq = &ifp->if_snd;
	struct tdb *tdb;
	struct mbuf *m;
	int error;
	unsigned int flowid;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	/* takes a tdb ref; released below */
	tdb = sec_tdb_get(sc->sc_unit);
	if (tdb == NULL)
		goto purge;

	/* stable per-interface flowid, randomised per boot by sec_mix */
	flowid = sc->sc_unit ^ sec_mix;

	NET_LOCK();
	while ((m = ifq_dequeue(ifq)) != NULL) {
		CLR(m->m_flags, M_BCAST|M_MCAST);

#if NPF > 0
		pf_pkt_addr_changed(m);
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_af(ifp->if_bpf, m->m_pkthdr.ph_family, m,
			    BPF_DIRECTION_OUT);
#endif

		m->m_pkthdr.pf.prio = sc->sc_txprio;
		SET(m->m_pkthdr.csum_flags, M_FLOWID);
		m->m_pkthdr.ph_flowid = flowid;

		error = ipsp_process_packet(m, tdb,
		    m->m_pkthdr.ph_family, /* already tunnelled? */ 0);
		if (error != 0)
			counters_inc(ifp->if_counters, ifc_oerrors);
	}
	NET_UNLOCK();

	tdb_unref(tdb);
	return;

purge:
	counters_add(ifp->if_counters, ifc_oerrors, ifq_purge(ifq));
}
406433cd47bSdlg
/*
 * if_qstart handler.  The real work happens in sec_send() on systq
 * (which runs under KERNEL_LOCK), so just reschedule the task.
 */
static void
sec_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct sec_softc *sc = ifp->if_softc;

	/* move this back to systq for KERNEL_LOCK */
	task_add(systq, &sc->sc_send);
}
416433cd47bSdlg
417433cd47bSdlg /*
418433cd47bSdlg * ipsec_input handling
419433cd47bSdlg */
420433cd47bSdlg
421433cd47bSdlg struct sec_softc *
sec_get(unsigned int unit)422433cd47bSdlg sec_get(unsigned int unit)
423433cd47bSdlg {
424433cd47bSdlg unsigned int idx = stoeplitz_h32(unit) % nitems(sec_map);
425433cd47bSdlg struct sec_bucket *sb = &sec_map[idx];
426433cd47bSdlg struct sec_softc *sc;
427433cd47bSdlg
428433cd47bSdlg smr_read_enter();
429433cd47bSdlg SMR_SLIST_FOREACH(sc, sb, sc_entry) {
430433cd47bSdlg if (sc->sc_unit == unit) {
431433cd47bSdlg refcnt_take(&sc->sc_refs);
432433cd47bSdlg break;
433433cd47bSdlg }
434433cd47bSdlg }
435433cd47bSdlg smr_read_leave();
436433cd47bSdlg
437433cd47bSdlg return (sc);
438433cd47bSdlg }
439433cd47bSdlg
440433cd47bSdlg void
sec_input(struct sec_softc * sc,int af,int proto,struct mbuf * m)441433cd47bSdlg sec_input(struct sec_softc *sc, int af, int proto, struct mbuf *m)
442433cd47bSdlg {
443433cd47bSdlg struct ip *iph;
444433cd47bSdlg int hlen;
445433cd47bSdlg
446433cd47bSdlg switch (af) {
447433cd47bSdlg case AF_INET:
448433cd47bSdlg iph = mtod(m, struct ip *);
449433cd47bSdlg hlen = iph->ip_hl << 2;
450433cd47bSdlg break;
451433cd47bSdlg #ifdef INET6
452433cd47bSdlg case AF_INET6:
453433cd47bSdlg hlen = sizeof(struct ip6_hdr);
454433cd47bSdlg break;
455433cd47bSdlg #endif
456433cd47bSdlg default:
457433cd47bSdlg unhandled_af(af);
458433cd47bSdlg }
459433cd47bSdlg
460433cd47bSdlg m_adj(m, hlen);
461433cd47bSdlg
462433cd47bSdlg switch (proto) {
463433cd47bSdlg case IPPROTO_IPV4:
464433cd47bSdlg af = AF_INET;
465433cd47bSdlg break;
466433cd47bSdlg case IPPROTO_IPV6:
467433cd47bSdlg af = AF_INET6;
468433cd47bSdlg break;
469433cd47bSdlg case IPPROTO_MPLS:
470433cd47bSdlg af = AF_MPLS;
471433cd47bSdlg break;
472433cd47bSdlg default:
473433cd47bSdlg af = AF_UNSPEC;
474433cd47bSdlg break;
475433cd47bSdlg }
476433cd47bSdlg
477433cd47bSdlg m->m_pkthdr.ph_family = af;
478433cd47bSdlg
479433cd47bSdlg if_vinput(&sc->sc_if, m);
480433cd47bSdlg }
481433cd47bSdlg
/*
 * Release a reference taken by sec_get(), waking sec_down() if it
 * is waiting in refcnt_finalize().
 */
void
sec_put(struct sec_softc *sc)
{
	refcnt_rele_wake(&sc->sc_refs);
}
487433cd47bSdlg
488433cd47bSdlg /*
489433cd47bSdlg * tdb handling
490433cd47bSdlg */
491433cd47bSdlg
492433cd47bSdlg static int
sec_tdb_valid(struct tdb * tdb)493433cd47bSdlg sec_tdb_valid(struct tdb *tdb)
494433cd47bSdlg {
495433cd47bSdlg KASSERT(ISSET(tdb->tdb_flags, TDBF_IFACE));
496433cd47bSdlg
497433cd47bSdlg if (!ISSET(tdb->tdb_flags, TDBF_TUNNELING))
498433cd47bSdlg return (0);
499433cd47bSdlg if (ISSET(tdb->tdb_flags, TDBF_INVALID))
500433cd47bSdlg return (0);
501433cd47bSdlg
502433cd47bSdlg if (tdb->tdb_iface_dir != IPSP_DIRECTION_OUT)
503433cd47bSdlg return (0);
504433cd47bSdlg
505433cd47bSdlg return (1);
506433cd47bSdlg }
507433cd47bSdlg
508433cd47bSdlg /*
509433cd47bSdlg * these are called from netinet/ip_ipsp.c with tdb_sadb_mtx held,
510433cd47bSdlg * which we rely on to serialise modifications to the sec_tdbh.
511433cd47bSdlg */
512433cd47bSdlg
/*
 * Publish a tdb in sec_tdbh so sec_tdb_get() can find it.  Called
 * from ip_ipsp.c with tdb_sadb_mtx held, which serialises writers.
 */
void
sec_tdb_insert(struct tdb *tdb)
{
	unsigned int idx;
	struct tdb **tdbp;
	struct tdb *ltdb;

	if (!sec_tdb_valid(tdb))
		return;

	idx = stoeplitz_h32(tdb->tdb_iface) % nitems(sec_tdbh);
	tdbp = &sec_tdbh[idx];

	tdb_ref(tdb); /* take a ref for the SMR pointer */

	/* wire the tdb into the head of the list */
	/* next pointer must be visible before the tdb itself is */
	ltdb = SMR_PTR_GET_LOCKED(tdbp);
	SMR_PTR_SET_LOCKED(&tdb->tdb_dnext, ltdb);
	SMR_PTR_SET_LOCKED(tdbp, tdb);
}
533433cd47bSdlg
/*
 * Unlink a tdb from sec_tdbh and hand its list reference to the gc
 * task, which waits out an SMR grace period before releasing it.
 * Called from ip_ipsp.c with tdb_sadb_mtx held.
 */
void
sec_tdb_remove(struct tdb *tdb)
{
	struct tdb **tdbp;
	struct tdb *ltdb;
	unsigned int idx;

	if (!sec_tdb_valid(tdb))
		return;

	idx = stoeplitz_h32(tdb->tdb_iface) % nitems(sec_tdbh);
	tdbp = &sec_tdbh[idx];

	while ((ltdb = SMR_PTR_GET_LOCKED(tdbp)) != NULL) {
		if (ltdb == tdb) {
			/* take the tdb out of the list */
			ltdb = SMR_PTR_GET_LOCKED(&tdb->tdb_dnext);
			SMR_PTR_SET_LOCKED(tdbp, ltdb);

			/* move the ref to the gc */

			mtx_enter(&sec_tdb_gc_mtx);
			/* tdb_dnext is reused as the gc list link */
			tdb->tdb_dnext = sec_tdb_gc_list;
			sec_tdb_gc_list = tdb;
			mtx_leave(&sec_tdb_gc_mtx);
			task_add(systq, &sec_tdb_gc_task);

			return;
		}

		tdbp = &ltdb->tdb_dnext;
	}

	panic("%s: unable to find tdb %p", __func__, tdb);
}
569433cd47bSdlg
/*
 * systq task: wait for SMR readers to stop seeing the tdbs removed
 * by sec_tdb_remove(), then drop the references that were held for
 * the hash-chain pointers.
 */
static void
sec_tdb_gc(void *null)
{
	struct tdb *tdb, *ntdb;

	/* grab the whole pending list in one go */
	mtx_enter(&sec_tdb_gc_mtx);
	tdb = sec_tdb_gc_list;
	sec_tdb_gc_list = NULL;
	mtx_leave(&sec_tdb_gc_mtx);

	if (tdb == NULL)
		return;

	/* no SMR reader can still hold a pointer to these tdbs now */
	smr_barrier();

	NET_LOCK();
	do {
		ntdb = tdb->tdb_dnext;
		tdb_unref(tdb);
		tdb = ntdb;
	} while (tdb != NULL);
	NET_UNLOCK();
}
593433cd47bSdlg
594433cd47bSdlg struct tdb *
sec_tdb_get(unsigned int unit)595433cd47bSdlg sec_tdb_get(unsigned int unit)
596433cd47bSdlg {
597433cd47bSdlg unsigned int idx;
598433cd47bSdlg struct tdb **tdbp;
599433cd47bSdlg struct tdb *tdb;
600433cd47bSdlg
601433cd47bSdlg idx = stoeplitz_h32(unit) % nitems(sec_map);
602433cd47bSdlg tdbp = &sec_tdbh[idx];
603433cd47bSdlg
604433cd47bSdlg smr_read_enter();
605433cd47bSdlg while ((tdb = SMR_PTR_GET(tdbp)) != NULL) {
606433cd47bSdlg KASSERT(ISSET(tdb->tdb_flags, TDBF_IFACE));
607433cd47bSdlg if (!ISSET(tdb->tdb_flags, TDBF_DELETED) &&
608433cd47bSdlg tdb->tdb_iface == unit) {
609433cd47bSdlg tdb_ref(tdb);
610433cd47bSdlg break;
611433cd47bSdlg }
612433cd47bSdlg
613433cd47bSdlg tdbp = &tdb->tdb_dnext;
614433cd47bSdlg }
615433cd47bSdlg smr_read_leave();
616433cd47bSdlg
617433cd47bSdlg return (tdb);
618433cd47bSdlg }
619