/*	$NetBSD: npf_handler.c,v 1.30 2014/05/19 18:45:51 jakllsch Exp $	*/

/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF packet handler.
 *
 * Note: pfil(9) hooks are currently locked by softnet_lock and kernel-lock.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_handler.c,v 1.30 2014/05/19 18:45:51 jakllsch Exp $");

#include <sys/types.h>
#include <sys/param.h>

#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <net/if.h>
#include <net/pfil.h>
#include <sys/socketvar.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>

#include "npf_impl.h"

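/*
 * pfil(9) hook heads: the interface event head and the IPv4/IPv6 heads.
 * pfil_registered tracks whether the packet handlers are attached.
 */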
static bool		pfil_registered = false;
static pfil_head_t *	npf_ph_if = NULL;
static pfil_head_t *	npf_ph_inet = NULL;
static pfil_head_t *	npf_ph_inet6 = NULL;

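/* Without INET6 in the kernel, IPv6 reassembly is not supported. */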
#ifndef INET6
#define ip6_reass_packet(x, y)	ENOTSUP
#endif

/*
 * npf_ifhook: hook handling interface changes.
 */
static int
npf_ifhook(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
{
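	/* For interface events, the command is passed via the mbuf pointer. */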
	u_long cmd = (u_long)mp;

	if (di == PFIL_IFNET) {
		switch (cmd) {
		case PFIL_IFNET_ATTACH:
			npf_ifmap_attach(ifp);
			break;
		case PFIL_IFNET_DETACH:
			npf_ifmap_detach(ifp);
			break;
		}
	}
	return 0;
}

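/*
 * npf_reassembly: handle IPv4/IPv6 fragment reassembly and, once the
 * packet is complete, re-cache it so that layer 4 data is accessible.
 */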
static int
npf_reassembly(npf_cache_t *npc, nbuf_t *nbuf, struct mbuf **mp)
{
	int error = EINVAL;

	/* Reset the mbuf as it may have changed. */
	*mp = nbuf_head_mbuf(nbuf);
	nbuf_reset(nbuf);

	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = nbuf_dataptr(nbuf);
		error = ip_reass_packet(mp, ip);
	} else if (npf_iscached(npc, NPC_IP6)) {
		/*
		 * Note: ip6_reass_packet() offset is the start of
		 * the fragment header.
		 */
		error = ip6_reass_packet(mp, npc->npc_hlen);
		if (error && *mp == NULL) {
			memset(nbuf, 0, sizeof(nbuf_t));
		}
	}
	if (error) {
		npf_stats_inc(NPF_STAT_REASSFAIL);
		return error;
	}
	if (*mp == NULL) {
		/* More fragments should come. */
		npf_stats_inc(NPF_STAT_FRAGMENTS);
		return 0;
	}

	/*
	 * Reassembly is complete, we have the final packet.
	 * Cache again, since layer 4 data is accessible now.
	 */
	nbuf_init(nbuf, *mp, nbuf->nb_ifp);
	npc->npc_info = 0;

	if (npf_cache_all(npc, nbuf) & NPC_IPFRAG) {
		return EINVAL;
	}
	npf_stats_inc(NPF_STAT_REASSEMBLY);
	return 0;
}

/*
 * npf_packet_handler: main packet handling routine for layer 3.
 *
 * Note: packet flow and inspection logic is in strict order.
 */
int
npf_packet_handler(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
{
	nbuf_t nbuf;
	npf_cache_t npc;
	npf_session_t *se;
	npf_rule_t *rl;
	npf_rproc_t *rp;
	int error, retfl;
	int decision;

	/*
	 * Initialise packet information cache.
	 * Note: it is enough to clear the info bits.
	 */
	KASSERT(ifp != NULL);
	nbuf_init(&nbuf, *mp, ifp);
	npc.npc_info = 0;
	decision = NPF_DECISION_BLOCK;
	error = 0;
	retfl = 0;
	rp = NULL;

	/* Cache everything.  Determine whether it is an IP fragment. */
	if (npf_cache_all(&npc, &nbuf) & NPC_IPFRAG) {
		/*
		 * Pass to IPv4 or IPv6 reassembly mechanism.
		 */
		error = npf_reassembly(&npc, &nbuf, mp);
		if (error) {
			se = NULL;
			goto out;
		}
		if (*mp == NULL) {
			/* More fragments should come; return. */
			return 0;
		}
	}

	/* Inspect the list of sessions (if found, acquires a reference). */
	se = npf_session_inspect(&npc, &nbuf, di, &error);

	/* If "passing" session found - skip the ruleset inspection. */
	if (se && npf_session_pass(se, &rp)) {
		npf_stats_inc(NPF_STAT_PASS_SESSION);
		KASSERT(error == 0);
		goto pass;
	}
	if (error) {
		if (error == ENETUNREACH)
			goto block;
		goto out;
	}

	/* Acquire the lock, inspect the ruleset using this packet. */
	int slock = npf_config_read_enter();
	npf_ruleset_t *rlset = npf_config_ruleset();

	rl = npf_ruleset_inspect(&npc, &nbuf, rlset, di, NPF_LAYER_3);
	if (rl == NULL) {
		const bool pass = npf_default_pass();
		npf_config_read_exit(slock);

		if (pass) {
			npf_stats_inc(NPF_STAT_PASS_DEFAULT);
			goto pass;
		}
		npf_stats_inc(NPF_STAT_BLOCK_DEFAULT);
		goto block;
	}

	/*
	 * Get the rule procedure (acquires a reference) for association
	 * with a session (if any) and execution.
	 */
	KASSERT(rp == NULL);
	rp = npf_rule_getrproc(rl);

	/* Conclude with the rule and release the lock. */
	error = npf_rule_conclude(rl, &retfl);
	npf_config_read_exit(slock);

	if (error) {
		npf_stats_inc(NPF_STAT_BLOCK_RULESET);
		goto block;
	}
	npf_stats_inc(NPF_STAT_PASS_RULESET);

	/*
	 * Establish a "pass" session, if required.  Just proceed,
	 * if session creation fails (e.g. due to unsupported protocol).
	 */
	if ((retfl & NPF_RULE_STATEFUL) != 0 && !se) {
		se = npf_session_establish(&npc, &nbuf, di,
		    (retfl & NPF_RULE_MULTIENDS) == 0);
		if (se) {
			/*
			 * Note: the reference on the rule procedure is
			 * transferred to the session.  It will be released
			 * on session destruction.
			 */
			npf_session_setpass(se, rp);
		}
	}
pass:
	decision = NPF_DECISION_PASS;
	KASSERT(error == 0);
	/*
	 * Perform NAT.
	 */
	error = npf_do_nat(&npc, se, &nbuf, di);
block:
	/*
	 * Execute the rule procedure, if any is associated.
	 * It may reverse the decision from pass to block.
	 */
	if (rp && !npf_rproc_run(&npc, &nbuf, rp, &decision)) {
		if (se) {
			npf_session_release(se);
		}
		npf_rproc_release(rp);
		*mp = NULL;
		return 0;
	}
out:
	/*
	 * Release the reference on a session.  Release the reference on a
	 * rule procedure only if there was no association.
	 */
	if (se) {
		npf_session_release(se);
	} else if (rp) {
		npf_rproc_release(rp);
	}

	/* Reset mbuf pointer before returning to the caller. */
	if ((*mp = nbuf_head_mbuf(&nbuf)) == NULL) {
		return error ? error : ENOMEM;
	}

	/* Pass the packet if decided and there is no error. */
	if (decision == NPF_DECISION_PASS && !error) {
		/*
		 * XXX: Disable for now, it will be set accordingly later,
		 * for optimisations (to reduce inspection).
		 */
		(*mp)->m_flags &= ~M_CANFASTFWD;
		return 0;
	}

	/*
	 * Block the packet.  ENETUNREACH is used to indicate blocking.
	 * Depending on the flags and protocol, return TCP reset (RST) or
	 * ICMP destination unreachable.
	 */
	if (retfl && npf_return_block(&npc, &nbuf, retfl)) {
		*mp = NULL;
	}

	if (!error) {
		error = ENETUNREACH;
	}

	if (*mp) {
		m_freem(*mp);
		*mp = NULL;
	}
	return error;
}

/*
 * npf_pfil_register: register pfil(9) hooks.
 */
int
npf_pfil_register(bool init)
{
	int error = 0;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	/* Init: interface re-config and attach/detach hook. */
	if (!npf_ph_if) {
		npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
		if (!npf_ph_if) {
			error = ENOENT;
			goto out;
		}
		error = pfil_add_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
		KASSERT(error == 0);
	}
	if (init) {
		goto out;
	}

	/* Bail out if the pfil hooks are already registered. */
	if (pfil_registered) {
		error = EEXIST;
		goto out;
	}

	/* Capture points of the activity in the IP layer. */
	npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
	npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
	if (!npf_ph_inet && !npf_ph_inet6) {
		error = ENOENT;
		goto out;
	}

	/* Packet IN/OUT handlers for IP layer. */
	if (npf_ph_inet) {
		error = pfil_add_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet);
		KASSERT(error == 0);
	}
	if (npf_ph_inet6) {
		error = pfil_add_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet6);
		KASSERT(error == 0);
	}
	pfil_registered = true;
out:
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);

	return error;
}

/*
 * npf_pfil_unregister: unregister pfil(9) hooks.
 */
void
npf_pfil_unregister(bool fini)
{
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	if (fini && npf_ph_if) {
		(void)pfil_remove_hook(npf_ifhook, NULL,
		    PFIL_IFADDR | PFIL_IFNET, npf_ph_if);
	}
	if (npf_ph_inet) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet);
	}
	if (npf_ph_inet6) {
		(void)pfil_remove_hook(npf_packet_handler, NULL,
		    PFIL_ALL, npf_ph_inet6);
	}
	pfil_registered = false;

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

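/*
 * npf_pfil_registered_p: return true if the packet handler hooks are
 * registered.
 */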
bool
npf_pfil_registered_p(void)
{
	return pfil_registered;
}