xref: /openbsd-src/sys/net/pf_norm.c (revision b2ea75c1b17e1a9a339660e7ed45cd24946b230e)
1 /*	$OpenBSD: pf_norm.c,v 1.6 2001/08/11 12:05:00 dhartmei Exp $ */
2 
3 /*
4  * Copyright 2001 Niels Provos <provos@citi.umich.edu>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/mbuf.h>
31 #include <sys/filio.h>
32 #include <sys/fcntl.h>
33 #include <sys/socket.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/time.h>
37 #include <sys/pool.h>
38 
39 #include <net/if.h>
40 #include <net/if_types.h>
41 #include <net/bpf.h>
42 #include <net/route.h>
43 #include <net/if_pflog.h>
44 
45 #include <netinet/in.h>
46 #include <netinet/in_var.h>
47 #include <netinet/in_systm.h>
48 #include <netinet/ip.h>
49 #include <netinet/ip_var.h>
50 #include <netinet/tcp.h>
51 #include <netinet/tcp_seq.h>
52 #include <netinet/udp.h>
53 #include <netinet/ip_icmp.h>
54 
55 #include <net/pfvar.h>
56 
57 #include "pflog.h"
58 
/* One buffered fragment: its mbuf chain and a pointer into that
 * mbuf where the IP header lives. */
struct pf_frent {
	LIST_ENTRY(pf_frent) fr_next;
	struct ip *fr_ip;
	struct mbuf *fr_m;
};

#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this */

/*
 * Reassembly context for one datagram, identified by the tuple
 * (src, dst, proto, id).  fr_queue keeps the fragments ordered by
 * offset; fr_max is one past the highest payload byte seen so far.
 */
struct pf_fragment {
	TAILQ_ENTRY(pf_fragment) frag_next;
	struct in_addr	fr_src;
	struct in_addr	fr_dst;
	u_int8_t	fr_p;		/* protocol of this fragment */
	u_int8_t	fr_flags;	/* status flags */
	u_int16_t	fr_id;		/* fragment id for reassemble */
	u_int16_t	fr_max;		/* fragment data max */
	struct timeval	fr_timeout;	/* last time this entry was used */
	LIST_HEAD(pf_fragq, pf_frent) fr_queue;
};

/* LRU queue of reassembly contexts; most recently used at the head,
 * so expiry and flushing always work from the tail. */
TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;

/* Private prototypes */
void			 pf_ip2key(struct pf_tree_key *, struct ip *);
void			 pf_remove_fragment(struct pf_fragment *);
void			 pf_flush_fragments(void);
void			 pf_free_fragment(struct pf_fragment *);
struct pf_fragment	*pf_find_fragment(struct ip *);
struct mbuf		*pf_reassemble(struct mbuf **, struct pf_fragment *,
			    struct pf_frent *, int);
u_int16_t		 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
int			 pf_normalize_tcp(int, struct ifnet *, struct mbuf *,
			    int, int, struct ip *, struct tcphdr *);

#define PFFRAG_FRENT_HIWAT	5000	/* Number of fragment entries */
#define PFFRAG_FRAG_HIWAT	1000	/* Number of fragmented packets */

#define DPFPRINTF(x)		if (pf_status.debug) printf x

/*
 * pflog expects ip_len/ip_off in network byte order while pf keeps
 * them in host order here, so convert around the call and restore
 * afterwards.
 */
#if NPFLOG > 0
#define		 PFLOG_PACKET(x,a,b,c,d,e) \
		do { \
			HTONS((x)->ip_len); \
			HTONS((x)->ip_off); \
			pflog_packet(a,b,c,d,e); \
			NTOHS((x)->ip_len); \
			NTOHS((x)->ip_off); \
		} while (0)
#else
#define		 PFLOG_PACKET
#endif

/* Globals */
struct pf_tree_node	*tree_fragment;	/* lookup tree for fragment contexts */
struct pool		 pf_frent_pl, pf_frag_pl;
int			 pf_nfrents;	/* fragment entries currently queued */
115 
void
pf_normalize_init(void)
{
	/*
	 * Backing pools for fragment entries and reassembly contexts.
	 * Both are allocated from the packet path without sleeping,
	 * so allocation failures are handled by the callers.
	 */
	pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
	    0, NULL, NULL, 0);
	pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
	    0, NULL, NULL, 0);

	/* Soft limit on contexts, hard limit on fragment entries. */
	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);

	/* LRU queue of reassembly contexts, newest at the head. */
	TAILQ_INIT(&pf_fragqueue);
}
129 
130 #define FRAG_EXPIRE	30
131 
132 void
133 pf_purge_expired_fragments(void)
134 {
135 	struct pf_fragment *frag;
136 	struct timeval now, expire;
137 
138 	microtime(&now);
139 
140 	timerclear(&expire);
141 	expire.tv_sec = FRAG_EXPIRE;
142 	timersub(&now, &expire, &expire);
143 
144 	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
145 		if (timercmp(&frag->fr_timeout, &expire, >))
146 			break;
147 
148 		DPFPRINTF((__FUNCTION__": expiring %p\n", frag));
149 		pf_free_fragment(frag);
150 	}
151 }
152 
153 /*
154  *  Try to flush old fragments to make space for new ones
155  */
156 
157 void
158 pf_flush_fragments(void)
159 {
160 	struct pf_fragment *frag;
161 	int goal = pf_nfrents * 9 / 10;
162 
163 	DPFPRINTF((__FUNCTION__": trying to free > %d frents\n",
164 		   pf_nfrents - goal));
165 
166 	while (goal < pf_nfrents) {
167 		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
168 		if (frag == NULL)
169 			break;
170 		pf_free_fragment(frag);
171 	}
172 }
173 
174 /* Frees the fragments and all associated entries */
175 
176 void
177 pf_free_fragment(struct pf_fragment *frag)
178 {
179 	struct pf_frent *frent;
180 
181 	/* Free all fragments */
182 	for (frent = LIST_FIRST(&frag->fr_queue); frent;
183 	    frent = LIST_FIRST(&frag->fr_queue)) {
184 		LIST_REMOVE(frent, fr_next);
185 
186 		m_freem(frent->fr_m);
187 		pool_put(&pf_frent_pl, frent);
188 		pf_nfrents--;
189 	}
190 
191 	pf_remove_fragment(frag);
192 }
193 
194 void
195 pf_ip2key(struct pf_tree_key *key, struct ip *ip)
196 {
197 	key->proto = ip->ip_p;
198 	key->addr[0] = ip->ip_src;
199 	key->addr[1] = ip->ip_dst;
200 	key->port[0] = ip->ip_id;
201 	key->port[1] = 0;
202 }
203 
204 struct pf_fragment *
205 pf_find_fragment(struct ip *ip)
206 {
207 	struct pf_tree_key key;
208 	struct pf_fragment *frag;
209 
210 	pf_ip2key(&key, ip);
211 
212 	frag = (struct pf_fragment *)pf_find_state(tree_fragment, &key);
213 
214 	if (frag != NULL) {
215 		microtime(&frag->fr_timeout);
216 		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
217 		TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
218 	}
219 
220 	return (frag);
221 }
222 
223 /* Removes a fragment from the fragment queue and frees the fragment */
224 
225 void
226 pf_remove_fragment(struct pf_fragment *frag)
227 {
228 	struct pf_tree_key key;
229 
230 	key.proto = frag->fr_p;
231 	key.addr[0] = frag->fr_src;
232 	key.addr[1] = frag->fr_dst;
233 	key.port[0] = frag->fr_id;
234 	key.port[1] = 0;
235 
236 	pf_tree_remove(&tree_fragment, NULL, &key);
237 	TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
238 
239 	pool_put(&pf_frag_pl, frag);
240 }
241 
242 struct mbuf *
243 pf_reassemble(struct mbuf **m0, struct pf_fragment *frag,
244     struct pf_frent *frent, int mff)
245 {
246 	struct mbuf *m = *m0, *m2;
247 	struct pf_frent *frep, *frea, *next;
248 	struct ip *ip = frent->fr_ip;
249 	int hlen = ip->ip_hl << 2;
250 	u_int16_t off = ip->ip_off;
251 	u_int16_t max = ip->ip_len + off;
252 
253 	/* Strip off ip header */
254 	m->m_data += hlen;
255 	m->m_len -= hlen;
256 
257 	/* Create a new reassembly queue for this packet */
258 	if (frag == NULL) {
259 		struct pf_tree_key key;
260 
261 		frag = pool_get(&pf_frag_pl, M_NOWAIT);
262 		if (frag == NULL) {
263 			pf_flush_fragments();
264 			frag = pool_get(&pf_frag_pl, M_NOWAIT);
265 			if (frag == NULL)
266 				goto drop_fragment;
267 		}
268 
269 		frag->fr_flags = 0;
270 		frag->fr_max = 0;
271 		frag->fr_src = frent->fr_ip->ip_src;
272 		frag->fr_dst = frent->fr_ip->ip_dst;
273 		frag->fr_p = frent->fr_ip->ip_p;
274 		frag->fr_id = frent->fr_ip->ip_id;
275 		LIST_INIT(&frag->fr_queue);
276 
277 		pf_ip2key(&key, frent->fr_ip);
278 
279 		pf_tree_insert(&tree_fragment, NULL, &key,
280 		    (struct pf_state *)frag);
281 		TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
282 
283 		/* We do not have a previous fragment */
284 		frep = NULL;
285 		goto insert;
286 	}
287 
288 	/*
289 	 * Find a fragment after the current one:
290 	 *  - off contains the real shifted offset.
291 	 */
292 	LIST_FOREACH(frea, &frag->fr_queue, fr_next) {
293 		if (frea->fr_ip->ip_off > off)
294 			break;
295 		frep = frea;
296 	}
297 
298 	KASSERT(frep != NULL || frea != NULL);
299 
300 	if (frep != NULL) {
301 		u_int16_t precut;
302 
303 		precut = frep->fr_ip->ip_off + frep->fr_ip->ip_len - off;
304 		if (precut > ip->ip_len)
305 			goto drop_fragment;
306 		if (precut) {
307 			m_adj(frent->fr_m, precut);
308 
309 			DPFPRINTF((__FUNCTION__": overlap -%d\n", precut));
310 			/* Enforce 8 byte boundaries */
311 			off = ip->ip_off += precut;
312 			ip->ip_len -= precut;
313 		}
314 	}
315 
316 	for (; frea != NULL && ip->ip_len + off > frea->fr_ip->ip_off;
317 	    frea = next) {
318 		u_int16_t aftercut;
319 
320 		aftercut = (ip->ip_len + off) - frea->fr_ip->ip_off;
321 		DPFPRINTF((__FUNCTION__": adjust overlap %d\n", aftercut));
322 		if (aftercut < frea->fr_ip->ip_len) {
323 			frea->fr_ip->ip_len -= aftercut;
324 			frea->fr_ip->ip_off += aftercut;
325 			m_adj(frea->fr_m, aftercut);
326 			break;
327 		}
328 
329 		/* This fragment is completely overlapped, loose it */
330 		next = LIST_NEXT(frea, fr_next);
331 		m_freem(frea->fr_m);
332 		LIST_REMOVE(frea, fr_next);
333 		pool_put(&pf_frent_pl, frea);
334 		pf_nfrents--;
335 	}
336 
337  insert:
338 	/* Update maxmimum data size */
339 	if (frag->fr_max < max)
340 		frag->fr_max = max;
341 	/* This is the last segment */
342 	if (!mff)
343 		frag->fr_flags |= PFFRAG_SEENLAST;
344 
345 	if (frep == NULL)
346 		LIST_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
347 	else
348 		LIST_INSERT_AFTER(frep, frent, fr_next);
349 
350 	/* Check if we are completely reassembled */
351 	if (!(frag->fr_flags & PFFRAG_SEENLAST))
352 		return (NULL);
353 
354 	/* Check if we have all the data */
355 	off = 0;
356 	for (frep = LIST_FIRST(&frag->fr_queue); frep; frep = next) {
357 		next = LIST_NEXT(frep, fr_next);
358 
359 		off += frep->fr_ip->ip_len;
360 		if (off < frag->fr_max &&
361 		    (next == NULL || next->fr_ip->ip_off != off)) {
362 			DPFPRINTF((__FUNCTION__
363 			    ": missing fragment at %d, next %d, max %d\n",
364 			    off, next == NULL ? -1 : next->fr_ip->ip_off,
365 			    frag->fr_max));
366 			return (NULL);
367 		}
368 	}
369 	DPFPRINTF((__FUNCTION__": %d < %d?\n", off, frag->fr_max));
370 	if (off < frag->fr_max)
371 		return (NULL);
372 
373 	/* We have all the data */
374 	frent = LIST_FIRST(&frag->fr_queue);
375 	KASSERT(frent != NULL);
376 	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
377 		DPFPRINTF((__FUNCTION__": drop: too big: %d\n", off));
378 		pf_free_fragment(frag);
379 		return (NULL);
380 	}
381 	next = LIST_NEXT(frent, fr_next);
382 
383 	/* Magic from ip_input */
384 	ip = frent->fr_ip;
385 	m = frent->fr_m;
386 	m2 = m->m_next;
387 	m->m_next = NULL;
388 	m_cat(m, m2);
389 	pool_put(&pf_frent_pl, frent);
390 	pf_nfrents--;
391 	for (frent = next; frent != NULL; frent = next) {
392 		next = LIST_NEXT(frent, fr_next);
393 
394 		m2 = frent->fr_m;
395 		pool_put(&pf_frent_pl, frent);
396 		pf_nfrents--;
397 		m_cat(m, m2);
398 	}
399 
400 	ip->ip_src = frag->fr_src;
401 	ip->ip_dst = frag->fr_dst;
402 
403 	/* Remove from fragment queue */
404 	pf_remove_fragment(frag);
405 
406 	hlen = ip->ip_hl << 2;
407 	ip->ip_len = off + hlen;
408 	m->m_len += hlen;
409 	m->m_data -= hlen;
410 
411 	/* some debugging cruft by sklower, below, will go away soon */
412 	/* XXX this should be done elsewhere */
413 	if (m->m_flags & M_PKTHDR) {
414 		int plen = 0;
415 		for (m2 = m; m2; m2 = m2->m_next)
416 			plen += m2->m_len;
417 		m->m_pkthdr.len = plen;
418 	}
419 
420 	DPFPRINTF((__FUNCTION__": complete: %p(%d)\n", m, ip->ip_len));
421 	return (m);
422 
423  drop_fragment:
424 	/* Oops - fail safe - drop packet */
425 	m_freem(m);
426 	return (NULL);
427 }
428 
/*
 * Apply scrub rules to an IPv4 packet.  Non-fragments fall through
 * to header cleanup (ip_off flags, minimum ttl); fragments are
 * buffered via pf_reassemble() and only a completely reassembled
 * datagram is passed on in *m0.  Returns PF_PASS or PF_DROP,
 * setting *reason on drop.
 */
int
pf_normalize_ip(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
{
	struct mbuf *m = *m0;
	struct pf_rule *r;
	struct pf_frent *frent;
	struct pf_fragment *frag;
	struct ip *h = mtod(m, struct ip *);
	int mff = (h->ip_off & IP_MF), hlen = h->ip_hl << 2;
	u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3;
	u_int16_t max;

	/* Only the first matching scrub rule applies. */
	TAILQ_FOREACH(r, pf_rules_active, entries) {
		if ((r->action == PF_SCRUB) &&
		    MATCH_TUPLE(h, r, dir, ifp))
			break;
	}

	if (r == NULL)
		return (PF_PASS);

	/* Check for illegal packets */
	if (hlen < sizeof(struct ip))
		goto drop;

	/* NOTE(review): assumes ip_len is already in host byte order
	 * here — presumably converted by pf before this is called;
	 * verify against the caller. */
	if (hlen > h->ip_len)
		goto drop;

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* Now we are dealing with a fragmented packet */
	frag = pf_find_fragment(h);

	/* This can not happen: a fragment must not carry IP_DF */
	if (h->ip_off & IP_DF) {
		DPFPRINTF((__FUNCTION__": IP_DF\n"));
		goto bad;
	}

	/*
	 * Convert to the representation pf_reassemble() expects:
	 * ip_len = payload length only, ip_off = real byte offset.
	 * The flag bits shift out of the 16-bit field (DF was
	 * rejected above).
	 */
	h->ip_len -= hlen;
	h->ip_off <<= 3;

	/* All fragments are 8 byte aligned */
	if (mff && (h->ip_len & 0x7)) {
		DPFPRINTF((__FUNCTION__": mff and %d\n", h->ip_len));
		goto bad;
	}

	max = fragoff + h->ip_len;
	/* Respect maximum length */
	if (max > IP_MAXPACKET) {
		DPFPRINTF((__FUNCTION__": max packet %d\n", max));
		goto bad;
	}
	/* Check if we saw the last fragment already */
	if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
	    max > frag->fr_max)
		goto bad;

	/* Get an entry for the fragment queue */
	frent = pool_get(&pf_frent_pl, PR_NOWAIT);
	if (frent == NULL) {
		/* Try to clean up old fragments */
		pf_flush_fragments();
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
	}
	pf_nfrents++;
	frent->fr_ip = h;
	frent->fr_m = m;

	/* Might return a completely reassembled mbuf, or NULL */
	DPFPRINTF((__FUNCTION__": reass frag %d @ %d\n", h->ip_id, fragoff));
	*m0 = m = pf_reassemble(m0, frag, frent, mff);

	if (m == NULL)
		return (PF_DROP);

	/* The reassembled packet starts with a fresh header */
	h = mtod(m, struct ip *);

 no_fragment:
	/* Header scrubbing below is only done on outgoing packets */
	if (dir != PF_OUT)
		return (PF_PASS);

	/* At this point, only IP_DF is allowed in ip_off */
	if (r->rule_flag & PFRULE_NODF)
		h->ip_off = 0;
	else
		h->ip_off &= IP_DF;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	return (PF_PASS);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(h, m, AF_INET, dir, *reason, r);
	return (PF_DROP);

 bad:
	DPFPRINTF((__FUNCTION__": dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(h, m, AF_INET, dir, *reason, r);

	return (PF_DROP);
}
549 
/*
 * Sanitize a TCP header per the first matching scrub rule: drop
 * packets with impossible flag combinations, strip FIN from SYN|FIN,
 * clear the reserved th_x2 bits, and zero a stale urgent pointer.
 * The checksum is patched incrementally and the header copied back
 * into the mbuf only if something changed.  Returns PF_PASS or
 * PF_DROP.
 */
int
pf_normalize_tcp(int dir, struct ifnet *ifp, struct mbuf *m, int ipoff,
    int off, struct ip *h, struct tcphdr *th)
{
	struct pf_rule *r, *rm = NULL;
	int rewrite = 0, reason;
	u_int8_t flags;

	/*
	 * Rule lookup using skip steps: r->skip[n] jumps over the run
	 * of consecutive rules that fail the same criterion.
	 */
	r = TAILQ_FIRST(pf_rules_active);
	while (r != NULL) {
		if (r->action != PF_SCRUB) {
			r = TAILQ_NEXT(r, entries);
			continue;
		}
		if (r->proto && r->proto != h->ip_p)
			r = r->skip[0];
		else if (r->src.mask && !pf_match_addr(r->src.not,
			    r->src.addr, r->src.mask, h->ip_src.s_addr))
			r = r->skip[1];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
			    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[2];
		else if (r->dst.mask && !pf_match_addr(r->dst.not,
			    r->dst.addr, r->dst.mask, h->ip_dst.s_addr))
			r = r->skip[3];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
			    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[4];
		else if (r->direction != dir)
			r = TAILQ_NEXT(r, entries);
		else if (r->ifp != NULL && r->ifp != ifp)
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL)
		return (PF_PASS);

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		/* SYN|FIN: strip the FIN rather than dropping */
		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t ov, nv;

		/*
		 * ov/nv snapshot the 16-bit word immediately after
		 * th_ack, which holds th_off, th_x2 and th_flags, so
		 * the checksum can be fixed up incrementally.
		 */
		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && rm->log)
		PFLOG_PACKET(h, m, AF_INET, dir, reason, rm);
	return (PF_DROP);
}
647