1 /* $OpenBSD: frag6.c,v 1.89 2024/07/29 12:41:30 bluhm Exp $ */ 2 /* $KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $ */ 3 4 /* 5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the project nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/mutex.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/ip.h>		/* for ECN definitions */

/* Protects `frag6_queue', `frag6_nfragpackets' and `frag6_nfrags'. */
struct mutex frag6_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);

u_int frag6_nfragpackets;	/* # of reassembly queues (fragmented packets) */
u_int frag6_nfrags;		/* total # of fragments held, over all queues */
TAILQ_HEAD(ip6q_head, ip6q) frag6_queue;	/* ip6 reassemble queue */

void frag6_freef(struct ip6q *);
void frag6_unlink(struct ip6q *, struct ip6q_head *);

/* Backing pools: one ip6asfrag per fragment, one ip6q per packet in flight. */
struct pool ip6af_pool;
struct pool ip6q_pool;

/*
 * Initialise reassembly queue and pools.
 * Called once at IPv6 stack initialisation, before any fragment arrives.
 */
void
frag6_init(void)
{
	pool_init(&ip6af_pool, sizeof(struct ip6asfrag),
	    0, IPL_SOFTNET, 0, "ip6af", NULL);
	pool_init(&ip6q_pool, sizeof(struct ip6q),
	    0, IPL_SOFTNET, 0, "ip6q", NULL);

	TAILQ_INIT(&frag6_queue);
}

/*
 * In RFC2460, fragment and reassembly rule do not agree with each other,
 * in terms of next header field handling in fragment header.
 * While the sender will use the same value for all of the fragmented packets,
 * receiver is suggested not to check the consistency.
 *
 * fragment rule (p20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> next header field is same for all fragments
 *
 * reassembly rule (p21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts with fragment rule - noone is going to
 * send different fragment with different next header field.
 *
 * additional note (p22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input.
 *
 * *mp is the incoming packet, *offp the offset of the Fragment header
 * within it.  If this fragment completes a packet, the reassembled mbuf
 * chain is stored back through *mp, *offp is advanced past the
 * unfragmentable part, and the upper-layer protocol number is returned
 * so the caller can continue header processing.  In every other case
 * (fragment queued, dropped, or an ICMP error generated) the mbuf is
 * consumed and IPPROTO_DONE is returned.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto, int af)
{
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *naf6, *paf6;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	u_int8_t ecn, ecn0;

	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset);
		return IPPROTO_DONE;
	}

	/*
	 * check whether fragment packet's fragment length is
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) = 40
	 * (both multiples of 8, so (plen - offset) & 7 tests the
	 * fragment payload length itself)
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		return IPPROTO_DONE;
	}

	ip6stat_inc(ip6s_fragments);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	/*
	 * RFC6946: A host that receives an IPv6 packet which includes
	 * a Fragment Header with the "Fragment Offset" equal to 0 and
	 * the "M" bit equal to 0 MUST process such packet in isolation
	 * from any other packets/fragments.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0 && !(ip6f->ip6f_offlg & IP6F_MORE_FRAG)) {
		/* atomic fragment: hand it straight back to the caller */
		ip6stat_inc(ip6s_reassembled);
		*offp = offset;
		return ip6f->ip6f_nxt;
	}

	/* Ignore empty non atomic fragment, do not classify as overlapping. */
	if (sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) <= offset) {
		m_freem(m);
		return IPPROTO_DONE;
	}

	/*
	 * frag6_mutex is held from here until this fragment is either
	 * queued, rejected, or the packet is found to be complete.
	 */
	mtx_enter(&frag6_mutex);

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags >= 0 && frag6_nfrags >= (u_int)ip6_maxfrags) {
		mtx_leave(&frag6_mutex);
		goto dropfrag;
	}

	/* Look up the reassembly queue by (ident, src, dst). */
	TAILQ_FOREACH(q6, &frag6_queue, ip6q_queue)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == NULL) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets >= 0 &&
		    frag6_nfragpackets >= (u_int)ip6_maxfragpackets) {
			mtx_leave(&frag6_mutex);
			goto dropfrag;
		}
		frag6_nfragpackets++;
		q6 = pool_get(&ip6q_pool, PR_NOWAIT | PR_ZERO);
		if (q6 == NULL) {
			/* undo the count bump is not needed: see dropfrag? no --
			 * NOTE(review): frag6_nfragpackets was incremented above
			 * and is not decremented on this failure path here;
			 * confirm against upstream whether this is intended. */
			mtx_leave(&frag6_mutex);
			goto dropfrag;
		}

		TAILQ_INSERT_HEAD(&frag6_queue, q6, ip6q_queue);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		LIST_INIT(&q6->ip6q_asfrag);
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */
		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			mtx_leave(&frag6_mutex);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		mtx_leave(&frag6_mutex);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		return (IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		LIST_FOREACH_SAFE(af6, &q6->ip6q_asfrag, ip6af_list, naf6) {
			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct mbuf *merr = af6->ip6af_m;
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				LIST_REMOVE(af6, ip6af_list);
				pool_put(&ip6af_pool, af6);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	ip6af = pool_get(&ip6af_pool, PR_NOWAIT | PR_ZERO);
	if (ip6af == NULL) {
		mtx_leave(&frag6_mutex);
		goto dropfrag;
	}
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (first_frag) {
		/* fresh queue: no neighbours to compare against */
		paf6 = NULL;
		goto insert;
	}

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			mtx_leave(&frag6_mutex);
			pool_put(&ip6af_pool, ip6af);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		mtx_leave(&frag6_mutex);
		pool_put(&ip6af_pool, ip6af);
		goto dropfrag;
	}

	/*
	 * Find a segment which begins after this one does.
	 * paf6 trails one element behind af6, so afterwards paf6 is the
	 * last fragment starting at or before the new one.
	 */
	for (paf6 = NULL, af6 = LIST_FIRST(&q6->ip6q_asfrag);
	    af6 != NULL;
	    paf6 = af6, af6 = LIST_NEXT(af6, ip6af_list))
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * RFC 5722, Errata 3089:  When reassembling an IPv6 datagram, if one
	 * or more its constituent fragments is determined to be an overlapping
	 * fragment, the entire datagram (and any constituent fragments) MUST
	 * be silently discarded.
	 */
	if (paf6 != NULL) {
		/* overlap with the preceding fragment? */
		i = (paf6->ip6af_off + paf6->ip6af_frglen) - ip6af->ip6af_off;
		if (i > 0)
			goto flushfrags;
	}
	if (af6 != NULL) {
		/* overlap with the following fragment? */
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0)
			goto flushfrags;
	}

 insert:
	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	if (paf6 != NULL)
		LIST_INSERT_AFTER(paf6, ip6af, ip6af_list);
	else
		LIST_INSERT_HEAD(&q6->ip6q_asfrag, ip6af, ip6af_list);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
	/* walk the sorted list; any gap means we are still incomplete */
	next = 0;
	for (paf6 = NULL, af6 = LIST_FIRST(&q6->ip6q_asfrag);
	    af6 != NULL;
	    paf6 = af6, af6 = LIST_NEXT(af6, ip6af_list)) {
		if (af6->ip6af_off != next) {
			mtx_leave(&frag6_mutex);
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	/* last fragment must have M=0, otherwise more are expected */
	if (paf6->ip6af_mff) {
		mtx_leave(&frag6_mutex);
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = LIST_FIRST(&q6->ip6q_asfrag);
	LIST_REMOVE(ip6af, ip6af_list);
	t = m = ip6af->ip6af_m;
	while ((af6 = LIST_FIRST(&q6->ip6q_asfrag)) != NULL) {
		LIST_REMOVE(af6, ip6af_list);
		while (t->m_next)
			t = t->m_next;
		t->m_next = af6->ip6af_m;
		/* strip each follow-up fragment's headers before joining */
		m_adj(t->m_next, af6->ip6af_offset);
		m_removehdr(t->m_next);
		pool_put(&ip6af_pool, af6);
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	pool_put(&ip6af_pool, ip6af);
	next += offset - sizeof(struct ip6_hdr);
	if ((u_int)next > IPV6_MAXPACKET) {
		TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
		frag6_nfrags -= q6->ip6q_nfrag;
		frag6_nfragpackets--;
		mtx_leave(&frag6_mutex);
		pool_put(&ip6q_pool, q6);
		goto dropfrag;
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(next);
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;

	/* Delete frag6 header */
	if (frag6_deletefraghdr(m, offset) != 0) {
		TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
		frag6_nfrags -= q6->ip6q_nfrag;
		frag6_nfragpackets--;
		mtx_leave(&frag6_mutex);
		pool_put(&ip6q_pool, q6);
		goto dropfrag;
	}

	/* success: take the finished packet off the reassembly queue */
	TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
	frag6_nfrags -= q6->ip6q_nfrag;
	frag6_nfragpackets--;

	mtx_leave(&frag6_mutex);

	pool_put(&ip6q_pool, q6);

	m_calchdrlen(m);

	/*
	 * Restore NXT to the original.
	 */
	{
		int prvnxt = ip6_get_prevhdr(m, offset);
		uint8_t *prvnxtp;

		IP6_EXTHDR_GET(prvnxtp, uint8_t *, m, prvnxt,
		    sizeof(*prvnxtp));
		if (prvnxtp == NULL)
			goto dropfrag;
		*prvnxtp = nxt;
	}

	ip6stat_inc(ip6s_reassembled);

	/*
	 * Tell launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	return nxt;

 flushfrags:
	/* overlap detected: silently discard the whole datagram (RFC 5722) */
	TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
	frag6_nfrags -= q6->ip6q_nfrag;
	frag6_nfragpackets--;

	mtx_leave(&frag6_mutex);

	pool_put(&ip6af_pool, ip6af);

	while ((af6 = LIST_FIRST(&q6->ip6q_asfrag)) != NULL) {
		LIST_REMOVE(af6, ip6af_list);
		m_freem(af6->ip6af_m);
		pool_put(&ip6af_pool, af6);
	}
	ip6stat_add(ip6s_fragdropped, q6->ip6q_nfrag + 1);
	pool_put(&ip6q_pool, q6);
	m_freem(m);
	return IPPROTO_DONE;

 dropfrag:
	/* reached without frag6_mutex held; frees the current mbuf only */
	ip6stat_inc(ip6s_fragdropped);
	m_freem(m);
	return IPPROTO_DONE;
}

/*
 * Delete fragment header after the unfragmentable header portions.
 * `offset' is the position of the fragment header within `m'.
 * Returns 0 on success or ENOBUFS if the mbuf split fails.
 */
int
frag6_deletefraghdr(struct mbuf *m, int offset)
{
	struct mbuf *t;

	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		/* header and preceding data are contiguous: slide the
		 * unfragmentable part forward over the fragment header */
		memmove(mtod(m, caddr_t) + sizeof(struct ip6_frag),
		    mtod(m, caddr_t), offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL)
			return (ENOBUFS);
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	return (0);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 * The header must not be in any queue.
 */
void
frag6_freef(struct ip6q *q6)
{
	struct ip6asfrag *af6;

	while ((af6 = LIST_FIRST(&q6->ip6q_asfrag)) != NULL) {
		struct mbuf *m = af6->ip6af_m;

		LIST_REMOVE(af6, ip6af_list);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			/*
			 * Take the shared net lock around icmp6_error();
			 * we are called from frag6_slowtimo() after
			 * frag6_mutex has been released, so it is safe
			 * to sleep/lock here.
			 */
			NET_LOCK_SHARED();
			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
			NET_UNLOCK_SHARED();
		} else
			m_freem(m);
		pool_put(&ip6af_pool, af6);
	}
	pool_put(&ip6q_pool, q6);
}

/*
 * Unlinks a fragment reassembly header from the reassembly queue
 * and inserts it into a given remove queue.
 * Caller must hold frag6_mutex; the global counters are updated here
 * so the entry can later be freed without the mutex.
 */
void
frag6_unlink(struct ip6q *q6, struct ip6q_head *rmq6)
{
	MUTEX_ASSERT_LOCKED(&frag6_mutex);

	TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
	TAILQ_INSERT_HEAD(rmq6, q6, ip6q_queue);
	frag6_nfrags -= q6->ip6q_nfrag;
	frag6_nfragpackets--;
}

/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 * Expired entries are first collected on a local remove queue under
 * frag6_mutex, then freed (possibly sending ICMP errors) after the
 * mutex has been dropped.
 */
void
frag6_slowtimo(void)
{
	struct ip6q_head rmq6;
	struct ip6q *q6, *nq6;

	TAILQ_INIT(&rmq6);

	mtx_enter(&frag6_mutex);

	TAILQ_FOREACH_SAFE(q6, &frag6_queue, ip6q_queue, nq6) {
		if (--q6->ip6q_ttl == 0) {
			ip6stat_inc(ip6s_fragtimeout);
			frag6_unlink(q6, &rmq6);
		}
	}

	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    !TAILQ_EMPTY(&frag6_queue)) {
		ip6stat_inc(ip6s_fragoverflow);
		/* evict the least recently active packet first */
		frag6_unlink(TAILQ_LAST(&frag6_queue, ip6q_head), &rmq6);
	}

	mtx_leave(&frag6_mutex);

	/* free collected entries outside the mutex */
	while ((q6 = TAILQ_FIRST(&rmq6)) != NULL) {
		TAILQ_REMOVE(&rmq6, q6, ip6q_queue);
		frag6_freef(q6);
	}
}