/*	$NetBSD: frag6.c,v 1.60 2017/01/24 07:09:25 ozaki-r Exp $	*/
/*	$KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: frag6.c,v 1.60 2017/01/24 07:09:25 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>
#include <netinet/icmp6.h>

#include <net/net_osdep.h>

static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
static void frag6_deq(struct ip6asfrag *);
static void frag6_insque(struct ip6q *, struct ip6q *);
static void frag6_remque(struct ip6q *);
static void frag6_freef(struct ip6q *);

static int frag6_drainwanted;

u_int frag6_nfragpackets;
u_int frag6_nfrags;
struct ip6q ip6q;	/* ip6 reassemble queue */

static kmutex_t frag6_lock;

/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{

	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
	mutex_init(&frag6_lock, MUTEX_DEFAULT, IPL_NET);
}

/*
 * IPv6 fragment input.
 *
 * In RFC2460, the fragment and reassembly rules do not agree with each
 * other in terms of how the next header field in the fragment header is
 * handled.  While the sender will use the same value for all of the
 * fragmented packets, the receiver is advised not to check for consistency.
 *
 * fragment rule (p20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *	-> next header field is same for all fragments
 *
 * reassembly rule (p21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *	-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send different fragments with different next header fields.
 *
 * additional note (p22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *	-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct rtentry *rt;
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
	static struct route ro;
	union {
		struct sockaddr		dst;
		struct sockaddr_in6	dst6;
	} u;

	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;

	dstifp = NULL;
	/* find the destination interface of the packet. */
	sockaddr_in6_init(&u.dst6, &ip6->ip6_dst, 0, 0, 0);
	if ((rt = rtcache_lookup(&ro, &u.dst)) != NULL && rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)rt->rt_ifa)->ia_ifp;

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		goto done;
	}

	/*
	 * check whether the fragment packet's fragment length is a
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		goto done;
	}

	IP6_STATINC(IP6_STAT_FRAGMENTS);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	/*
	 * RFC6946: A host that receives an IPv6 packet which includes
	 * a Fragment Header with the "Fragment Offset" equal to 0 and
	 * the "M" bit equal to 0 MUST process such packet in isolation
	 * from any other packets/fragments.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0 && !(ip6f->ip6f_offlg & IP6F_MORE_FRAG)) {
		IP6_STATINC(IP6_STAT_REASSEMBLED);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*offp = offset;
		rtcache_unref(rt, &ro);
		return ip6f->ip6f_nxt;
	}

	mutex_enter(&frag6_lock);

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
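	 * This bounds the memory that queued fragments can tie up while
	 * they sit on the reassembly queue (for up to IPV6_FRAGTTL
	 * slow-timeout ticks) waiting for the rest of their packet.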
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;

		q6 = kmem_intr_zalloc(sizeof(struct ip6q), KM_NOSLEEP);
		if (q6 == NULL) {
			goto dropfrag;
		}
		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp = (u_char *)nxtp;
#endif
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_arrive = 0; /* Is it used anywhere? */
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			mutex_exit(&frag6_lock);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			goto done;
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		mutex_exit(&frag6_lock);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		goto done;
	}

	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		    af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				kmem_intr_free(af6, sizeof(struct ip6asfrag));

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
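				 * They are copied back from the reassembly
				 * queue entry so the ICMP error carries the
				 * original packet's addresses.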
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	ip6af = kmem_intr_zalloc(sizeof(struct ip6asfrag), KM_NOSLEEP);
	if (ip6af == NULL) {
		goto dropfrag;
	}
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	    af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue - drop it as per RFC 5722.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
		    - ip6af->ip6af_off;
		if (i > 0) {
			kmem_intr_free(ip6af, sizeof(struct ip6asfrag));
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
			kmem_intr_free(ip6af, sizeof(struct ip6asfrag));
			goto dropfrag;
		}
	}

 insert:
	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	    af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			mutex_exit(&frag6_lock);
			goto done;
		}
		next += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		mutex_exit(&frag6_lock);
		goto done;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		m_adj(t->m_next, af6->ip6af_offset);
		kmem_intr_free(af6, sizeof(struct ip6asfrag));
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	kmem_intr_free(ip6af, sizeof(struct ip6asfrag));
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;
#ifdef notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/*
	 * Delete the frag6 header at as little cost as possible.
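	 * If the whole fragment header lies within the first mbuf, just
	 * slide the preceding headers forward over it; otherwise split
	 * the chain at the header, trim the header off the tail part and
	 * reattach it.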
	 */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		memmove((char *)ip6 + sizeof(struct ip6_frag), ip6, offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			kmem_intr_free(q6, sizeof(struct ip6q));
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		u_int8_t *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	kmem_intr_free(q6, sizeof(struct ip6q));
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	IP6_STATINC(IP6_STAT_REASSEMBLED);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);
	rtcache_unref(rt, &ro);

	/*
	 * Tell launch routine the next header
	 */
	*mp = m;
	*offp = offset;

	mutex_exit(&frag6_lock);
	return nxt;

 dropfrag:
	mutex_exit(&frag6_lock);
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6_STATINC(IP6_STAT_FRAGDROPPED);
	m_freem(m);
 done:
	rtcache_unref(rt, &ro);
	return IPPROTO_DONE;
}

int
ip6_reass_packet(struct mbuf **mp, int offset)
{

	if (frag6_input(mp, &offset, IPPROTO_IPV6) == IPPROTO_DONE) {
		*mp = NULL;
		return EINVAL;
	}
	return 0;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
frag6_freef(struct ip6q *q6)
{
	struct ip6asfrag *af6, *down6;

	KASSERT(mutex_owned(&frag6_lock));

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	    af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else {
			m_freem(m);
		}
		kmem_intr_free(af6, sizeof(struct ip6asfrag));
	}
	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	kmem_intr_free(q6, sizeof(struct ip6q));
	frag6_nfragpackets--;
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6)
{

	KASSERT(mutex_owned(&frag6_lock));

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
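 * Unlink af6 from its reassembly chain; the element itself is not freed.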
 */
void
frag6_deq(struct ip6asfrag *af6)
{

	KASSERT(mutex_owned(&frag6_lock));

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

void
frag6_insque(struct ip6q *newq, struct ip6q *oldq)
{

	KASSERT(mutex_owned(&frag6_lock));

	newq->ip6q_prev = oldq;
	newq->ip6q_next = oldq->ip6q_next;
	oldq->ip6q_next->ip6q_prev = newq;
	oldq->ip6q_next = newq;
}

void
frag6_remque(struct ip6q *p6)
{

	KASSERT(mutex_owned(&frag6_lock));

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

void
frag6_fasttimo(void)
{

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif

	if (frag6_drainwanted) {
		frag6_drain();
		frag6_drainwanted = 0;
	}

#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo(void)
{
	struct ip6q *q6;

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif

	mutex_enter(&frag6_lock);
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				IP6_STATINC(IP6_STAT_FRAGTIMEOUT);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		IP6_STATINC(IP6_STAT_FRAGOVERFLOW);
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	mutex_exit(&frag6_lock);

#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	rtcache_free(&ip6_forward_rt);
	rtcache_free(&ipsrcchk_rt);
#endif
}

void
frag6_drainstub(void)
{
	frag6_drainwanted = 1;
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain(void)
{

	if (mutex_tryenter(&frag6_lock)) {
		while (ip6q.ip6q_next != &ip6q) {
			IP6_STATINC(IP6_STAT_FRAGDROPPED);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(ip6q.ip6q_next);
		}
		mutex_exit(&frag6_lock);
	}
}