/*	$NetBSD: frag6.c,v 1.49 2011/05/03 17:44:30 dyoung Exp $	*/
/*	$KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: frag6.c,v 1.49 2011/05/03 17:44:30 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>
#include <netinet/icmp6.h>

#include <net/net_osdep.h>

static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
static void frag6_deq(struct ip6asfrag *);
static void frag6_insque(struct ip6q *, struct ip6q *);
static void frag6_remque(struct ip6q *);
static void frag6_freef(struct ip6q *);

static int ip6q_locked;
static int frag6_drainwanted;

u_int frag6_nfragpackets;
u_int frag6_nfrags;
struct ip6q ip6q;	/* ip6 reassembly queue */

static inline int ip6q_lock_try(void);
static inline void ip6q_unlock(void);

static inline int
ip6q_lock_try(void)
{
	int s;

	/*
	 * Use splvm() -- we're blocking things that would cause
	 * mbuf allocation.
	 */
	s = splvm();
	if (ip6q_locked) {
		splx(s);
		return (0);
	}
	ip6q_locked = 1;
	splx(s);
	return (1);
}
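
/*
 * Usage sketch (illustrative, not part of the original code): most
 * paths take the lock unconditionally through the IP6Q_LOCK() macro
 * below, while opportunistic callers such as frag6_drain() poll it:
 *
 *	if (ip6q_lock_try() != 0) {
 *		... inspect or modify the ip6q list ...
 *		IP6Q_UNLOCK();
 *	}
 */
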
static inline void
ip6q_unlock(void)
{
	int s;

	s = splvm();
	ip6q_locked = 0;
	splx(s);
}

#ifdef DIAGNOSTIC
#define	IP6Q_LOCK()							\
do {									\
	if (ip6q_lock_try() == 0) {					\
		printf("%s:%d: ip6q already locked\n", __FILE__, __LINE__); \
		panic("ip6q_lock");					\
	}								\
} while (/*CONSTCOND*/ 0)
#define	IP6Q_LOCK_CHECK()						\
do {									\
	if (ip6q_locked == 0) {						\
		printf("%s:%d: ip6q lock not held\n", __FILE__, __LINE__); \
		panic("ip6q lock check");				\
	}								\
} while (/*CONSTCOND*/ 0)
#else
#define	IP6Q_LOCK()		(void) ip6q_lock_try()
#define	IP6Q_LOCK_CHECK()	/* nothing */
#endif

#define	IP6Q_UNLOCK()		ip6q_unlock()

#ifndef offsetof		/* XXX */
#define	offsetof(type, member)	((size_t)(&((type *)0)->member))
#endif

/*
 * Initialise the reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{

	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
}
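
/*
 * Layout sketch (illustrative): ip6q is the sentinel of a circular,
 * doubly-linked list of per-packet reassembly queues, so an empty
 * list points back at the head and every traversal in this file
 * terminates when it reaches &ip6q again:
 *
 *	ip6q <-> q6_a <-> q6_b <-> ip6q		(two packets queued)
 *	ip6q <-> ip6q				(empty, post-init state)
 *
 * Each ip6q in turn anchors a circular list of ip6asfrag entries,
 * kept sorted by fragment offset.
 */
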
/*
 * In RFC2460, the fragment and reassembly rules do not agree with each
 * other in terms of next header field handling in the fragment header.
 * While the sender will use the same value for all of the fragmented
 * packets, the receiver is advised not to check them for consistency.
 *
 * fragment rule (p20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> next header field is same for all fragments
 *
 * reassembly rule (p21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is
 * going to send fragments with different next header field values.
 *
 * additional note (p22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct rtentry *rt;
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
	static struct route ro;
	union {
		struct sockaddr		dst;
		struct sockaddr_in6	dst6;
	} u;

	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;

	dstifp = NULL;
	/* find the destination interface of the packet. */
	sockaddr_in6_init(&u.dst6, &ip6->ip6_dst, 0, 0, 0);
	if ((rt = rtcache_lookup(&ro, &u.dst)) != NULL && rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)rt->rt_ifa)->ia_ifp;

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	/*
	 * check whether the fragment packet's fragment length is a
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	IP6_STATINC(IP6_STAT_FRAGMENTS);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	IP6Q_LOCK();

	/*
	 * Enforce an upper bound on the number of fragments.
	 * If maxfrags is 0, never accept fragments.
	 * If maxfrags is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce an upper bound on the number of fragmented packets
		 * for which we attempt reassembly.
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
		    M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		memset(q6, 0, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down	= q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp	= (u_char *)nxtp;
#endif
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_arrive = 0; /* Is it used anywhere? */
		q6->ip6q_ttl	= IPV6_FRAGTTL;
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 * Note that the fragment offset occupies the upper 13 bits of
	 * ip6f_offlg in 8-octet units, so the masked, byte-swapped value
	 * is already an offset in bytes.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}
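
	/*
	 * Worked example for the size check below (illustrative): the
	 * largest expressible fragment offset is 8191 * 8 == 65528, so
	 * a fragment with fragoff == 65528 and frgpartlen == 16 would
	 * reassemble to at least 65544 bytes even with an empty
	 * unfragmentable part.  That cannot be represented in the
	 * 16-bit payload length, so such a fragment is rejected with a
	 * parameter problem ICMP below.
	 */
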
	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			IP6Q_UNLOCK();
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		IP6Q_UNLOCK();
		return (IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				free(af6, M_FTABLE);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	memset(ip6af, 0, sizeof(*ip6af));
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;
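
	/*
	 * Overlap arithmetic used below (illustrative): with a queued
	 * fragment covering bytes [0, 1448) and a new fragment starting
	 * at offset 1440, i = 0 + 1448 - 1440 = 8, i.e. an 8-byte
	 * overlap.  The historical BSD code under "#if 0" trimmed the
	 * overlap away; the enabled branch drops the new fragment
	 * outright instead.
	 */
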
#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to override
	 * existing fragments from a security point of view.
	 * We don't know which fragment is the bad guy - here we trust
	 * the fragment that came in earlier, with no real reason.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
#endif

insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			IP6Q_UNLOCK();
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		IP6Q_UNLOCK();
		return IPPROTO_DONE;
	}
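
	/*
	 * Reaching this point means the walk above found no gap (each
	 * ip6af_off matched the running total) and the last fragment
	 * has ip6af_mff clear.  Example (illustrative): fragments at
	 * offsets 0, 1448 and 2896 with lengths 1448, 1448 and 500
	 * advance next to 3396 without a mismatch, so a 3396-byte
	 * fragmentable part is reassembled below.
	 */
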
	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		m_adj(t->m_next, af6->ip6af_offset);
		free(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;
#ifdef notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/*
	 * Delete the fragment header with as little cost as possible.
	 */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		memmove((char *)ip6 + sizeof(struct ip6_frag), ip6, offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			free(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		u_int8_t *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	IP6_STATINC(IP6_STAT_REASSEMBLED);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	IP6Q_UNLOCK();
	return nxt;

 dropfrag:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6_STATINC(IP6_STAT_FRAGDROPPED);
	m_freem(m);
	IP6Q_UNLOCK();
	return IPPROTO_DONE;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
frag6_freef(struct ip6q *q6)
{
	struct ip6asfrag *af6, *down6;

	IP6Q_LOCK_CHECK();

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		free(af6, M_FTABLE);
	}
	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6)
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}
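
/*
 * Splice sketch (illustrative): frag6_enq(af6, up6) links af6 in
 * directly after up6, so inserting an offset-1448 fragment into a
 * chain holding offsets 0 and 2896 yields 0 <-> 1448 <-> 2896.  The
 * chain stays sorted because frag6_input() passes the predecessor it
 * found during its ordered scan.
 */
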
/*
 * To frag6_enq as remque is to insque.
 */
void
frag6_deq(struct ip6asfrag *af6)
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

void
frag6_insque(struct ip6q *new, struct ip6q *old)
{

	IP6Q_LOCK_CHECK();

	new->ip6q_prev = old;
	new->ip6q_next = old->ip6q_next;
	old->ip6q_next->ip6q_prev = new;
	old->ip6q_next = new;
}

void
frag6_remque(struct ip6q *p6)
{

	IP6Q_LOCK_CHECK();

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

void
frag6_fasttimo(void)
{

	if (frag6_drainwanted) {
		frag6_drain();
		frag6_drainwanted = 0;
	}
}

/*
 * IPv6 reassembly timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo(void)
{
	struct ip6q *q6;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	IP6Q_LOCK();
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			/*
			 * Advance first, so that freeing the expired
			 * entry cannot invalidate the iterator.
			 */
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				IP6_STATINC(IP6_STAT_FRAGTIMEOUT);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		IP6_STATINC(IP6_STAT_FRAGOVERFLOW);
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	IP6Q_UNLOCK();

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	rtcache_free(&ip6_forward_rt);
	rtcache_free(&ipsrcchk_rt);
#endif

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

void
frag6_drainstub(void)
{

	frag6_drainwanted = 1;
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain(void)
{

	KERNEL_LOCK(1, NULL);
	if (ip6q_lock_try() != 0) {
		while (ip6q.ip6q_next != &ip6q) {
			IP6_STATINC(IP6_STAT_FRAGDROPPED);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(ip6q.ip6q_next);
		}
		IP6Q_UNLOCK();
	}
	KERNEL_UNLOCK_ONE(NULL);
}