/* $KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $ */
/* $DragonFly: src/sys/net/altq/altq_rmclass.c,v 1.1 2005/02/11 22:25:57 joerg Exp $ */

/*
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Network Research
 *      Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1997.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 */

#ident "@(#)rm_class.c  1.48     97/12/05 SMI"

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>

#include <net/if.h>

#include <net/altq/altq.h>
#include <net/altq/altq_rmclass.h>
#include <net/altq/altq_rmclass_debug.h>
#include <net/altq/altq_red.h>
#include <net/altq/altq_rio.h>

#ifdef CBQ_TRACE
static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
static struct cbqtrace *cbqtrace_ptr = NULL;
static int cbqtrace_count;
#endif

/*
 * Local Macros
 */

#define reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

/*
 * Local routines.
 */

static int rmc_satisfied(struct rm_class *, struct timeval *);
static void rmc_wrr_set_weights(struct rm_ifdat *);
static void rmc_depth_compute(struct rm_class *);
static void rmc_depth_recompute(rm_class_t *);

static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int _rmc_addq(rm_class_t *, struct mbuf *);
static void _rmc_dropq(rm_class_t *);
static struct mbuf *_rmc_getq(rm_class_t *);
static struct mbuf *_rmc_pollq(rm_class_t *);

static int rmc_under_limit(struct rm_class *, struct timeval *);
static void rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void rmc_drop_action(struct rm_class *);
static void rmc_restart(void *);
static void rmc_root_overlimit(struct rm_class *, struct rm_class *);

#define BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class borrowing from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * now heuristics for setting the top-level variable (cutoff_) becomes:
 *	1. if a packet arrives for a not-overlimit class, set cutoff
 *	   to the depth of the class.
 *	2. if cutoff is i, and a packet arrives for an overlimit class
 *	   with an underlimit ancestor at a lower level than i (say j),
 *	   then set cutoff to j.
 *	3. at scheduling a packet, if there is no underlimit class
 *	   due to the current cutoff level, increase cutoff by 1 and
 *	   then try to schedule again.
 */

/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte  is the data rate of the interface in nanoseconds/byte.
 *              E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *              than 100% of the bandwidth, this number should be the
 *              'effective' rate for the class.  Let f be the
 *              bandwidth fraction allocated to this class, and let
 *              nsPerByte be the data rate of the output link in
 *              nanoseconds/byte.  Then nsecPerByte is set to
 *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *              for a class that gets 50% of an ethernet's bandwidth.
 *
 * action       the routine to call when the class is over limit.
 *
 * maxq         max allowable queue size for class (in packets).
 *
 * parent       parent class pointer.
 *
 * borrow       class to borrow from (should be either 'parent' or null).
 *
 * maxidle      max value allowed for class 'idle' time estimate (this
 *              parameter determines how large an initial burst of packets
 *              can be before overlimit action is invoked).
 *
 * offtime      how long 'delay' action will delay when class goes over
 *              limit (this parameter determines the steady-state burst
 *              size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *   ptime = s * nsPerByte * (1 - f) / f
 *   maxidle = ptime * (1 - g^b) / g^b
 *   minidle = -ptime * (1 / (f - 1))
 *   offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 *	maxidle = maxidle * (8.0 / nsecPerByte);
 *	offtime = offtime * (8.0 / nsecPerByte);
 */
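/*
 * A worked example of the scaling above (illustrative numbers only,
 * assuming RM_FILTER_GAIN == 5, i.e. g = 1 - 2^-5 = 0.96875):
 *
 *   10Mb/s link            -> nsPerByte = 800
 *   class gets f = 50%     -> nsecPerByte = 800 / 0.5 = 1600
 *   allotment_ = RM_NS_PER_SEC / nsecPerByte
 *              = 10^9 / 1600 = 625000 bytes/sec
 *
 * With s = 1000 bytes and b = 16 packet bursts:
 *   ptime   = 1000 * 800 * (1 - 0.5)/0.5   = 800000 ns
 *   maxidle = 800000 * (1 - g^16)/g^16    ~= 530000 ns
 * scaled for this routine:
 *   maxidle = 530000 * 2^5 * 8 / (1000 * 1600) ~= 85
 * and rmc_newclass() below undoes the scaling into its internal units:
 *   cl->maxidle_ = (85 * 1600) / 8 = 17000
 * which is ~531 usec shifted up by RM_FILTER_GAIN (17000 / 32 ~= 531).
 */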
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*action)(rm_class_t *, rm_class_t *), int maxq,
    struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
    int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class *cl;
	struct rm_class *peer;
	int s;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cl->callout_);
	cl->q_ = malloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/*
	 * put the class into the class tree
	 */
	s = splimp();
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	splx(s);
	return (cl);
}

int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
    int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat *ifd;
	u_int old_allotment;
	int s;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

	s = splimp();
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	splx(s);
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 *	the appropriate round robin weights for the CBQ weighted round
 *	robin algorithm.
 *
 *	Returns: NONE
 */

static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int i;
	struct rm_class *cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
			    (ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
					    ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}
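/*
 * Illustrative arithmetic for the weights above (example numbers, not
 * from the original source):  two classes at one priority level on a
 * 10Mb/s link with maxpkt_ = 1500, allotments 625000 (50%) and
 * 312500 (25%) bytes/sec:
 *
 *   alloc_[i] = 937500, num_[i] = 2
 *   M_[i] = 937500 / (2 * 1500) = 312
 *   w_allotment_ = 625000 / 312 = 2003   (the 50% class)
 *   w_allotment_ = 312500 / 312 = 1001   (the 25% class)
 *
 * The two weights sum to roughly num_[i] * maxpkt_ (3004 ~= 3000), so
 * w_allotment_ is, in effect, the byte budget a class receives per WRR
 * round, in proportion to its share of the priority level.
 */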
/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t *t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t *p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t *t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}
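/*
 * A small example of how the two depth routines interact (illustrative,
 * not from the original source).  A class's depth is its height above
 * its deepest leaf; leaves are depth 0.
 *
 *	root(2) --- A(1) --- A1(0)
 *	        \-- B(0)
 *
 * Deleting A1 and calling rmc_depth_recompute(A):  A has no children
 * left, so A->depth_ becomes 0; walking up, root's deepest child is now
 * depth 0, so root->depth_ becomes 1.  Conversely, adding a new child
 * under B and calling rmc_depth_compute() on it pushes depths back up
 * the branch one parent at a time.
 */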
/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 *	Returns:	NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class *p, *head, *previous;
	int s;

	KKASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		callout_stop(&cl->callout_);

	s = splimp();
	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *	 re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			KKASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			KKASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	splx(s);

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
	}
	free(cl->q_, M_ALTQ);
	free(cl, M_ALTQ);
}

/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 *	Returns:	NONE
 */
void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
    int minidle, u_int offtime, int flags)
{
	int i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero(ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
	    maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
	if (ifd->root_ == NULL) {
		printf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}
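/*
 * Example values for the maxiftime_ clamp above (illustrative only):
 * maxiftime_ bounds how far the estimated completion time (ifnow_) may
 * run ahead of the wall clock, in usec, and starts at roughly 16 MTU
 * transmission times.
 *
 *   10Mb/s, MTU 1500:  1500 * 800 / 1000 * 16 = 19200 usec (~19 ms);
 *     1500 * 800 = 1.2e6 <= 10e6, so no reduction applies.
 *   128kb/s, MTU 1500: nsecPerByte = 62500, so
 *     1500 * 62500 / 1000 * 16 = 1500000 usec, and since
 *     1500 * 62500 = 93.75e6 > 10e6 it is divided by 4 -> 375 ms.
 */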
/*
 * void
 * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 *	Returns:	0 on successful queueing
 *			-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
{
	struct timeval now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int cpri = cl->pri_;
	int is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			    borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob',
					    ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
				    cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 *	classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int i;
	rm_class_t *p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied.  0, otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}
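/*
 * An illustrative borrowing scenario for rmc_under_limit() below (not
 * from the original source).  Take a leaf L (depth 0) borrowing from
 * parent P (depth 1), which borrows from the root R (depth 2):
 *
 *   - cutoff_ == RM_MAXDEPTH: an overlimit L may climb the borrow_
 *     chain to P, and on to R if P is also overlimit.
 *   - cutoff_ == 1: the climb stops once cl->depth_ > cutoff_, i.e. L
 *     may borrow from P but not from R; with ADJUST_CUTOFF defined the
 *     routine then just returns 0 and lets the scheduler raise cutoff_
 *     rather than invoking the delay action.
 */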
/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */

static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p = cl;
	rm_class_t *top;
	struct rm_ifdat *ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		callout_stop(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this
				   class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */
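/*
 * A sketch of the WRR rotation (example numbers, not from the original
 * source):  two backlogged classes at one priority, A with
 * w_allotment_ = 2000 and B with w_allotment_ = 1000, both sending
 * 1500-byte packets.  A class is topped up by w_allotment_ whenever it
 * is visited with bytes_alloc_ <= 0, may send while bytes_alloc_ > 0,
 * and the active pointer only advances once its allocation is spent:
 *
 *	A: +2000 -> send (500 left) -> send (-1000, advance)
 *	B: +1000 -> send (-500, advance)
 *	A: +2000 -> 1000 -> send (-500, advance) ...
 *
 * so A sends about twice the bytes of B, as the 2:1 weights intend.
 * The deficit == 2 second pass below exists so that a class whose
 * w_allotment_ is smaller than a single packet can still send one.
 */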
static struct mbuf *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class *cl = NULL, *first = NULL;
	u_int deficit;
	int cpri;
	struct mbuf *m;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_]" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		KKASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 ||
					    deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				}
				else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit.  */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0 /* too time-consuming for nothing */
	if (cl->sleeping_)
		callout_stop(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}
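/*
 * The pollcache_ handling above implements the usual ALTQ poll/remove
 * contract.  A hypothetical driver sequence (sketch only, not from the
 * original source; driver_has_room() is a made-up predicate):
 *
 *	m = rmc_dequeue_next(ifd, ALTDQ_POLL);
 *		(peek; m stays queued, pollcache_ remembers the class)
 *	if (m != NULL && driver_has_room(...))
 *		m = rmc_dequeue_next(ifd, ALTDQ_REMOVE);
 *
 * The ALTDQ_REMOVE call returns the very mbuf that was just polled,
 * because the class is taken from pollcache_ instead of re-running the
 * scheduling loop.
 */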
/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static struct mbuf *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct mbuf *m;
	int cpri;
	struct rm_class *cl, *first = NULL;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		KKASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0 /* too time-consuming for nothing */
	if (cl->sleeping_)
		callout_stop(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * struct mbuf *
 * rmc_dequeue_next(struct rm_ifdat *ifd, int mode) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 *	Returns:	NULL, if a packet is not available or if all
 *			classes are overlimit.
 *
 *			Otherwise, Pointer to the next packet.
 */

struct mbuf *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}

/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */

/*
 * a macro to approximate "divide by 1000" that gives 0.000999,
 * if a value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
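/*
 * Spot check of the approximation (illustrative arithmetic):
 *   1/2^10 + 1/2^16 + 1/2^17 ~= 0.00099945, vs. 1/1000 = 0.001.
 * For a 1500-byte packet at 800 ns/byte, t = 1200000 ns:
 *   exact:  1200000 / 1000 = 1200 usec
 *   approx: (1200000 >> 10) + (1200000 >> 16) + (1200000 >> 17)
 *         = 1171 + 18 + 9 = 1198 usec  (~0.2% low)
 */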
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int idle, avgidle, pktlen;
	int pkt_time, tidle;
	rm_class_t *cl, *borrowed;
	rm_class_t *borrows;
	struct timeval *nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	pktlen = ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rmc_update_class_util() is designed to be called when the
	 * transfer is completed from a transmit-complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in usec */
#if 1 /* use approximation */
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
	pkt_time = NSEC_TO_USEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
#if 1 /* ALTQ4PPP */
	if (TV_LT(nowp, &ifd->ifnow_)) {
		int iftime;

		/*
		 * make sure the estimated completion time does not go
		 * too far.  it can happen when the link layer supports
		 * data compression or the interface speed is set to
		 * a much lower value.
		 */
		TV_DELTA(&ifd->ifnow_, nowp, iftime);
		if (iftime+pkt_time < ifd->maxiftime_) {
			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
		}
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#else
	if (TV_LT(nowp, &ifd->ifnow_)) {
		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#endif

	while (cl != NULL) {
		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TV_DELTA returns 2000000 us when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in usec */
#if 1 /* use approximation */
		pkt_time = pktlen * cl->ns_per_byte_;
		pkt_time = NSEC_TO_USEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo',
			    cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
			    (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				callout_stop(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}

	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) ||
		    TV_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob',
			    borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) ||
		    TV_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob',
			    borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}
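/*
 * One step of the avgidle filter above, with numbers (illustrative,
 * assuming RM_FILTER_GAIN == 5):  avgidle_ is the EWMA of idle time
 * kept scaled up by 2^5, so
 *
 *	avgidle += idle - (avgidle >> 5)
 *
 * is the scaled form of  avg' = avg + (idle - avg)/32.  E.g. with
 * cl->avgidle_ = 3200 (true average 100 usec) and a new sample
 * idle = 68 usec:
 *
 *	avgidle = 3200 + 68 - (3200 >> 5) = 3200 + 68 - 100 = 3168
 *
 * i.e. the true average decays from 100 to 99 usec.  A negative
 * avgidle_ means the class has been sending faster than its allotment;
 * undertime_ is then set far enough ahead ("set next idle to make
 * avgidle 0" above) before the class may send again.
 */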
/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 *	Returns:	NONE
 */

static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	KKASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}

void
rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}

/*
 * void
 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
 *	delay action routine.  It is invoked via rmc_under_limit when the
 *	packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being
 *	overlimit, then delay for the offtime of the borrowing class that
 *	is overlimit.
 *
 *	Returns:	NONE
 */

void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int delay, t, extradelay;

	cl->stats_.overactions++;
	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
#ifndef BORROW_OFFTIME
	delay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TV_ADD_DELTA(&cl->undertime_, extradelay,
			    &cl->undertime_);
			delay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (delay > tick * 2)
			t = (delay + tick - 1) / tick;
		else
			t = 2;
		callout_reset(&cl->callout_, t, rmc_restart, cl);
	}
}
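/*
 * Example of the tick conversion above (illustrative; 'tick' is the
 * kernel's usec-per-clock-tick, e.g. 10000 with hz = 100):
 *
 *	delay = 25000 usec -> t = (25000 + 9999) / 10000 = 3 ticks
 *	delay =  5000 usec -> not > 2*tick, so t = 2 ticks minimum
 *
 * i.e. the suspension is rounded up to whole ticks and never sleeps
 * for less than two, for the phasing reason given in the comment above.
 */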
/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has
 *	to know about system locking conventions.  Under bsd, locking is done
 *	by raising IPL to splimp so that's what's implemented here.  On a
 *	different system this would probably need to be changed.
 *
 *	Returns:	NONE
 */

static void
rmc_restart(void *arg)
{
	struct rm_class *cl = arg;
	struct rm_ifdat *ifd = cl->ifdat_;
	int s;

	s = splimp();
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	splx(s);
}

/*
 * void
 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
 *	handling routine for the root class of the link sharing structure.
 *
 *	Returns:	NONE
 */

static void
rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}

/*
 * Packet queue handling routines.  These localize, in one place, the
 * effects on the rest of the code of whether queues are RED queues or
 * drop-tail queues.
 */

static int
_rmc_addq(rm_class_t *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}

/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	struct mbuf *m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}

static struct mbuf *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
	return _getq(cl->q_);
}

static struct mbuf *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}

#ifdef CBQ_TRACE
/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void *func;
	char *name;
} rmc_funcs[] = {
	rmc_init, "rmc_init",
	rmc_queue_packet, "rmc_queue_packet",
	rmc_under_limit, "rmc_under_limit",
	rmc_update_class_util, "rmc_update_class_util",
	rmc_delay_action, "rmc_delay_action",
	rmc_restart, "rmc_restart",
	_rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
	NULL, NULL
};

static char *
rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++) {
		if (fp->func == func)
			return (fp->name);
	}

	return ("unknown");
}

void
cbqtrace_dump(int counter)
{
	int i, *p;
	char *cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i = 0; i < 20; i++) {
		printf("[0x%x] ", *p++);
		printf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		printf("%d\n", *p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */