/*	$NetBSD: altq_hfsc.c,v 1.7 2003/01/06 03:44:23 christos Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.9 2001/10/26 04:56:11 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation, and that credit
 * is given to Carnegie Mellon University in all publications reporting
 * on direct or indirect use of this code or its derivatives.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 */
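/*
 * Overview (summarizing the paper above and the code below): each class
 * may carry two service curves.  The real-time service curve (rsc) is
 * used to compute per-class eligible times and deadlines; a backlogged
 * class whose eligible time has passed is served earliest-deadline
 * first.  The link-sharing service curve (fsc) drives a virtual-time
 * computation that distributes excess bandwidth along the class
 * hierarchy when no class is eligible.  Both curves are two-piece
 * linear, described by (m1, d, m2): slope m1 for the first d msec,
 * slope m2 afterwards.
 */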

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.7 2003/01/06 03:44:23 christos Exp $");

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_hfsc.h>

/*
 * function prototypes
 */
static struct hfsc_if *hfsc_attach __P((struct ifaltq *, u_int));
static int hfsc_detach __P((struct hfsc_if *));
static int hfsc_clear_interface __P((struct hfsc_if *));
static int hfsc_request __P((struct ifaltq *, int, void *));
static void hfsc_purge __P((struct hfsc_if *));
static struct hfsc_class *hfsc_class_create __P((struct hfsc_if *,
	    struct service_curve *, struct hfsc_class *, int, int));
static int hfsc_class_destroy __P((struct hfsc_class *));
static int hfsc_class_modify __P((struct hfsc_class *,
	    struct service_curve *, struct service_curve *));
static struct hfsc_class *hfsc_nextclass __P((struct hfsc_class *));

static int hfsc_enqueue __P((struct ifaltq *, struct mbuf *,
	    struct altq_pktattr *));
static struct mbuf *hfsc_dequeue __P((struct ifaltq *, int));

static int hfsc_addq __P((struct hfsc_class *, struct mbuf *));
static struct mbuf *hfsc_getq __P((struct hfsc_class *));
static struct mbuf *hfsc_pollq __P((struct hfsc_class *));
static void hfsc_purgeq __P((struct hfsc_class *));

static void set_active __P((struct hfsc_class *, int));
static void set_passive __P((struct hfsc_class *));

static void init_ed __P((struct hfsc_class *, int));
static void update_ed __P((struct hfsc_class *, int));
static void update_d __P((struct hfsc_class *, int));
static void init_v __P((struct hfsc_class *, int));
static void update_v __P((struct hfsc_class *, int));
static ellist_t *ellist_alloc __P((void));
static void ellist_destroy __P((ellist_t *));
static void ellist_insert __P((struct hfsc_class *));
static void ellist_remove __P((struct hfsc_class *));
static void ellist_update __P((struct hfsc_class *));
struct hfsc_class *ellist_get_mindl __P((ellist_t *));
static actlist_t *actlist_alloc __P((void));
static void actlist_destroy __P((actlist_t *));
static void actlist_insert __P((struct hfsc_class *));
static void actlist_remove __P((struct hfsc_class *));
static void actlist_update __P((struct hfsc_class *));

static __inline u_int64_t seg_x2y __P((u_int64_t, u_int64_t));
static __inline u_int64_t seg_y2x __P((u_int64_t, u_int64_t));
static __inline u_int64_t m2sm __P((u_int));
static __inline u_int64_t m2ism __P((u_int));
static __inline u_int64_t d2dx __P((u_int));
static u_int sm2m __P((u_int64_t));
static u_int dx2d __P((u_int64_t));

static void sc2isc __P((struct service_curve *, struct internal_sc *));
static void rtsc_init __P((struct runtime_sc *, struct internal_sc *,
	    u_int64_t, u_int64_t));
static u_int64_t rtsc_y2x __P((struct runtime_sc *, u_int64_t));
static u_int64_t rtsc_x2y __P((struct runtime_sc *, u_int64_t));
static void rtsc_min __P((struct runtime_sc *, struct internal_sc *,
	    u_int64_t, u_int64_t));

int hfscopen __P((dev_t, int, int, struct proc *));
int hfscclose __P((dev_t, int, int, struct proc *));
int hfscioctl __P((dev_t, ioctlcmd_t, caddr_t, int, struct proc *));
static int hfsccmd_if_attach __P((struct hfsc_attach *));
static int hfsccmd_if_detach __P((struct hfsc_interface *));
static int hfsccmd_add_class __P((struct hfsc_add_class *));
static int hfsccmd_delete_class __P((struct hfsc_delete_class *));
static int hfsccmd_modify_class __P((struct hfsc_modify_class *));
static int hfsccmd_add_filter __P((struct hfsc_add_filter *));
static int hfsccmd_delete_filter __P((struct hfsc_delete_filter *));
static int hfsccmd_class_stats __P((struct hfsc_class_stats *));
static void get_class_stats __P((struct hfsc_basic_class_stats *,
	    struct hfsc_class *));
static struct hfsc_class *clh_to_clp __P((struct hfsc_if *, u_long));
static u_long clp_to_clh __P((struct hfsc_class *));

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;

static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;
	struct service_curve root_sc;

	MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
	       M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (NULL);
	bzero(hif, sizeof(struct hfsc_if));

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		FREE(hif, M_DEVBUF);
		return (NULL);
	}

	hif->hif_ifq = ifq;

	/*
	 * create root class
	 */
	root_sc.m1 = bandwidth;
	root_sc.d = 0;
	root_sc.m2 = bandwidth;
	if ((hif->hif_rootclass =
	     hfsc_class_create(hif, &root_sc, NULL, 0, 0)) == NULL) {
		/* don't leak the eligible list on failure */
		ellist_destroy(hif->hif_eligible);
		FREE(hif, M_DEVBUF);
		return (NULL);
	}

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}
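/*
 * Note: the root class created above always carries a linear service
 * curve equal to the link bandwidth (in bits/sec).  For example,
 * attaching to a 100 Mbps interface yields root_sc.m1 = root_sc.m2 =
 * 100000000 and root_sc.d = 0, i.e., a single segment with no burst
 * phase.
 */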

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	FREE(hif, M_DEVBUF);

	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(ifq, req, arg)
	struct ifaltq *ifq;
	int req;
	void *arg;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(hif)
	struct hfsc_if *hif;
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(hif, sc, parent, qlimit, flags)
	struct hfsc_if *hif;
	struct service_curve *sc;
	struct hfsc_class *parent;
	int qlimit, flags;
{
	struct hfsc_class *cl, *p;
	int s;

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
		printf("hfsc_class_create: RED not configured for HFSC!\n");
		return (NULL);
	}
#endif

	MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
	       M_DEVBUF, M_WAITOK);
	if (cl == NULL)
		return (NULL);
	bzero(cl, sizeof(struct hfsc_class));

	MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
	       M_DEVBUF, M_WAITOK);
	if (cl->cl_q == NULL)
		goto err_ret;
	bzero(cl->cl_q, sizeof(class_queue_t));

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (sc->m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (sc->m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0, 0, 0,
					       red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
						red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (sc != NULL && (sc->m1 != 0 || sc->m2 != 0)) {
		MALLOC(cl->cl_rsc, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		bzero(cl->cl_rsc, sizeof(struct internal_sc));
		sc2isc(sc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);

		MALLOC(cl->cl_fsc, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		bzero(cl->cl_fsc, sizeof(struct internal_sc));
		sc2isc(sc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = (u_long)cl;  /* XXX: just a pointer to this class */
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;
	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	/* add this class to the children list of the parent */
	if (parent == NULL) {
		/* this is root class */
	} else if ((p = parent->cl_children) == NULL)
		parent->cl_children = cl;
	else {
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_q != NULL)
		FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(cl)
	struct hfsc_class *cl;
{
	int s;

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}
	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc;
{
	struct internal_sc *rsc_tmp = NULL, *fsc_tmp = NULL;
	int s;

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		MALLOC(rsc_tmp, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		MALLOC(fsc_tmp, struct internal_sc *,
		       sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (fsc_tmp == NULL) {
			/* don't leak rsc_tmp allocated above */
			if (rsc_tmp != NULL)
				FREE(rsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}

	s = splnet();
	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				FREE(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			bzero(cl->cl_rsc, sizeof(struct internal_sc));
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
			rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				FREE(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			bzero(cl->cl_fsc, sizeof(struct internal_sc));
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
		}
	}
	splx(s);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = hif->hif_defaultclass;
	cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */

	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		u_int64_t cur_time;

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL) {
			cur_time = read_machclk();
			realtime = (cl->cl_e <= cur_time);
		}
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible)) != NULL) {
			realtime = 1;
		} else {
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_first(cl->cl_actc);
				if (cl == NULL)
					return (NULL);
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_v(cl, len);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

#ifdef HFSC_PKTLOG
	/* put the logging_hook here */
#endif

	return (m);
}

static int
hfsc_addq(cl, m)
	struct hfsc_class *cl;
	struct mbuf *m;
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(cl)
	struct hfsc_class *cl;
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(cl)
	struct hfsc_class *cl;
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(cl)
	struct hfsc_class *cl;
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	set_passive(cl);
}

static void
set_active(cl, len)
	struct hfsc_class *cl;
	int len;
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_v(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	if (cl->cl_fsc != NULL) {
		while (cl->cl_parent != NULL) {
			if (--cl->cl_nactive == 0) {
				/* remove this class from the vt list */
				actlist_remove(cl);
			} else
				/* still has active children */
				break;

			/* go up to the parent class */
			cl = cl->cl_parent;
		}
	}
}
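/*
 * Real-time bookkeeping below: cl_deadline maps cumulative service
 * (cl_cumul) to the time by which the head packet should depart, and
 * cl_eligible maps it to the time from which the class may be served
 * under the real-time criterion.  As noted in init_ed(), the two
 * curves coincide for a concave rsc (m1 > m2); for a convex rsc the
 * eligible curve degenerates to the line of slope m2 (dx = dy = 0).
 */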

static void
init_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(cl, next_len)
	struct hfsc_class *cl;
	int next_len;
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	struct hfsc_class *min_cl, *max_cl;

	while (cl->cl_parent != NULL) {

		if (cl->cl_nactive++ > 0)
			/* already active */
			break;

		/*
		 * if parent became idle while this class was idle,
		 * reset vt and the runtime service curve.
		 */
		if (cl->cl_parent->cl_nactive == 0 ||
		    cl->cl_parent->cl_vtperiod != cl->cl_parentperiod) {
			cl->cl_vt = 0;
			rtsc_init(&cl->cl_virtual, cl->cl_fsc,
				  0, cl->cl_total);
		}
		min_cl = actlist_first(cl->cl_parent->cl_actc);
		if (min_cl != NULL) {
			u_int64_t vt;

			/*
			 * set vt to the average of the min and max classes.
			 * if the parent's period didn't change,
			 * don't decrease vt of the class.
			 */
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			vt = (min_cl->cl_vt + max_cl->cl_vt) / 2;
			if (cl->cl_parent->cl_vtperiod != cl->cl_parentperiod
			    || vt > cl->cl_vt)
				cl->cl_vt = vt;
		}

		/* update the virtual curve */
		rtsc_min(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt, cl->cl_total);

		cl->cl_vtperiod++;  /* increment vt period */
		cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
		if (cl->cl_parent->cl_nactive == 0)
			cl->cl_parentperiod++;

		actlist_insert(cl);

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}

static void
update_v(cl, len)
	struct hfsc_class *cl;
	int len;
{
	while (cl->cl_parent != NULL) {

		cl->cl_total += len;

		if (cl->cl_fsc != NULL) {
			cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total);

			/* update the vt list */
			actlist_update(cl);
		}

		/* go up to the parent class */
		cl = cl->cl_parent;
	}
}
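/*
 * A note on the virtual-time computation above: cl_total counts all
 * bytes sent on behalf of the class (including its descendants), and
 * cl_vt is the x-projection of that amount on the class's link-sharing
 * curve.  Siblings are served in increasing cl_vt order (see the
 * actlist_first() walk in hfsc_dequeue()), which is what realizes
 * hierarchical fair sharing of excess bandwidth.
 */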

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible
 * times.  there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc()
{
	ellist_t *head;

	MALLOC(head, ellist_t *, sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(head)
	ellist_t *head;
{
	FREE(head, M_DEVBUF);
}

static void
ellist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(head)
	ellist_t *head;
{
	struct hfsc_class *p, *cl = NULL;
	u_int64_t cur_time;

	cur_time = read_machclk();

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc()
{
	actlist_t *head;

	MALLOC(head, actlist_t *, sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(head)
	actlist_t *head;
{
	FREE(head, M_DEVBUF);
}

static void
actlist_insert(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(cl)
	struct hfsc_class *cl;
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(cl)
	struct hfsc_class *cl;
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt <= p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SC_LARGEVAL	(1LL << 32)
#define	SC_INFINITY	0xffffffffffffffffLL

static __inline u_int64_t
seg_x2y(x, sm)
	u_int64_t x;
	u_int64_t sm;
{
	u_int64_t y;

	if (x < SC_LARGEVAL)
		y = x * sm >> SM_SHIFT;
	else
		y = (x >> SM_SHIFT) * sm;
	return (y);
}

static __inline u_int64_t
seg_y2x(y, ism)
	u_int64_t y;
	u_int64_t ism;
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == SC_INFINITY)
		x = SC_INFINITY;
	else if (y < SC_LARGEVAL)
		x = y * ism >> ISM_SHIFT;
	else
		x = (y >> ISM_SHIFT) * ism;
	return (x);
}

static __inline u_int64_t
m2sm(m)
	u_int m;
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(m)
	u_int m;
{
	u_int64_t ism;

	if (m == 0)
		ism = SC_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(d)
	u_int d;
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(sm)
	u_int64_t sm;
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(dx)
	u_int64_t dx;
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(sc, isc)
	struct service_curve *sc;
	struct internal_sc *isc;
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
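/*
 * Worked example of the conversions above (illustrative numbers):
 * with machclk_freq = 500 MHz and m = 10 Mbps,
 *
 *	m2sm(10000000)  = (10000000 << 24) / 8 / 500000000  ~= 41943
 *	                  i.e. 2.5e-3 bytes/tick scaled by SM_SHIFT;
 *	m2ism(10000000) = (500000000 << 10) * 8 / 10000000   = 409600
 *	                  i.e. 400 ticks/byte scaled by ISM_SHIFT,
 *
 * matching the sm(500MHz) and ism(500MHz) rows of the table above.
 */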

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value
 */
static u_int64_t
rtsc_y2x(rtsc, y)
	struct runtime_sc *rtsc;
	u_int64_t y;
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(rtsc, x)
	struct runtime_sc *rtsc;
	u_int64_t x;
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(rtsc, isc, x, y)
	struct runtime_sc *rtsc;
	struct internal_sc *isc;
	u_int64_t x, y;
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
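/*
 * Derivation of the intersection offset used in rtsc_min() above:
 * past the old curve's knee both curves are straight lines, so the
 * crossing point dx satisfies
 *
 *	sm1 * dx == sm2 * dx + (y1 - y)
 *
 * (the new curve starts (y1 - y) below the old one at x and, being
 * concave, climbs faster with sm1 > sm2), giving
 *
 *	dx = (y1 - y) / (sm1 - sm2)
 *
 * with the << SM_SHIFT in the code compensating for the fixed-point
 * scaling of the slopes; the extra term handles the case where (x, y1)
 * still lies on the old curve's first segment.
 */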

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no cpu clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
				       ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#if 1
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}
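/*
 * Illustrative userland use of the ioctl interface above (a sketch,
 * not taken from this file; the device node path and error handling
 * are assumptions):
 *
 *	struct hfsc_attach attach;
 *	struct hfsc_interface iface;
 *	int fd = open("/dev/altq/hfsc", O_RDWR);	// assumed path
 *
 *	memset(&attach, 0, sizeof(attach));
 *	strlcpy(attach.iface.hfsc_ifname, "fxp0",
 *	    sizeof(attach.iface.hfsc_ifname));
 *	attach.bandwidth = 10000000;		// link speed, bits/sec
 *	ioctl(fd, HFSC_IF_ATTACH, &attach);
 *
 *	// ... add a default class and filters, then:
 *	memset(&iface, 0, sizeof(iface));
 *	strlcpy(iface.hfsc_ifname, "fxp0", sizeof(iface.hfsc_ifname));
 *	ioctl(fd, HFSC_ENABLE, &iface);
 */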

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
				 hfsc_enqueue, hfsc_dequeue, hfsc_request,
				 &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL) {
		if (ap->parent_handle == HFSC_ROOTCLASS_HANDLE)
			parent = hif->hif_rootclass;
		else
			return (EINVAL);
	}

	if ((cl = hfsc_class_create(hif, &ap->service_curve, parent,
				    ap->qlimit, ap->flags)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#if 1
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
			      cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
				 ap->filter_handle);
}
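/*
 * hfsccmd_class_stats() below implements a simple window over the
 * class tree: the caller sets ap->nskip to the number of classes to
 * skip in hfsc_nextclass() order and ap->nclasses to the capacity of
 * the user buffer pointed to by ap->stats; on return, ap->nclasses
 * holds the number of entries actually copied out.
 */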
static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_basic_class_stats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
				     sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

static void
get_class_stats(sp, cl)
	struct hfsc_basic_class_stats *sp;
	struct hfsc_class *cl;
{
	sp->class_id = cl->cl_id;
	sp->class_handle = clp_to_clh(cl);

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;

	sp->qlength = qlen(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(hif, chandle)
	struct hfsc_if *hif;
	u_long chandle;
{
	struct hfsc_class *cl;

	cl = (struct hfsc_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
		printf("clh_to_clp: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_hif != hif)
		return (NULL);

	return (cl);
}

/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(cl)
	struct hfsc_class *cl;
{
	if (cl->cl_parent == NULL)
		return (HFSC_ROOTCLASS_HANDLE);  /* XXX */
	return (cl->cl_handle);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ_HFSC */