/*	$KAME: altq_subr.c,v 1.23 2004/04/20 16:10:06 itojun Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_subr.c,v 1.10 2008/04/06 18:58:15 dillon Exp $ */

/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/ifq_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>

/* machine dependent clock related includes */
#if defined(__i386__)
#include <machine/clock.h>		/* for tsc_freq */
#include <machine/md_var.h>		/* for cpu_feature */
#include <machine/specialreg.h>		/* for CPUID_TSC */
#endif /* __i386__ */

/*
 * internal function prototypes
 */
static void	tbr_timeout(void *);
int (*altq_input)(struct mbuf *, int) = NULL;
static int tbr_timer = 0;		/* token bucket regulator timer */
static struct callout tbr_callout;

int pfaltq_running;			/* keep track of running state */

MALLOC_DEFINE(M_ALTQ, "altq", "ALTQ structures");

/*
 * alternate queueing support routines
 */
/*
 * look up the queue state by the interface name and the queueing type.
 */
void *
altq_lookup(const char *name, int type)
{
        struct ifnet *ifp;

        if ((ifp = ifunit(name)) != NULL) {
                if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
                        return (ifp->if_snd.altq_disc);
        }

        return (NULL);
}

int
altq_attach(struct ifaltq *ifq, int type, void *discipline,
    int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *),
    struct mbuf *(*dequeue)(struct ifaltq *, struct mbuf *, int),
    int (*request)(struct ifaltq *, int, void *),
    void *clfier,
    void *(*classify)(struct ifaltq *, struct mbuf *, struct altq_pktattr *))
{
        if (!ifq_is_ready(ifq))
                return ENXIO;

        ifq->altq_type = type;
        ifq->altq_disc = discipline;
        ifq->altq_enqueue = enqueue;
        ifq->altq_dequeue = dequeue;
        ifq->altq_request = request;
        ifq->altq_clfier = clfier;
        ifq->altq_classify = classify;
        ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
        return 0;
}

int
altq_detach(struct ifaltq *ifq)
{
        if (!ifq_is_ready(ifq))
                return ENXIO;
        if (ifq_is_enabled(ifq))
                return EBUSY;
        if (!ifq_is_attached(ifq))
                return (0);

        ifq_set_classic(ifq);
        ifq->altq_type = ALTQT_NONE;
        ifq->altq_disc = NULL;
        ifq->altq_clfier = NULL;
        ifq->altq_classify = NULL;
        ifq->altq_flags &= ALTQF_CANTCHANGE;
        return 0;
}

int
altq_enable(struct ifaltq *ifq)
{
        if (!ifq_is_ready(ifq))
                return ENXIO;
        if (ifq_is_enabled(ifq))
                return 0;

        crit_enter();
        ifq_purge(ifq);
        KKASSERT(ifq->ifq_len == 0);
        ifq->altq_flags |= ALTQF_ENABLED;
        if (ifq->altq_clfier != NULL)
                ifq->altq_flags |= ALTQF_CLASSIFY;
        crit_exit();

        return 0;
}

int
altq_disable(struct ifaltq *ifq)
{
        if (!ifq_is_enabled(ifq))
                return 0;

        crit_enter();
        ifq_purge(ifq);
        KKASSERT(ifq->ifq_len == 0);
        ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
        crit_exit();
        return 0;
}

/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	byte << 32
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
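
/*
 * Worked example of the fixed-point scaling above (illustrative numbers,
 * not taken from this file): for profile->rate = 8,000,000 bits/sec
 * (1,000,000 bytes/sec) and the emulated machclk_freq of 256,000,000 Hz,
 *
 *	tbr_rate = TBR_SCALE(1000000) / 256000000 = 16777216
 *
 * i.e. 16777216 / 2^32 = 1/256 byte of credit per machine clock tick,
 * which at 256 MHz is again 1,000,000 bytes/sec.  With a depth of 10000
 * bytes, tbr_filluptime = TBR_SCALE(10000) / 16777216 = 2560000 ticks,
 * or 10 msec to refill an empty bucket.
 */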
struct mbuf *
tbr_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
{
        struct tb_regulator *tbr;
        struct mbuf *m;
        int64_t interval;
        uint64_t now;

        crit_enter();
        tbr = ifq->altq_tbr;
        if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
                /* if this is a remove after poll, bypass tbr check */
        } else {
                /* update token only when it is negative */
                if (tbr->tbr_token <= 0) {
                        now = read_machclk();
                        interval = now - tbr->tbr_last;
                        if (interval >= tbr->tbr_filluptime)
                                tbr->tbr_token = tbr->tbr_depth;
                        else {
                                tbr->tbr_token += interval * tbr->tbr_rate;
                                if (tbr->tbr_token > tbr->tbr_depth)
                                        tbr->tbr_token = tbr->tbr_depth;
                        }
                        tbr->tbr_last = now;
                }
                /* if token is still negative, don't allow dequeue */
                if (tbr->tbr_token <= 0) {
                        crit_exit();
                        return (NULL);
                }
        }

        if (ifq_is_enabled(ifq)) {
                m = (*ifq->altq_dequeue)(ifq, mpolled, op);
        } else if (op == ALTDQ_POLL) {
                IF_POLL(ifq, m);
        } else {
                IF_DEQUEUE(ifq, m);
                KKASSERT(mpolled == NULL || mpolled == m);
        }

        if (m != NULL && op == ALTDQ_REMOVE)
                tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
        tbr->tbr_lastop = op;
        crit_exit();
        return (m);
}

/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
int
tbr_set(struct ifaltq *ifq, struct tb_profile *profile)
{
        struct tb_regulator *tbr, *otbr;

        if (machclk_freq == 0)
                init_machclk();
        if (machclk_freq == 0) {
                kprintf("tbr_set: no cpu clock available!\n");
                return (ENXIO);
        }

        if (profile->rate == 0) {
                /* delete this tbr */
                if ((tbr = ifq->altq_tbr) == NULL)
                        return (ENOENT);
                ifq->altq_tbr = NULL;
                kfree(tbr, M_ALTQ);
                return (0);
        }

        tbr = kmalloc(sizeof(*tbr), M_ALTQ, M_WAITOK | M_ZERO);
        tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
        tbr->tbr_depth = TBR_SCALE(profile->depth);
        if (tbr->tbr_rate > 0)
                tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
        else
                tbr->tbr_filluptime = 0xffffffffffffffffLL;
        tbr->tbr_token = tbr->tbr_depth;
        tbr->tbr_last = read_machclk();
        tbr->tbr_lastop = ALTDQ_REMOVE;

        otbr = ifq->altq_tbr;
        ifq->altq_tbr = tbr;	/* set the new tbr */

        if (otbr != NULL)
                kfree(otbr, M_ALTQ);
        else if (tbr_timer == 0) {
                callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
                tbr_timer = 1;
        }
        return (0);
}

/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
static void
tbr_timeout(void *arg)
{
        struct ifnet *ifp;
        int active;

        active = 0;
        crit_enter();
        for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
                if (ifp->if_snd.altq_tbr == NULL)
                        continue;
                active++;
                if (!ifq_is_empty(&ifp->if_snd) && ifp->if_start != NULL) {
                        lwkt_serialize_enter(ifp->if_serializer);
                        (*ifp->if_start)(ifp);
                        lwkt_serialize_exit(ifp->if_serializer);
                }
        }
        crit_exit();
        if (active > 0)
                callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
        else
                tbr_timer = 0;	/* don't need tbr_timer anymore */
}

/*
 * get token bucket regulator profile
 */
int
tbr_get(struct ifaltq *ifq, struct tb_profile *profile)
{
        struct tb_regulator *tbr;

        if ((tbr = ifq->altq_tbr) == NULL) {
                profile->rate = 0;
                profile->depth = 0;
        } else {
                profile->rate =
                    (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
                profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
        }
        return (0);
}
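
/*
 * Usage sketch for the regulator interface above (illustrative only; the
 * interface pointer and profile values are assumptions, not from this
 * file): install a 10 Mbps, 1500-byte-deep token bucket on a send queue,
 * then delete it again by setting a zero rate.
 */
#if 0
        struct tb_profile tb;
        int error;

        tb.rate = 10000000;			/* 10 Mbps */
        tb.depth = 1500;			/* one full-sized frame */
        crit_enter();
        error = tbr_set(&ifp->if_snd, &tb);	/* install the regulator */
        crit_exit();

        tb.rate = 0;				/* rate 0 deletes the tbr */
        crit_enter();
        error = tbr_set(&ifp->if_snd, &tb);
        crit_exit();
#endif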
/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 */
int
altq_pfattach(struct pf_altq *a)
{
        struct ifnet *ifp;
        struct tb_profile tb;
        int error = 0;

        switch (a->scheduler) {
        case ALTQT_NONE:
                break;
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_pfattach(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_pfattach(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_pfattach(a);
                break;
#endif
#ifdef ALTQ_FAIRQ
        case ALTQT_FAIRQ:
                error = fairq_pfattach(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        ifp = ifunit(a->ifname);

        /* if the state is running, enable altq */
        if (error == 0 && pfaltq_running &&
            ifp != NULL && ifp->if_snd.altq_type != ALTQT_NONE &&
            !ifq_is_enabled(&ifp->if_snd))
                error = altq_enable(&ifp->if_snd);

        /* if altq is already enabled, reset the tokenbucket regulator */
        if (error == 0 && ifp != NULL && ifq_is_enabled(&ifp->if_snd)) {
                tb.rate = a->ifbandwidth;
                tb.depth = a->tbrsize;
                crit_enter();
                error = tbr_set(&ifp->if_snd, &tb);
                crit_exit();
        }

        return (error);
}

/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
int
altq_pfdetach(struct pf_altq *a)
{
        struct ifnet *ifp;
        int error = 0;

        if ((ifp = ifunit(a->ifname)) == NULL)
                return (EINVAL);

        /* if this discipline is no longer referenced, just return */
        if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
                return (0);

        crit_enter();
        if (ifq_is_enabled(&ifp->if_snd))
                error = altq_disable(&ifp->if_snd);
        if (error == 0)
                error = altq_detach(&ifp->if_snd);
        crit_exit();

        return (error);
}

/*
 * add a discipline or a queue
 */
int
altq_add(struct pf_altq *a)
{
        int error = 0;

        if (a->qname[0] != 0)
                return (altq_add_queue(a));

        if (machclk_freq == 0)
                init_machclk();
        if (machclk_freq == 0)
                panic("altq_add: no cpu clock");

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_add_altq(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_add_altq(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_add_altq(a);
                break;
#endif
#ifdef ALTQ_FAIRQ
        case ALTQT_FAIRQ:
                error = fairq_add_altq(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * remove a discipline or a queue
 */
int
altq_remove(struct pf_altq *a)
{
        int error = 0;

        if (a->qname[0] != 0)
                return (altq_remove_queue(a));

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_remove_altq(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_remove_altq(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_remove_altq(a);
                break;
#endif
#ifdef ALTQ_FAIRQ
        case ALTQT_FAIRQ:
                error = fairq_remove_altq(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}
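
/*
 * Usage sketch for the add/remove entry points above (illustrative only;
 * the interface name, queue name and scheduler are assumptions, not from
 * this file).  An empty qname selects the discipline-level operation, a
 * non-empty qname the per-queue one.
 */
#if 0
        struct pf_altq pa;
        int error;

        bzero(&pa, sizeof(pa));
        strlcpy(pa.ifname, "em0", sizeof(pa.ifname));
        pa.scheduler = ALTQT_PRIQ;
        error = altq_add(&pa);		/* qname empty: add the discipline */

        strlcpy(pa.qname, "std", sizeof(pa.qname));
        error = altq_add(&pa);		/* qname set: add a queue to it */
#endif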
/*
 * add a queue to the discipline
 */
int
altq_add_queue(struct pf_altq *a)
{
        int error = 0;

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_add_queue(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_add_queue(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_add_queue(a);
                break;
#endif
#ifdef ALTQ_FAIRQ
        case ALTQT_FAIRQ:
                error = fairq_add_queue(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * remove a queue from the discipline
 */
int
altq_remove_queue(struct pf_altq *a)
{
        int error = 0;

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_remove_queue(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_remove_queue(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_remove_queue(a);
                break;
#endif
#ifdef ALTQ_FAIRQ
        case ALTQT_FAIRQ:
                error = fairq_remove_queue(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * get queue statistics
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
        int error = 0;

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_getqstats(a, ubuf, nbytes);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_getqstats(a, ubuf, nbytes);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_getqstats(a, ubuf, nbytes);
                break;
#endif
#ifdef ALTQ_FAIRQ
        case ALTQT_FAIRQ:
                error = fairq_getqstats(a, ubuf, nbytes);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}
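
/*
 * Background for the routines below (standard RFC 2474 layout, stated
 * here for reference): in IPv4 the DS field is the 8-bit TOS byte; in
 * IPv6 it is the 8-bit traffic class, i.e. bits 27-20 of the first
 * 32-bit word of the header (bits 31-28 hold the version, bits 19-0 the
 * flow label).  This is why read_dsfield() extracts the IPv6 value as
 * (flowlabel >> 20) & 0xff.
 */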
/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
uint8_t
read_dsfield(struct mbuf *m, struct altq_pktattr *pktattr)
{
        struct mbuf *m0;
        uint8_t ds_field = 0;

        if (pktattr == NULL ||
            (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
                return ((uint8_t)0);

        /* verify that pattr_hdr is within the mbuf data */
        for (m0 = m; m0 != NULL; m0 = m0->m_next) {
                if ((pktattr->pattr_hdr >= m0->m_data) &&
                    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
                        break;
        }
        if (m0 == NULL) {
                /* ick, pattr_hdr is stale */
                pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
                kprintf("read_dsfield: can't locate header!\n");
#endif
                return ((uint8_t)0);
        }

        if (pktattr->pattr_af == AF_INET) {
                struct ip *ip = (struct ip *)pktattr->pattr_hdr;

                if (ip->ip_v != 4)
                        return ((uint8_t)0);	/* version mismatch! */
                ds_field = ip->ip_tos;
        }
#ifdef INET6
        else if (pktattr->pattr_af == AF_INET6) {
                struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
                uint32_t flowlabel;

                flowlabel = ntohl(ip6->ip6_flow);
                if ((flowlabel >> 28) != 6)
                        return ((uint8_t)0);	/* version mismatch! */
                ds_field = (flowlabel >> 20) & 0xff;
        }
#endif
        return (ds_field);
}

void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, uint8_t dsfield)
{
        struct mbuf *m0;

        if (pktattr == NULL ||
            (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
                return;

        /* verify that pattr_hdr is within the mbuf data */
        for (m0 = m; m0 != NULL; m0 = m0->m_next) {
                if ((pktattr->pattr_hdr >= m0->m_data) &&
                    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
                        break;
        }
        if (m0 == NULL) {
                /* ick, pattr_hdr is stale */
                pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
                kprintf("write_dsfield: can't locate header!\n");
#endif
                return;
        }

        if (pktattr->pattr_af == AF_INET) {
                struct ip *ip = (struct ip *)pktattr->pattr_hdr;
                uint8_t old;
                int32_t sum;

                if (ip->ip_v != 4)
                        return;		/* version mismatch! */
                old = ip->ip_tos;
                dsfield |= old & 3;	/* leave CU bits */
                if (old == dsfield)
                        return;
                ip->ip_tos = dsfield;
                /*
                 * update checksum (from RFC1624)
                 *	   HC' = ~(~HC + ~m + m')
                 */
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += 0xff00 + (~old & 0xff) + dsfield;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16);	/* add carry */

                ip->ip_sum = htons(~sum & 0xffff);
        }
#ifdef INET6
        else if (pktattr->pattr_af == AF_INET6) {
                struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
                uint32_t flowlabel;

                flowlabel = ntohl(ip6->ip6_flow);
                if ((flowlabel >> 28) != 6)
                        return;		/* version mismatch! */
                flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
                ip6->ip6_flow = htonl(flowlabel);
        }
#endif
}
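
/*
 * Worked example of the RFC 1624 incremental update in write_dsfield()
 * (illustrative values, not from this file): old tos m = 0x00, new tos
 * m' = 0xb8, old checksum HC = 0x1234.
 *
 *	sum  = ~0x1234 & 0xffff                = 0xedcb
 *	sum += 0xff00 + (~0x00 & 0xff) + 0xb8  -> 0x1ee82
 *	fold the carry: 0x0001 + 0xee82        = 0xee83
 *	HC'  = ~0xee83 & 0xffff                = 0x117c
 *
 * The constant 0xff00 accounts for the unchanged high byte of the 16-bit
 * word containing the TOS field: its complement and its (unchanged) new
 * value always sum to 0xff in that byte position.
 */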
774 */ 775 if (machclk_freq == 0) { 776 static int wait; 777 struct timeval tv_start, tv_end; 778 uint64_t start, end, diff; 779 int timo; 780 781 microtime(&tv_start); 782 start = read_machclk(); 783 timo = hz; /* 1 sec */ 784 tsleep(&wait, PCATCH, "init_machclk", timo); 785 microtime(&tv_end); 786 end = read_machclk(); 787 diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000 788 + tv_end.tv_usec - tv_start.tv_usec; 789 if (diff != 0) 790 machclk_freq = (u_int)((end - start) * 1000000 / diff); 791 } 792 793 machclk_per_tick = machclk_freq / hz; 794 795 #ifdef ALTQ_DEBUG 796 kprintf("altq: CPU clock: %uHz\n", machclk_freq); 797 #endif 798 } 799 800 uint64_t 801 read_machclk(void) 802 { 803 uint64_t val; 804 805 if (machclk_usepcc) { 806 #if defined(__i386__) 807 val = rdtsc(); 808 #else 809 panic("read_machclk"); 810 #endif 811 } else { 812 struct timeval tv; 813 814 microtime(&tv); 815 val = (((uint64_t)(tv.tv_sec - boottime.tv_sec) * 1000000 816 + tv.tv_usec) << MACHCLK_SHIFT); 817 } 818 return (val); 819 } 820