/*	$KAME: altq_subr.c,v 1.23 2004/04/20 16:10:06 itojun Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_subr.c,v 1.2 2005/04/04 17:08:16 joerg Exp $ */

/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/ifq_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>

/* machine dependent clock related includes */
#if defined(__i386__)
#include <machine/clock.h>		/* for tsc_freq */
#include <machine/md_var.h>		/* for cpu_feature */
#include <machine/specialreg.h>		/* for CPUID_TSC */
#endif /* __i386__ */

/*
 * internal function prototypes and globals
 */
static void	tbr_timeout(void *);
int (*altq_input)(struct mbuf *, int) = NULL;
static int tbr_timer = 0;	/* token bucket regulator timer */
static struct callout tbr_callout;

int pfaltq_running;	/* keep track of running state */

MALLOC_DEFINE(M_ALTQ, "altq", "ALTQ structures");

/*
 * alternate queueing support routines
 */

/* look up the queue state by the interface name and the queueing type. */
void *
altq_lookup(const char *name, int type)
{
	struct ifnet *ifp;

	if ((ifp = ifunit(name)) != NULL) {
		if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
			return (ifp->if_snd.altq_disc);
	}

	return (NULL);
}

int
altq_attach(struct ifaltq *ifq, int type, void *discipline,
	    int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *),
	    struct mbuf *(*dequeue)(struct ifaltq *, int),
	    int (*request)(struct ifaltq *, int, void *),
	    void *clfier,
	    void *(*classify)(struct ifaltq *, struct mbuf *,
			      struct altq_pktattr *))
{
	if (!ifq_is_ready(ifq))
		return ENXIO;

	ifq->altq_type     = type;
	ifq->altq_disc     = discipline;
	ifq->altq_enqueue  = enqueue;
	ifq->altq_dequeue  = dequeue;
	ifq->altq_request  = request;
	ifq->altq_clfier   = clfier;
	ifq->altq_classify = classify;
	ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
	return 0;
}
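
/*
 * Illustrative sketch (not part of this file): a discipline's pfattach
 * routine typically wires itself in roughly like
 *
 *	error = altq_attach(&ifp->if_snd, ALTQT_CBQ, a->altq_disc,
 *	    cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
 *
 * with the enqueue/dequeue/request callbacks supplied by the
 * discipline itself (here CBQ; compare cbq_pfattach() in altq_cbq.c).
 */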

int
altq_detach(struct ifaltq *ifq)
{
	if (!ifq_is_ready(ifq))
		return ENXIO;
	if (ifq_is_enabled(ifq))
		return EBUSY;
	if (!ifq_is_attached(ifq))
		return (0);

	ifq_set_classic(ifq);
	ifq->altq_type     = ALTQT_NONE;
	ifq->altq_disc     = NULL;
	ifq->altq_clfier   = NULL;
	ifq->altq_classify = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	return 0;
}

int
altq_enable(struct ifaltq *ifq)
{
	int s;

	if (!ifq_is_ready(ifq))
		return ENXIO;
	if (ifq_is_enabled(ifq))
		return 0;

	s = splimp();
	ifq_purge(ifq);
	KKASSERT(ifq->ifq_len == 0);
	ifq->altq_flags |= ALTQF_ENABLED;
	if (ifq->altq_clfier != NULL)
		ifq->altq_flags |= ALTQF_CLASSIFY;
	splx(s);

	return 0;
}

int
altq_disable(struct ifaltq *ifq)
{
	int s;

	if (!ifq_is_enabled(ifq))
		return 0;

	s = splimp();
	ifq_purge(ifq);
	KKASSERT(ifq->ifq_len == 0);
	ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
	splx(s);
	return 0;
}

/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	byte << 32
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
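
/*
 * Worked example (illustrative numbers, not from this file): for a
 * 10Mbps profile on a machine with machclk_freq of 1GHz,
 *	tbr_rate = TBR_SCALE(10000000 / 8) / 1000000000
 *	         = (1250000 << 32) / 10^9 ~= 5368709,
 * i.e. about 0.00125 bytes per machine clock in fixed point.  A refill
 * of "interval * tbr_rate" over 10^6 clocks (1ms) then credits
 * TBR_UNSCALE(10^6 * 5368709) ~= 1250 bytes, which matches 10Mbps.
 */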

struct mbuf *
tbr_dequeue(struct ifaltq *ifq, int op)
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	uint64_t now;

	tbr = ifq->altq_tbr;
	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update token only when it is negative */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime)
				tbr->tbr_token = tbr->tbr_depth;
			else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if token is still negative, don't allow dequeue */
		if (tbr->tbr_token <= 0)
			return (NULL);
	}

	if (ifq_is_enabled(ifq))
		m = (*ifq->altq_dequeue)(ifq, op);
	else if (op == ALTDQ_POLL)
		IF_POLL(ifq, m);
	else
		IF_DEQUEUE(ifq, m);

	if (m != NULL && op == ALTDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;
	return (m);
}

/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
int
tbr_set(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr, *otbr;

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0) {
		printf("tbr_set: no cpu clock available!\n");
		return (ENXIO);
	}

	if (profile->rate == 0) {
		/* delete this tbr */
		if ((tbr = ifq->altq_tbr) == NULL)
			return (ENOENT);
		ifq->altq_tbr = NULL;
		free(tbr, M_ALTQ);
		return (0);
	}

	tbr = malloc(sizeof(*tbr), M_ALTQ, M_WAITOK | M_ZERO);
	tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
	tbr->tbr_depth = TBR_SCALE(profile->depth);
	if (tbr->tbr_rate > 0)
		tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
	else
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = ALTDQ_REMOVE;

	otbr = ifq->altq_tbr;
	ifq->altq_tbr = tbr;	/* set the new tbr */

	if (otbr != NULL)
		free(otbr, M_ALTQ);
	else if (tbr_timer == 0) {
		callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
		tbr_timer = 1;
	}
	return (0);
}

/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
static void
tbr_timeout(void *arg)
{
	struct ifnet *ifp;
	int active, s;

	active = 0;
	s = splimp();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
		if (ifp->if_snd.altq_tbr == NULL)
			continue;
		active++;
		if (!ifq_is_empty(&ifp->if_snd) && ifp->if_start != NULL)
			(*ifp->if_start)(ifp);
	}
	splx(s);
	if (active > 0)
		callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
	else
		tbr_timer = 0;	/* don't need tbr_timer anymore */
}

/*
 * get token bucket regulator profile
 */
int
tbr_get(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr;

	if ((tbr = ifq->altq_tbr) == NULL) {
		profile->rate = 0;
		profile->depth = 0;
	} else {
		profile->rate =
		    (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
		profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
	}
	return (0);
}

/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 */
int
altq_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int s, error = 0;

	switch (a->scheduler) {
	case ALTQT_NONE:
		break;
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_pfattach(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_pfattach(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_pfattach(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	ifp = ifunit(a->ifname);

	/* if the state is running, enable altq */
	if (error == 0 && pfaltq_running &&
	    ifp != NULL && ifp->if_snd.altq_type != ALTQT_NONE &&
	    !ifq_is_enabled(&ifp->if_snd))
		error = altq_enable(&ifp->if_snd);

	/* if altq is already enabled, reset the token bucket regulator */
	if (error == 0 && ifp != NULL && ifq_is_enabled(&ifp->if_snd)) {
		tb.rate = a->ifbandwidth;
		tb.depth = a->tbrsize;
		s = splimp();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}

/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
int
altq_pfdetach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error = 0;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);

	/* if this discipline is no longer referenced, just return */
	if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	s = splimp();
	if (ifq_is_enabled(&ifp->if_snd))
		error = altq_disable(&ifp->if_snd);
	if (error == 0)
		error = altq_detach(&ifp->if_snd);
	splx(s);

	return (error);
}

/*
 * add a discipline or a queue
 */
int
altq_add(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_add_queue(a));

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0)
		panic("altq_add: no cpu clock");

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a discipline or a queue
 */
int
altq_remove(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_remove_queue(a));

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * add a queue to the discipline
 */
int
altq_add_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a queue from the discipline
 */
int
altq_remove_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * get queue statistics
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_getqstats(a, ubuf, nbytes);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
uint8_t
read_dsfield(struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct mbuf *m0;
	uint8_t ds_field = 0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return ((uint8_t)0);

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	}
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("read_dsfield: can't locate header!\n");
#endif
		return ((uint8_t)0);
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;

		if (ip->ip_v != 4)
			return ((uint8_t)0);	/* version mismatch! */
		ds_field = ip->ip_tos;
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		uint32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return ((uint8_t)0);	/* version mismatch! */
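		/*
		 * after ntohl, bits 28-31 of ip6_flow carry the version
		 * and bits 20-27 the traffic class (the diffserv octet)
		 */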
		ds_field = (flowlabel >> 20) & 0xff;
	}
#endif
	return (ds_field);
}

void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, uint8_t dsfield)
{
	struct mbuf *m0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return;

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	}
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("write_dsfield: can't locate header!\n");
#endif
		return;
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
		uint8_t old;
		int32_t sum;

		if (ip->ip_v != 4)
			return;		/* version mismatch! */
		old = ip->ip_tos;
		dsfield |= old & 3;	/* leave CU bits */
		if (old == dsfield)
			return;
		ip->ip_tos = dsfield;
		/*
		 * update checksum (from RFC1624)
		 *	   HC' = ~(~HC + ~m + m')
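		 *
		 * only the low (TOS) byte of the 16-bit word changes,
		 * so ~m + m' reduces to 0xff00 + (~old & 0xff) + new:
		 * the unchanged high byte contributes ~h + h == 0xff00.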
		 */
		sum = ~ntohs(ip->ip_sum) & 0xffff;
		sum += 0xff00 + (~old & 0xff) + dsfield;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);  /* add carry */

		ip->ip_sum = htons(~sum & 0xffff);
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		uint32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return;		/* version mismatch! */
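		/*
		 * clear the DSCP bits (22-27 after ntohl) and OR the new
		 * dsfield in at bit 20; the 0xf03fffff mask keeps the
		 * version, the two CU bits and the 20-bit flow label
		 */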
		flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
		ip6->ip6_flow = htonl(flowlabel);
	}
#endif
}

/*
 * high resolution clock support taking advantage of a machine dependent
 * high resolution time counter (e.g., timestamp counter of intel pentium).
 * we assume
 *  - 64-bit-long monotonically-increasing counter
 *  - frequency range is 100M-4GHz (CPU speed)
 */
/* if pcc is not available or disabled, emulate 256MHz using microtime() */
#define	MACHCLK_SHIFT	8
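/*
 * microtime() advances at 1MHz (microsecond resolution); shifting by
 * MACHCLK_SHIFT scales that to 1000000 << 8 == 256000000 ticks/sec.
 */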

int machclk_usepcc;
uint32_t machclk_freq = 0;
uint32_t machclk_per_tick = 0;

void
init_machclk(void)
{
	callout_init(&tbr_callout);

	machclk_usepcc = 1;

#if !defined(__i386__) || defined(ALTQ_NOPCC)
	machclk_usepcc = 0;
#elif defined(__DragonFly__) && defined(SMP)
	machclk_usepcc = 0;
#elif defined(__i386__)
	/* check if TSC is available */
	if (machclk_usepcc == 1 && (cpu_feature & CPUID_TSC) == 0)
		machclk_usepcc = 0;
#endif

	if (machclk_usepcc == 0) {
		/* emulate 256MHz using microtime() */
		machclk_freq = 1000000 << MACHCLK_SHIFT;
		machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
		printf("altq: emulate %uHz cpu clock\n", machclk_freq);
#endif
		return;
	}

	/*
	 * if the clock frequency (of Pentium TSC or Alpha PCC) is
	 * accessible, just use it.
	 */
#ifdef __i386__
	machclk_freq = tsc_freq;
#else
#error "machclk_freq interface not implemented"
#endif

	/*
	 * if we don't know the clock frequency, measure it.
	 */
	if (machclk_freq == 0) {
		static int	wait;
		struct timeval	tv_start, tv_end;
		uint64_t	start, end, diff;
		int		timo;

		microtime(&tv_start);
		start = read_machclk();
		timo = hz;	/* 1 sec */
		tsleep(&wait, PCATCH, "init_machclk", timo);
		microtime(&tv_end);
		end = read_machclk();
		diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
		    + tv_end.tv_usec - tv_start.tv_usec;
		if (diff != 0)
			machclk_freq = (u_int)((end - start) * 1000000 / diff);
	}

	machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
	printf("altq: CPU clock: %uHz\n", machclk_freq);
#endif
}

uint64_t
read_machclk(void)
{
	uint64_t val;

	if (machclk_usepcc) {
#if defined(__i386__)
		val = rdtsc();
#else
		panic("read_machclk");
#endif
	} else {
		struct timeval tv;

		microtime(&tv);
		val = (((uint64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
		    + tv.tv_usec) << MACHCLK_SHIFT);
	}
	return (val);
}
784