xref: /dflybsd-src/sys/net/altq/altq_subr.c (revision 1996f1e5804febee7937f3076a1b113924fca812)
1 /*	$KAME: altq_subr.c,v 1.23 2004/04/20 16:10:06 itojun Exp $	*/
2 /*	$DragonFly: src/sys/net/altq/altq_subr.c,v 1.12 2008/05/14 11:59:23 sephe Exp $ */
3 
4 /*
5  * Copyright (C) 1997-2003
6  *	Sony Computer Science Laboratories Inc.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include "opt_altq.h"
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/malloc.h>
36 #include <sys/mbuf.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/socket.h>
40 #include <sys/socketvar.h>
41 #include <sys/kernel.h>
42 #include <sys/callout.h>
43 #include <sys/errno.h>
44 #include <sys/syslog.h>
45 #include <sys/sysctl.h>
46 #include <sys/queue.h>
47 #include <sys/thread2.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_types.h>
52 #include <net/ifq_var.h>
53 #include <net/netmsg2.h>
54 #include <net/netisr2.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #ifdef INET6
60 #include <netinet/ip6.h>
61 #endif
62 #include <netinet/tcp.h>
63 #include <netinet/udp.h>
64 
65 #include <net/pf/pfvar.h>
66 #include <net/altq/altq.h>
67 
68 /* machine dependent clock related includes */
69 #include <machine/clock.h>		/* for tsc_frequency */
70 #include <machine/md_var.h>		/* for cpu_feature */
71 #include <machine/specialreg.h>		/* for CPUID_TSC */
72 
73 /*
74  * internal function prototypes
75  */
76 static void	tbr_timeout(void *);
77 static void	tbr_timeout_dispatch(netmsg_t);
78 static int	altq_enable_locked(struct ifaltq *);
79 static int	altq_disable_locked(struct ifaltq *);
80 static int	altq_detach_locked(struct ifaltq *);
81 static int	tbr_set_locked(struct ifaltq *, struct tb_profile *);
82 
83 int (*altq_input)(struct mbuf *, int) = NULL;
84 static int tbr_timer = 0;	/* token bucket regulator timer */
85 static struct callout tbr_callout;
86 static struct netmsg_base tbr_timeout_netmsg;
87 
88 int pfaltq_running;	/* keep track of running state */
89 
90 MALLOC_DEFINE(M_ALTQ, "altq", "ALTQ structures");
91 
92 /*
93  * alternate queueing support routines
94  */
95 
96 /* look up the queue state by the interface name and the queueing type. */
97 void *
98 altq_lookup(const char *name, int type)
99 {
100 	struct ifnet *ifp;
101 
102 	if ((ifp = ifunit(name)) != NULL) {
103 		if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
104 			return (ifp->if_snd.altq_disc);
105 	}
106 
107 	return (NULL);
108 }
109 
110 int
111 altq_attach(struct ifaltq *ifq, int type, void *discipline,
112     altq_mapsubq_t mapsubq,
113     ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request,
114     void *clfier,
115     void *(*classify)(struct ifaltq *, struct mbuf *, struct altq_pktattr *))
116 {
117 	if (!ifq_is_ready(ifq))
118 		return ENXIO;
119 
120 	ifq->altq_type     = type;
121 	ifq->altq_disc     = discipline;
122 	ifq->altq_clfier   = clfier;
123 	ifq->altq_classify = classify;
124 	ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
125 	ifq_set_methods(ifq, mapsubq, enqueue, dequeue, request);
126 	return 0;
127 }
128 
129 static int
130 altq_detach_locked(struct ifaltq *ifq)
131 {
132 	if (!ifq_is_ready(ifq))
133 		return ENXIO;
134 	if (ifq_is_enabled(ifq))
135 		return EBUSY;
136 	if (!ifq_is_attached(ifq))
137 		return (0);
138 
139 	ifq_set_classic(ifq);
140 	ifq->altq_type     = ALTQT_NONE;
141 	ifq->altq_disc     = NULL;
142 	ifq->altq_clfier   = NULL;
143 	ifq->altq_classify = NULL;
144 	ifq->altq_flags &= ALTQF_CANTCHANGE;
145 	return 0;
146 }
147 
148 int
149 altq_detach(struct ifaltq *ifq)
150 {
151 	int error;
152 
153 	ifq_lock_all(ifq);
154 	error = altq_detach_locked(ifq);
155 	ifq_unlock_all(ifq);
156 	return error;
157 }
158 
159 static int
160 altq_enable_locked(struct ifaltq *ifq)
161 {
162 	if (!ifq_is_ready(ifq))
163 		return ENXIO;
164 	if (ifq_is_enabled(ifq))
165 		return 0;
166 
167 	ifq_purge_all_locked(ifq);
168 
169 	ifq->altq_flags |= ALTQF_ENABLED;
170 	if (ifq->altq_clfier != NULL)
171 		ifq->altq_flags |= ALTQF_CLASSIFY;
172 	return 0;
173 }
174 
175 int
176 altq_enable(struct ifaltq *ifq)
177 {
178 	int error;
179 
180 	ifq_lock_all(ifq);
181 	error = altq_enable_locked(ifq);
182 	ifq_unlock_all(ifq);
183 	return error;
184 }
185 
186 static int
187 altq_disable_locked(struct ifaltq *ifq)
188 {
189 	if (!ifq_is_enabled(ifq))
190 		return 0;
191 
192 	ifq_purge_all_locked(ifq);
193 	ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
194 	return 0;
195 }
196 
197 int
198 altq_disable(struct ifaltq *ifq)
199 {
200 	int error;
201 
202 	ifq_lock_all(ifq);
203 	error = altq_disable_locked(ifq);
204 	ifq_unlock_all(ifq);
205 	return error;
206 }
207 
208 /*
209  * internal representation of token bucket parameters
210  *	rate:	bytes per machclk tick, scaled by 2^32:
211  *		(((bits_per_sec) / 8) << 32) / machclk_freq
212  *	depth:	bytes, scaled by 2^32
213  *
214  */
215 #define	TBR_SHIFT	32
216 #define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
217 #define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
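/*
 * A worked example of the fixed-point scaling (illustrative only; the
 * 10Mbps profile and the 2GHz machclk_freq are hypothetical values, not
 * anything configured by this file):
 *
 *	profile rate  = 10,000,000 bits/sec  ->  1,250,000 bytes/sec
 *	machclk_freq  = 2,000,000,000 ticks/sec
 *	tbr_rate      = TBR_SCALE(1,250,000) / 2,000,000,000
 *	              = (1,250,000 << 32) / 2,000,000,000
 *	              ~= 2,684,354	(~0.000625 bytes per machclk tick)
 *
 *	profile depth = 1500 bytes
 *	tbr_depth     = TBR_SCALE(1500) ~= 6,442,450,944,000
 */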
218 
219 struct mbuf *
220 tbr_dequeue(struct ifaltq_subque *ifsq, int op)
221 {
222 	struct ifaltq *ifq = ifsq->ifsq_altq;
223 	struct tb_regulator *tbr;
224 	struct mbuf *m;
225 	int64_t interval;
226 	uint64_t now;
227 
228 	if (ifsq_get_index(ifsq) != ALTQ_SUBQ_INDEX_DEFAULT) {
229 		/*
230 		 * A race happened: an unrelated subqueue was
231 		 * picked during the packet scheduler transition.
232 		 */
233 		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
234 		return NULL;
235 	}
236 
237 	crit_enter();
238 	tbr = ifq->altq_tbr;
239 	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
240 		/* if this is a remove after poll, bypass tbr check */
241 	} else {
242 		/* update the token only when it is non-positive */
243 		if (tbr->tbr_token <= 0) {
244 			now = read_machclk();
245 			interval = now - tbr->tbr_last;
246 			if (interval >= tbr->tbr_filluptime)
247 				tbr->tbr_token = tbr->tbr_depth;
248 			else {
249 				tbr->tbr_token += interval * tbr->tbr_rate;
250 				if (tbr->tbr_token > tbr->tbr_depth)
251 					tbr->tbr_token = tbr->tbr_depth;
252 			}
253 			tbr->tbr_last = now;
254 		}
255 		/* if the token is still non-positive, don't allow a dequeue */
256 		if (tbr->tbr_token <= 0) {
257 			crit_exit();
258 			return (NULL);
259 		}
260 	}
261 
262 	if (ifq_is_enabled(ifq))
263 		m = (*ifsq->ifsq_dequeue)(ifsq, op);
264 	else
265 		m = ifsq_classic_dequeue(ifsq, op);
266 
267 	if (m != NULL && op == ALTDQ_REMOVE)
268 		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
269 	tbr->tbr_lastop = op;
270 	crit_exit();
271 	return (m);
272 }
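/*
 * A worked example of the refill arithmetic in tbr_dequeue() above,
 * reusing the hypothetical 10Mbps / 2GHz numbers from the TBR_SCALE()
 * comment: if 1 msec (2,000,000 machclk ticks) elapsed since tbr_last,
 *
 *	interval * tbr_rate = 2,000,000 * 2,684,354 ~= TBR_SCALE(1250),
 *
 * i.e. the bucket gains roughly 1250 bytes worth of tokens (exactly the
 * 10Mbps line rate), capped at tbr_depth.
 */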
273 
274 /*
275  * set a token bucket regulator.
276  * if the specified rate is zero, the token bucket regulator is deleted.
277  */
278 static int
279 tbr_set_locked(struct ifaltq *ifq, struct tb_profile *profile)
280 {
281 	struct tb_regulator *tbr, *otbr;
282 
283 	if (machclk_freq == 0)
284 		init_machclk();
285 	if (machclk_freq == 0) {
286 		kprintf("%s: no cpu clock available!\n", __func__);
287 		return (ENXIO);
288 	}
289 
290 	if (profile->rate == 0) {
291 		/* delete this tbr */
292 		if ((tbr = ifq->altq_tbr) == NULL)
293 			return (ENOENT);
294 		ifq->altq_tbr = NULL;
295 		kfree(tbr, M_ALTQ);
296 		return (0);
297 	}
298 
299 	tbr = kmalloc(sizeof(*tbr), M_ALTQ, M_WAITOK | M_ZERO);
300 	tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
301 	tbr->tbr_depth = TBR_SCALE(profile->depth);
302 	if (tbr->tbr_rate > 0)
303 		tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
304 	else
305 		tbr->tbr_filluptime = 0xffffffffffffffffLL;
306 	tbr->tbr_token = tbr->tbr_depth;
307 	tbr->tbr_last = read_machclk();
308 	tbr->tbr_lastop = ALTDQ_REMOVE;
309 
310 	otbr = ifq->altq_tbr;
311 	ifq->altq_tbr = tbr;	/* set the new tbr */
312 
313 	if (otbr != NULL)
314 		kfree(otbr, M_ALTQ);
315 	else if (tbr_timer == 0) {
316 		callout_reset_bycpu(&tbr_callout, 1, tbr_timeout, NULL, 0);
317 		tbr_timer = 1;
318 	}
319 	return (0);
320 }
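/*
 * Continuing the same hypothetical example, the fill-up time computed
 * above is the time an empty bucket needs to refill completely:
 *
 *	tbr_filluptime = tbr_depth / tbr_rate
 *	               ~= 6,442,450,944,000 / 2,684,354
 *	               ~= 2,400,000 machclk ticks  (~1.2 msec at 2GHz)
 *
 * Any idle interval at least that long simply resets the token count to
 * the full depth in tbr_dequeue().
 */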
321 
322 int
323 tbr_set(struct ifaltq *ifq, struct tb_profile *profile)
324 {
325 	int error;
326 
327 	ifq_lock_all(ifq);
328 	error = tbr_set_locked(ifq, profile);
329 	ifq_unlock_all(ifq);
330 	return error;
331 }
332 
333 static void
334 tbr_timeout(void *arg __unused)
335 {
336 	struct lwkt_msg *lmsg = &tbr_timeout_netmsg.lmsg;
337 
338 	KASSERT(mycpuid == 0, ("not on cpu0"));
339 	crit_enter();
340 	if (lmsg->ms_flags & MSGF_DONE)
341 		lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);
342 	crit_exit();
343 }
344 
345 /*
346  * tbr_timeout schedules this handler in netisr0; it goes through the
347  * interface list and kicks the drivers if necessary.
348  */
349 static void
350 tbr_timeout_dispatch(netmsg_t nmsg)
351 {
352 	struct ifnet *ifp;
353 	int active;
354 
355 	KASSERT(&curthread->td_msgport == netisr_cpuport(0),
356 	    ("not in netisr0"));
357 
358 	crit_enter();
359 	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
360 	crit_exit();
361 
362 	active = 0;
363 	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
364 		struct ifaltq_subque *ifsq;
365 
366 		if (ifp->if_snd.altq_tbr == NULL)
367 			continue;
368 
369 		ifsq = &ifp->if_snd.altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
370 		active++;
371 		if (!ifsq_is_empty(ifsq) && ifp->if_start != NULL) {
372 			ifsq_serialize_hw(ifsq);
373 			(*ifp->if_start)(ifp, ifsq);
374 			ifsq_deserialize_hw(ifsq);
375 		}
376 	}
377 	if (active > 0)
378 		callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
379 	else
380 		tbr_timer = 0;	/* don't need tbr_timer anymore */
381 }
382 
383 /*
384  * get token bucket regulator profile
385  */
386 int
387 tbr_get(struct ifaltq *ifq, struct tb_profile *profile)
388 {
389 	struct tb_regulator *tbr;
390 
391 	if ((tbr = ifq->altq_tbr) == NULL) {
392 		profile->rate = 0;
393 		profile->depth = 0;
394 	} else {
395 		profile->rate =
396 		    (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
397 		profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
398 	}
399 	return (0);
400 }
401 
402 /*
403  * attach a discipline to the interface.  if one already exists, it is
404  * overridden.
405  */
406 int
407 altq_pfattach(struct pf_altq *a)
408 {
409 	struct ifaltq *ifq;
410 	struct ifnet *ifp;
411 	int error;
412 
413 	if (a->scheduler == ALTQT_NONE)
414 		return 0;
415 
416 	if (a->altq_disc == NULL)
417 		return EINVAL;
418 
419 	ifp = ifunit(a->ifname);
420 	if (ifp == NULL)
421 		return EINVAL;
422 	ifq = &ifp->if_snd;
423 
424 	ifq_lock_all(ifq);
425 
426 	switch (a->scheduler) {
427 #ifdef ALTQ_CBQ
428 	case ALTQT_CBQ:
429 		error = cbq_pfattach(a, ifq);
430 		break;
431 #endif
432 #ifdef ALTQ_PRIQ
433 	case ALTQT_PRIQ:
434 		error = priq_pfattach(a, ifq);
435 		break;
436 #endif
437 #ifdef ALTQ_HFSC
438 	case ALTQT_HFSC:
439 		error = hfsc_pfattach(a, ifq);
440 		break;
441 #endif
442 #ifdef ALTQ_FAIRQ
443 	case ALTQT_FAIRQ:
444 		error = fairq_pfattach(a, ifq);
445 		break;
446 #endif
447 	default:
448 		error = ENXIO;
449 		goto back;
450 	}
451 
452 	/* if the state is running, enable altq */
453 	if (error == 0 && pfaltq_running && ifq->altq_type != ALTQT_NONE &&
454 	    !ifq_is_enabled(ifq))
455 		error = altq_enable_locked(ifq);
456 
457 	/* if altq is already enabled, reset the token bucket regulator */
458 	if (error == 0 && ifq_is_enabled(ifq)) {
459 		struct tb_profile tb;
460 
461 		tb.rate = a->ifbandwidth;
462 		tb.depth = a->tbrsize;
463 		error = tbr_set_locked(ifq, &tb);
464 	}
465 back:
466 	ifq_unlock_all(ifq);
467 	return (error);
468 }
469 
470 /*
471  * detach a discipline from the interface.
472  * it is possible that the discipline was already overridden by another
473  * discipline.
474  */
475 int
476 altq_pfdetach(struct pf_altq *a)
477 {
478 	struct ifnet *ifp;
479 	struct ifaltq *ifq;
480 	int error = 0;
481 
482 	ifp = ifunit(a->ifname);
483 	if (ifp == NULL)
484 		return (EINVAL);
485 	ifq = &ifp->if_snd;
486 
487 	/* if this discipline is no longer referenced, just return */
488 	if (a->altq_disc == NULL)
489 		return (0);
490 
491 	ifq_lock_all(ifq);
492 
493 	if (a->altq_disc != ifq->altq_disc)
494 		goto back;
495 
496 	if (ifq_is_enabled(ifq))
497 		error = altq_disable_locked(ifq);
498 	if (error == 0)
499 		error = altq_detach_locked(ifq);
500 
501 back:
502 	ifq_unlock_all(ifq);
503 	return (error);
504 }
505 
506 /*
507  * add a discipline or a queue
508  */
509 int
510 altq_add(struct pf_altq *a)
511 {
512 	int error = 0;
513 
514 	if (a->qname[0] != 0)
515 		return (altq_add_queue(a));
516 
517 	if (machclk_freq == 0)
518 		init_machclk();
519 	if (machclk_freq == 0)
520 		panic("altq_add: no cpu clock");
521 
522 	switch (a->scheduler) {
523 #ifdef ALTQ_CBQ
524 	case ALTQT_CBQ:
525 		error = cbq_add_altq(a);
526 		break;
527 #endif
528 #ifdef ALTQ_PRIQ
529 	case ALTQT_PRIQ:
530 		error = priq_add_altq(a);
531 		break;
532 #endif
533 #ifdef ALTQ_HFSC
534 	case ALTQT_HFSC:
535 		error = hfsc_add_altq(a);
536 		break;
537 #endif
538 #ifdef ALTQ_FAIRQ
539 	case ALTQT_FAIRQ:
540 		error = fairq_add_altq(a);
541 		break;
542 #endif
543 	default:
544 		error = ENXIO;
545 	}
546 
547 	return (error);
548 }
549 
550 /*
551  * remove a discipline or a queue
552  */
553 int
554 altq_remove(struct pf_altq *a)
555 {
556 	int error = 0;
557 
558 	if (a->qname[0] != 0)
559 		return (altq_remove_queue(a));
560 
561 	switch (a->scheduler) {
562 #ifdef ALTQ_CBQ
563 	case ALTQT_CBQ:
564 		error = cbq_remove_altq(a);
565 		break;
566 #endif
567 #ifdef ALTQ_PRIQ
568 	case ALTQT_PRIQ:
569 		error = priq_remove_altq(a);
570 		break;
571 #endif
572 #ifdef ALTQ_HFSC
573 	case ALTQT_HFSC:
574 		error = hfsc_remove_altq(a);
575 		break;
576 #endif
577 #ifdef ALTQ_FAIRQ
578 	case ALTQT_FAIRQ:
579 		error = fairq_remove_altq(a);
580 		break;
581 #endif
582 	default:
583 		error = ENXIO;
584 	}
585 
586 	return (error);
587 }
588 
589 /*
590  * add a queue to the discipline
591  */
592 int
593 altq_add_queue(struct pf_altq *a)
594 {
595 	int error = 0;
596 
597 	switch (a->scheduler) {
598 #ifdef ALTQ_CBQ
599 	case ALTQT_CBQ:
600 		error = cbq_add_queue(a);
601 		break;
602 #endif
603 #ifdef ALTQ_PRIQ
604 	case ALTQT_PRIQ:
605 		error = priq_add_queue(a);
606 		break;
607 #endif
608 #ifdef ALTQ_HFSC
609 	case ALTQT_HFSC:
610 		error = hfsc_add_queue(a);
611 		break;
612 #endif
613 #ifdef ALTQ_FAIRQ
614 	case ALTQT_FAIRQ:
615 		error = fairq_add_queue(a);
616 		break;
617 #endif
618 	default:
619 		error = ENXIO;
620 	}
621 
622 	return (error);
623 }
624 
625 /*
626  * remove a queue from the discipline
627  */
628 int
629 altq_remove_queue(struct pf_altq *a)
630 {
631 	int error = 0;
632 
633 	switch (a->scheduler) {
634 #ifdef ALTQ_CBQ
635 	case ALTQT_CBQ:
636 		error = cbq_remove_queue(a);
637 		break;
638 #endif
639 #ifdef ALTQ_PRIQ
640 	case ALTQT_PRIQ:
641 		error = priq_remove_queue(a);
642 		break;
643 #endif
644 #ifdef ALTQ_HFSC
645 	case ALTQT_HFSC:
646 		error = hfsc_remove_queue(a);
647 		break;
648 #endif
649 #ifdef ALTQ_FAIRQ
650 	case ALTQT_FAIRQ:
651 		error = fairq_remove_queue(a);
652 		break;
653 #endif
654 	default:
655 		error = ENXIO;
656 	}
657 
658 	return (error);
659 }
660 
661 /*
662  * get queue statistics
663  */
664 int
665 altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
666 {
667 	int error = 0;
668 
669 	switch (a->scheduler) {
670 #ifdef ALTQ_CBQ
671 	case ALTQT_CBQ:
672 		error = cbq_getqstats(a, ubuf, nbytes);
673 		break;
674 #endif
675 #ifdef ALTQ_PRIQ
676 	case ALTQT_PRIQ:
677 		error = priq_getqstats(a, ubuf, nbytes);
678 		break;
679 #endif
680 #ifdef ALTQ_HFSC
681 	case ALTQT_HFSC:
682 		error = hfsc_getqstats(a, ubuf, nbytes);
683 		break;
684 #endif
685 #ifdef ALTQ_FAIRQ
686 	case ALTQT_FAIRQ:
687 		error = fairq_getqstats(a, ubuf, nbytes);
688 		break;
689 #endif
690 	default:
691 		error = ENXIO;
692 	}
693 
694 	return (error);
695 }
696 
697 /*
698  * read and write diffserv field in IPv4 or IPv6 header
699  */
700 uint8_t
701 read_dsfield(struct mbuf *m, struct altq_pktattr *pktattr)
702 {
703 	struct mbuf *m0;
704 	uint8_t ds_field = 0;
705 
706 	if (pktattr == NULL ||
707 	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
708 		return ((uint8_t)0);
709 
710 	/* verify that pattr_hdr is within the mbuf data */
711 	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
712 		if ((pktattr->pattr_hdr >= m0->m_data) &&
713 		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
714 			break;
715 	}
716 	if (m0 == NULL) {
717 		/* ick, pattr_hdr is stale */
718 		pktattr->pattr_af = AF_UNSPEC;
719 #ifdef ALTQ_DEBUG
720 		kprintf("read_dsfield: can't locate header!\n");
721 #endif
722 		return ((uint8_t)0);
723 	}
724 
725 	if (pktattr->pattr_af == AF_INET) {
726 		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
727 
728 		if (ip->ip_v != 4)
729 			return ((uint8_t)0);	/* version mismatch! */
730 		ds_field = ip->ip_tos;
731 	}
732 #ifdef INET6
733 	else if (pktattr->pattr_af == AF_INET6) {
734 		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
735 		uint32_t flowlabel;
736 
737 		flowlabel = ntohl(ip6->ip6_flow);
738 		if ((flowlabel >> 28) != 6)
739 			return ((uint8_t)0);	/* version mismatch! */
740 		ds_field = (flowlabel >> 20) & 0xff;
741 	}
742 #endif
743 	return (ds_field);
744 }
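/*
 * For reference, the bit layout of the (host byte order) ip6_flow word
 * handled above and in write_dsfield() below, per RFC 2460:
 *
 *	bits 31-28	IP version (6)
 *	bits 27-20	traffic class (the DS field extracted by ">> 20")
 *	bits 19-0	flow label
 *
 * The 0xf03fffff mask in write_dsfield() clears the DSCP bits (27-22)
 * while preserving the version, the two low CU/ECN bits and the flow
 * label before the new dsfield is or'ed in.
 */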
745 
746 void
747 write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, uint8_t dsfield)
748 {
749 	struct mbuf *m0;
750 
751 	if (pktattr == NULL ||
752 	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
753 		return;
754 
755 	/* verify that pattr_hdr is within the mbuf data */
756 	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
757 		if ((pktattr->pattr_hdr >= m0->m_data) &&
758 		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
759 			break;
760 	}
761 	if (m0 == NULL) {
762 		/* ick, pattr_hdr is stale */
763 		pktattr->pattr_af = AF_UNSPEC;
764 #ifdef ALTQ_DEBUG
765 		kprintf("write_dsfield: can't locate header!\n");
766 #endif
767 		return;
768 	}
769 
770 	if (pktattr->pattr_af == AF_INET) {
771 		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
772 		uint8_t old;
773 		int32_t sum;
774 
775 		if (ip->ip_v != 4)
776 			return;		/* version mismatch! */
777 		old = ip->ip_tos;
778 		dsfield |= old & 3;	/* leave the CU (now ECN) bits */
779 		if (old == dsfield)
780 			return;
781 		ip->ip_tos = dsfield;
782 		/*
783 		 * update checksum (from RFC1624)
784 		 *	   HC' = ~(~HC + ~m + m')
785 		 */
786 		sum = ~ntohs(ip->ip_sum) & 0xffff;
787 		sum += 0xff00 + (~old & 0xff) + dsfield;
788 		sum = (sum >> 16) + (sum & 0xffff);
789 		sum += (sum >> 16);  /* add carry */
790 
791 		ip->ip_sum = htons(~sum & 0xffff);
792 	}
793 #ifdef INET6
794 	else if (pktattr->pattr_af == AF_INET6) {
795 		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
796 		uint32_t flowlabel;
797 
798 		flowlabel = ntohl(ip6->ip6_flow);
799 		if ((flowlabel >> 28) != 6)
800 			return;		/* version mismatch! */
801 		flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
802 		ip6->ip6_flow = htonl(flowlabel);
803 	}
804 #endif
805 }
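/*
 * A minimal standalone sketch (not used anywhere in this file) of the
 * RFC 1624 incremental update performed inline by write_dsfield() above:
 * fix an Internet checksum after a single 16-bit word of the covered
 * data changes from "m_old" to "m_new".  All values are assumed to be in
 * host byte order; the function name is hypothetical.
 */
static __inline uint16_t
cksum_update16(uint16_t cksum, uint16_t m_old, uint16_t m_new)
{
	uint32_t sum;

	/* HC' = ~(~HC + ~m + m'), folding the end-around carry twice */
	sum = (uint32_t)(~cksum & 0xffff) + (~m_old & 0xffff) + m_new;
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);
	return (~sum & 0xffff);
}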
806 
807 /*
808  * high resolution clock support taking advantage of a machine dependent
809  * high resolution time counter (e.g., the timestamp counter (TSC) on x86 CPUs).
810  * we assume
811  *  - a 64-bit, monotonically increasing counter
812  *  - a frequency in the 100MHz-4GHz range (CPU speed)
813  */
814 /* if the PCC (CPU cycle counter) is unavailable or disabled, emulate 256MHz using microtime() */
815 #define	MACHCLK_SHIFT	8
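/*
 * With this emulation, one machclk unit is 1/256 of a microsecond:
 *
 *	machclk_freq   = 1,000,000 << 8 = 256,000,000 (units/sec)
 *	read_machclk() = microseconds since boot << 8
 *
 * e.g. with a hypothetical hz of 100, machclk_per_tick becomes
 * 256,000,000 / 100 = 2,560,000 machclk units per clock interrupt.
 */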
816 
817 static int machclk_usepcc;
818 uint64_t machclk_freq = 0;
819 uint32_t machclk_per_tick = 0;
820 
821 void
822 init_machclk(void)
823 {
824 	callout_init_mp(&tbr_callout);
825 	netmsg_init(&tbr_timeout_netmsg, NULL, &netisr_adone_rport,
826 	    MSGF_PRIORITY, tbr_timeout_dispatch);
827 
828 #ifdef ALTQ_NOPCC
829 	machclk_usepcc = 0;
830 #else
831 	machclk_usepcc = 1;
832 #endif
833 
834 #if defined(__i386__) || defined(__x86_64__)
835 	if (!tsc_mpsync)
836 		machclk_usepcc = 0;
837 #else
838 	machclk_usepcc = 0;
839 #endif
840 
841 	if (!machclk_usepcc) {
842 		/* emulate 256MHz using microtime() */
843 		machclk_freq = 1000000LLU << MACHCLK_SHIFT;
844 		machclk_per_tick = machclk_freq / hz;
845 #ifdef ALTQ_DEBUG
846 		kprintf("altq: emulate %juHz cpu clock\n",
847 		    (uintmax_t)machclk_freq);
848 #endif
849 		return;
850 	}
851 
852 	/*
853 	 * If the clock frequency (of Pentium TSC) is accessible,
854 	 * just use it.
855 	 */
856 #ifdef _RDTSC_SUPPORTED_
857 	if (tsc_present)
858 		machclk_freq = (uint64_t)tsc_frequency;
859 #endif
860 
861 	/*
862 	 * If we don't know the clock frequency, measure it.
863 	 */
864 	if (machclk_freq == 0) {
865 		static int	wait;
866 		struct timeval	tv_start, tv_end;
867 		uint64_t	start, end, diff;
868 		int		timo;
869 
870 		microtime(&tv_start);
871 		start = read_machclk();
872 		timo = hz;	/* 1 sec */
873 		tsleep(&wait, PCATCH, "init_machclk", timo);
874 		microtime(&tv_end);
875 		end = read_machclk();
876 		diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
877 		    + tv_end.tv_usec - tv_start.tv_usec;
878 		if (diff != 0)
879 			machclk_freq = (end - start) * 1000000 / diff;
880 	}
881 
882 	machclk_per_tick = machclk_freq / hz;
883 
884 #ifdef ALTQ_DEBUG
885 	kprintf("altq: CPU clock: %juHz\n", (uintmax_t)machclk_freq);
886 #endif
887 }
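/*
 * Worked example of the fallback calibration above (hypothetical
 * numbers): if 2,400,360,000 machclk ticks elapse over a measured
 * 1,000,150 usec, then
 *
 *	machclk_freq = 2,400,360,000 * 1,000,000 / 1,000,150
 *	             = 2,400,000,000 Hz  (a 2.4GHz TSC)
 */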
888 
889 uint64_t
890 read_machclk(void)
891 {
892 	uint64_t val;
893 
894 	if (machclk_usepcc) {
895 #ifdef _RDTSC_SUPPORTED_
896 		val = rdtsc();
897 #else
898 		panic("read_machclk");
899 #endif
900 	} else {
901 		struct timeval tv;
902 
903 		microtime(&tv);
904 		val = (((uint64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
905 		    + tv.tv_usec) << MACHCLK_SHIFT);
906 	}
907 	return (val);
908 }
909