xref: /netbsd-src/sys/altq/altq_blue.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: altq_blue.c,v 1.24 2016/04/20 08:58:48 knakahara Exp $	*/
2 /*	$KAME: altq_blue.c,v 1.15 2005/04/13 03:44:24 suz Exp $	*/
3 
4 /*
5  * Copyright (C) 1997-2002
6  *	Sony Computer Science Laboratories Inc.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  */
30 /*
31  * Copyright (c) 1990-1994 Regents of the University of California.
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  * 3. All advertising materials mentioning features or use of this software
43  *    must display the following acknowledgement:
44  *	This product includes software developed by the Computer Systems
45  *	Engineering Group at Lawrence Berkeley Laboratory.
46  * 4. Neither the name of the University nor of the Laboratory may be used
47  *    to endorse or promote products derived from this software without
48  *    specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60  * SUCH DAMAGE.
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: altq_blue.c,v 1.24 2016/04/20 08:58:48 knakahara Exp $");
65 
66 #ifdef _KERNEL_OPT
67 #include "opt_altq.h"
68 #include "opt_inet.h"
69 #endif
70 
71 #ifdef ALTQ_BLUE	/* blue is enabled by ALTQ_BLUE option in opt_altq.h */
72 
73 #include <sys/param.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/socket.h>
77 #include <sys/sockio.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/errno.h>
81 #include <sys/kernel.h>
82 #include <sys/kauth.h>
83 #include <sys/cprng.h>
84 
85 #include <net/if.h>
86 #include <net/if_types.h>
87 #include <netinet/in.h>
88 #include <netinet/in_systm.h>
89 #include <netinet/ip.h>
90 #ifdef INET6
91 #include <netinet/ip6.h>
92 #endif
93 
94 #include <altq/altq.h>
95 #include <altq/altq_conf.h>
96 #include <altq/altq_blue.h>
97 
98 #ifdef ALTQ3_COMPAT
99 /*
100  * Blue is proposed and implemented by Wu-chang Feng <wuchang@eecs.umich.edu>.
101  * more information on Blue is available from
102  * http://www.eecs.umich.edu/~wuchang/blue/
103  */
104 
105 /* fixed-point uses 12-bit decimal places */
106 #define	FP_SHIFT	12	/* fixed-point shift */
107 
#define	BLUE_LIMIT	200	/* default max queue length */
109 #define	BLUE_STATS		/* collect statistics */
110 
111 /* blue_list keeps all blue_state_t's allocated. */
112 static blue_queue_t *blue_list = NULL;
113 
114 /* internal function prototypes */
115 static int blue_enqueue(struct ifaltq *, struct mbuf *);
116 static struct mbuf *blue_dequeue(struct ifaltq *, int);
117 static int drop_early(blue_t *);
118 static int mark_ecn(struct mbuf *, struct altq_pktattr *, int);
119 static int blue_detach(blue_queue_t *);
120 static int blue_request(struct ifaltq *, int, void *);
121 
122 /*
123  * blue device interface
124  */
125 altqdev_decl(blue);
126 
127 int
128 blueopen(dev_t dev, int flag, int fmt,
129     struct lwp *l)
130 {
131 	/* everything will be done when the queueing scheme is attached. */
132 	return 0;
133 }
134 
135 int
136 blueclose(dev_t dev, int flag, int fmt,
137     struct lwp *l)
138 {
139 	blue_queue_t *rqp;
140 	int err, error = 0;
141 
142 	while ((rqp = blue_list) != NULL) {
143 		/* destroy all */
144 		err = blue_detach(rqp);
145 		if (err != 0 && error == 0)
146 			error = err;
147 	}
148 
149 	return error;
150 }
151 
/*
 * ioctl entry point for the blue device.  Dispatches the ALTQ/BLUE
 * configuration requests: enable/disable, attach/detach to an
 * interface, statistics retrieval and parameter configuration.
 * Returns 0 on success or an errno value.
 */
int
blueioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
    struct lwp *l)
{
	blue_queue_t *rqp;
	struct blue_interface *ifacep;
	struct ifnet *ifp;
	int	error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case BLUE_GETSTATS:
		/* statistics are readable without privilege */
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_BLUE, NULL,
		    NULL, NULL)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case BLUE_ENABLE:
		/* start running BLUE on an already-attached interface */
		ifacep = (struct blue_interface *)addr;
		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case BLUE_DISABLE:
		ifacep = (struct blue_interface *)addr;
		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case BLUE_IF_ATTACH:
		/* create BLUE state and hook it onto the interface's send queue */
		ifp = ifunit(((struct blue_interface *)addr)->blue_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize blue_state_t */
		rqp = malloc(sizeof(blue_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}

		rqp->rq_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_blue = malloc(sizeof(blue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_blue == NULL) {
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = BLUE_LIMIT;

		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
		blue_init(rqp->rq_blue, 0, 800, 1000, 50000);

		/*
		 * set BLUE to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_BLUE, rqp,
				    blue_enqueue, blue_dequeue, blue_request,
				    NULL, NULL);
		if (error) {
			free(rqp->rq_blue, M_DEVBUF);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the blue list */
		rqp->rq_next = blue_list;
		blue_list = rqp;
		break;

	case BLUE_IF_DETACH:
		ifacep = (struct blue_interface *)addr;
		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
			error = EBADF;
			break;
		}
		error = blue_detach(rqp);
		break;

	case BLUE_GETSTATS:
		/* copy current queue state and drop/mark counters to userland */
		do {
			struct blue_stats *q_stats;
			blue_t *rp;

			q_stats = (struct blue_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.blue_ifname,
					     ALTQT_BLUE)) == NULL) {
				error = EBADF;
				break;
			}

			q_stats->q_len 	   = qlen(rqp->rq_q);
			q_stats->q_limit   = qlimit(rqp->rq_q);

			rp = rqp->rq_blue;
			q_stats->q_pmark = rp->blue_pmark;
			q_stats->xmit_packets  = rp->blue_stats.xmit_packets;
			q_stats->xmit_bytes    = rp->blue_stats.xmit_bytes;
			q_stats->drop_packets  = rp->blue_stats.drop_packets;
			q_stats->drop_bytes    = rp->blue_stats.drop_bytes;
			q_stats->drop_forced   = rp->blue_stats.drop_forced;
			q_stats->drop_unforced = rp->blue_stats.drop_unforced;
			q_stats->marked_packets = rp->blue_stats.marked_packets;

		} while (/*CONSTCOND*/ 0);
		break;

	case BLUE_CONFIG:
		/* update tunables; zero/negative values leave the old setting */
		do {
			struct blue_conf *fc;
			int limit;

			fc = (struct blue_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.blue_ifname,
					       ALTQT_BLUE)) == NULL) {
				error = EBADF;
				break;
			}
			limit = fc->blue_limit;
			qlimit(rqp->rq_q) = limit;
			fc->blue_limit = limit;	/* write back the new value */
			if (fc->blue_pkttime > 0)
				rqp->rq_blue->blue_pkttime = fc->blue_pkttime;
			if (fc->blue_max_pmark > 0)
				rqp->rq_blue->blue_max_pmark = fc->blue_max_pmark;
			if (fc->blue_hold_time > 0)
				rqp->rq_blue->blue_hold_time = fc->blue_hold_time;
			rqp->rq_blue->blue_flags = fc->blue_flags;

			/* re-init so the new parameters take effect together */
			blue_init(rqp->rq_blue, rqp->rq_blue->blue_flags,
				  rqp->rq_blue->blue_pkttime,
				  rqp->rq_blue->blue_max_pmark,
				  rqp->rq_blue->blue_hold_time);
		} while (/*CONSTCOND*/ 0);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}
327 
328 static int
329 blue_detach(blue_queue_t *rqp)
330 {
331 	blue_queue_t *tmp;
332 	int error = 0;
333 
334 	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
335 		altq_disable(rqp->rq_ifq);
336 
337 	if ((error = altq_detach(rqp->rq_ifq)))
338 		return (error);
339 
340 	if (blue_list == rqp)
341 		blue_list = rqp->rq_next;
342 	else {
343 		for (tmp = blue_list; tmp != NULL; tmp = tmp->rq_next)
344 			if (tmp->rq_next == rqp) {
345 				tmp->rq_next = rqp->rq_next;
346 				break;
347 			}
348 		if (tmp == NULL)
349 			printf("blue_detach: no state found in blue_list!\n");
350 	}
351 
352 	free(rqp->rq_q, M_DEVBUF);
353 	free(rqp->rq_blue, M_DEVBUF);
354 	free(rqp, M_DEVBUF);
355 	return (error);
356 }
357 
358 /*
359  * blue support routines
360  */
361 
362 int
363 blue_init(blue_t *rp, int flags, int pkttime, int blue_max_pmark,
364     int blue_hold_time)
365 {
366 	int npkts_per_sec;
367 
368 	rp->blue_idle = 1;
369 	rp->blue_flags = flags;
370 	rp->blue_pkttime = pkttime;
371 	rp->blue_max_pmark = blue_max_pmark;
372 	rp->blue_hold_time = blue_hold_time;
373 	if (pkttime == 0)
374 		rp->blue_pkttime = 1;
375 
376 	/* when the link is very slow, adjust blue parameters */
377 	npkts_per_sec = 1000000 / rp->blue_pkttime;
378 	if (npkts_per_sec < 50) {
379 	}
380 	else if (npkts_per_sec < 300) {
381 	}
382 
383 	microtime(&rp->blue_last);
384 	return (0);
385 }
386 
387 /*
388  * enqueue routine:
389  *
390  *	returns: 0 when successfully queued.
391  *		 ENOBUFS when drop occurs.
392  */
393 static int
394 blue_enqueue(struct ifaltq *ifq, struct mbuf *m)
395 {
396 	struct altq_pktattr pktattr;
397 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
398 	int error = 0;
399 
400 	pktattr.pattr_class = m->m_pkthdr.pattr_class;
401 	pktattr.pattr_af = m->m_pkthdr.pattr_af;
402 	pktattr.pattr_hdr = m->m_pkthdr.pattr_hdr;
403 
404 	if (blue_addq(rqp->rq_blue, rqp->rq_q, m, &pktattr) == 0)
405 		ifq->ifq_len++;
406 	else
407 		error = ENOBUFS;
408 	return error;
409 }
410 
411 #define	DTYPE_NODROP	0	/* no drop */
412 #define	DTYPE_FORCED	1	/* a "forced" drop */
413 #define	DTYPE_EARLY	2	/* an "unforced" (early) drop */
414 
/*
 * Core BLUE enqueue decision.  Decides whether to queue m, ECN-mark it,
 * drop it early ("unforced"), or drop a random victim when the queue is
 * full ("forced"), adjusting the marking probability blue_pmark as the
 * BLUE algorithm prescribes.
 *
 * Returns 0 when the packet was queued, -1 when a packet was dropped
 * (either m itself or a random victim already in the queue).
 */
int
blue_addq(blue_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	int droptype;

	/*
	 * if we were idle, this is an enqueue onto an empty queue
	 * and we should decrement marking probability
	 *
	 */
	if (rp->blue_idle) {
		struct timeval now;
		int t;
		rp->blue_idle = 0;
		microtime(&now);
		t = (now.tv_sec - rp->blue_last.tv_sec);
		if ( t > 1) {
			/* idle for over a second: reset pmark to minimum */
			rp->blue_pmark = 1;
			microtime(&rp->blue_last);
		} else {
			/* idle time in usec; decrement pmark if past hold time */
			t = t * 1000000 + (now.tv_usec - rp->blue_last.tv_usec);
			if (t > rp->blue_hold_time) {
				rp->blue_pmark--;
				if (rp->blue_pmark < 0) rp->blue_pmark = 0;
				microtime(&rp->blue_last);
			}
		}
	}

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (drop_early(rp) && qlen(q) > 1) {
		/* mark or drop by blue */
		if ((rp->blue_flags & BLUEF_ECN) &&
		    mark_ecn(m, pktattr, rp->blue_flags)) {
			/* successfully marked.  do not drop. */
#ifdef BLUE_STATS
			rp->blue_stats.marked_packets++;
#endif
		} else {
			/* unforced drop by blue */
			droptype = DTYPE_EARLY;
		}
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	/* if successful or forced drop, enqueue this packet. */
	if (droptype != DTYPE_EARLY)
		_addq(q, m);

	if (droptype != DTYPE_NODROP) {
		if (droptype == DTYPE_EARLY) {
			/* drop the incoming packet */
#ifdef BLUE_STATS
			rp->blue_stats.drop_unforced++;
#endif
		} else {
			struct timeval now;
			int t;
			/* forced drop, select a victim packet in the queue. */
			m = _getq_random(q);
			/*
			 * queue overflow signals congestion: bump pmark by
			 * max_pmark/8, capped, at most once per hold time.
			 */
			microtime(&now);
			t = (now.tv_sec - rp->blue_last.tv_sec);
			t = t * 1000000 + (now.tv_usec - rp->blue_last.tv_usec);
			if (t > rp->blue_hold_time) {
				rp->blue_pmark += rp->blue_max_pmark >> 3;
				if (rp->blue_pmark > rp->blue_max_pmark)
					rp->blue_pmark = rp->blue_max_pmark;
				microtime(&rp->blue_last);
			}
#ifdef BLUE_STATS
			rp->blue_stats.drop_forced++;
#endif
		}
#ifdef BLUE_STATS
		rp->blue_stats.drop_packets++;
		rp->blue_stats.drop_bytes += m->m_pkthdr.len;
#endif
		/* m is either the incoming packet or the random victim here */
		m_freem(m);
		return (-1);
	}
	/* successfully queued */
	return (0);
}
505 
506 /*
507  * early-drop probability is kept in blue_pmark
508  *
509  */
510 static int
511 drop_early(blue_t *rp)
512 {
513 	if ((cprng_fast32() % rp->blue_max_pmark) < rp->blue_pmark) {
514 		/* drop or mark */
515 		return (1);
516 	}
517 	/* no drop/mark */
518 	return (0);
519 }
520 
521 /*
522  * try to mark CE bit to the packet.
523  *    returns 1 if successfully marked, 0 otherwise.
524  */
/*
 * try to mark CE bit to the packet.
 *    returns 1 if successfully marked, 0 otherwise.
 *
 * flags selects which address families may be marked (BLUEF_ECN4 /
 * BLUEF_ECN6).  Only ECN-capable packets (ECT set) are marked; not-ECT
 * packets return 0 so the caller falls back to dropping.
 */
static int
mark_ecn(struct mbuf *m, struct altq_pktattr *pktattr, int flags)
{
	struct mbuf *m0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return (0);

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((char *)pktattr->pattr_hdr >= m0->m_data) &&
		    ((char *)pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
		return (0);
	}

	switch (pktattr->pattr_af) {
	case AF_INET:
		if (flags & BLUEF_ECN4) {
			struct ip *ip = (struct ip *)pktattr->pattr_hdr;
			u_int8_t otos;
			int sum;

			if (ip->ip_v != 4)
				return (0);	/* version mismatch! */
			if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
				return (0);	/* not-ECT */
			if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
				return (1);	/* already marked */

			/*
			 * ecn-capable but not marked,
			 * mark CE and update checksum
			 */
			otos = ip->ip_tos;
			ip->ip_tos |= IPTOS_ECN_CE;
			/*
			 * update checksum (from RFC1624)
			 *	   HC' = ~(~HC + ~m + m')
			 */
			sum = ~ntohs(ip->ip_sum) & 0xffff;
			sum += (~otos & 0xffff) + ip->ip_tos;
			sum = (sum >> 16) + (sum & 0xffff);
			sum += (sum >> 16);  /* add carry */
			ip->ip_sum = htons(~sum & 0xffff);
			return (1);
		}
		break;
#ifdef INET6
	case AF_INET6:
		if (flags & BLUEF_ECN6) {
			struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
			u_int32_t flowlabel;

			/* ECN bits live in the traffic class field, bits 20-21
			 * of the host-order ip6_flow word */
			flowlabel = ntohl(ip6->ip6_flow);
			if ((flowlabel >> 28) != 6)
				return (0);	/* version mismatch! */
			if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
			    (IPTOS_ECN_NOTECT << 20))
				return (0);	/* not-ECT */
			if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
			    (IPTOS_ECN_CE << 20))
				return (1);	/* already marked */
			/*
			 * ecn-capable but not marked,  mark CE
			 * (no checksum update needed: IPv6 has no header checksum)
			 */
			flowlabel |= (IPTOS_ECN_CE << 20);
			ip6->ip6_flow = htonl(flowlabel);
			return (1);
		}
		break;
#endif  /* INET6 */
	}

	/* not marked */
	return (0);
}
606 
607 /*
608  * dequeue routine:
609  *	must be called in splnet.
610  *
611  *	returns: mbuf dequeued.
612  *		 NULL when no packet is available in the queue.
613  */
614 
615 static struct mbuf *
616 blue_dequeue(struct ifaltq * ifq, int op)
617 {
618 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
619 	struct mbuf *m = NULL;
620 
621 	if (op == ALTDQ_POLL)
622 		return (qhead(rqp->rq_q));
623 
624 	m = blue_getq(rqp->rq_blue, rqp->rq_q);
625 	if (m != NULL)
626 		ifq->ifq_len--;
627 	return m;
628 }
629 
630 struct mbuf *
631 blue_getq(blue_t *rp, class_queue_t *q)
632 {
633 	struct mbuf *m;
634 
635 	if ((m = _getq(q)) == NULL) {
636 		if (rp->blue_idle == 0) {
637 			rp->blue_idle = 1;
638 			microtime(&rp->blue_last);
639 		}
640 		return NULL;
641 	}
642 
643 	rp->blue_idle = 0;
644 #ifdef BLUE_STATS
645 	rp->blue_stats.xmit_packets++;
646 	rp->blue_stats.xmit_bytes += m->m_pkthdr.len;
647 #endif
648 	return (m);
649 }
650 
651 static int
652 blue_request(struct ifaltq *ifq, int req, void *arg)
653 {
654 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
655 
656 	switch (req) {
657 	case ALTRQ_PURGE:
658 		_flushq(rqp->rq_q);
659 		if (ALTQ_IS_ENABLED(ifq))
660 			ifq->ifq_len = 0;
661 		break;
662 	}
663 	return (0);
664 }
665 
666 
667 #ifdef KLD_MODULE
668 
669 static struct altqsw blue_sw =
670 	{"blue", blueopen, blueclose, blueioctl};
671 
672 ALTQ_MODULE(altq_blue, ALTQT_BLUE, &blue_sw);
673 
674 #endif /* KLD_MODULE */
675 
676 #endif /* ALTQ3_COMPAT */
677 #endif /* ALTQ_BLUE */
678