xref: /netbsd-src/sys/altq/altq_blue.c (revision ce2c90c7c172d95d2402a5b3d96d8f8e6d138a21)
1 /*	$NetBSD: altq_blue.c,v 1.19 2006/10/12 19:59:08 peter Exp $	*/
2 /*	$KAME: altq_blue.c,v 1.15 2005/04/13 03:44:24 suz Exp $	*/
3 
4 /*
5  * Copyright (C) 1997-2002
6  *	Sony Computer Science Laboratories Inc.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  */
30 /*
31  * Copyright (c) 1990-1994 Regents of the University of California.
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  * 3. All advertising materials mentioning features or use of this software
43  *    must display the following acknowledgement:
44  *	This product includes software developed by the Computer Systems
45  *	Engineering Group at Lawrence Berkeley Laboratory.
46  * 4. Neither the name of the University nor of the Laboratory may be used
47  *    to endorse or promote products derived from this software without
48  *    specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60  * SUCH DAMAGE.
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: altq_blue.c,v 1.19 2006/10/12 19:59:08 peter Exp $");
65 
66 #ifdef _KERNEL_OPT
67 #include "opt_altq.h"
68 #include "opt_inet.h"
69 #endif
70 
71 #ifdef ALTQ_BLUE	/* blue is enabled by ALTQ_BLUE option in opt_altq.h */
72 
73 #include <sys/param.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/socket.h>
77 #include <sys/sockio.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/errno.h>
81 #include <sys/kernel.h>
82 #include <sys/kauth.h>
83 
84 #include <net/if.h>
85 #include <net/if_types.h>
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif
92 
93 #include <altq/altq.h>
94 #include <altq/altq_conf.h>
95 #include <altq/altq_blue.h>
96 
97 #ifdef ALTQ3_COMPAT
98 /*
99  * Blue is proposed and implemented by Wu-chang Feng <wuchang@eecs.umich.edu>.
100  * more information on Blue is available from
101  * http://www.eecs.umich.edu/~wuchang/blue/
102  */
103 
104 /* fixed-point uses 12-bit decimal places */
105 #define	FP_SHIFT	12	/* fixed-point shift */
106 
#define	BLUE_LIMIT	200	/* default max queue length */
108 #define	BLUE_STATS		/* collect statistics */
109 
110 /* blue_list keeps all blue_state_t's allocated. */
111 static blue_queue_t *blue_list = NULL;
112 
113 /* internal function prototypes */
114 static int blue_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
115 static struct mbuf *blue_dequeue(struct ifaltq *, int);
116 static int drop_early(blue_t *);
117 static int mark_ecn(struct mbuf *, struct altq_pktattr *, int);
118 static int blue_detach(blue_queue_t *);
119 static int blue_request(struct ifaltq *, int, void *);
120 
121 /*
122  * blue device interface
123  */
124 altqdev_decl(blue);
125 
126 int
127 blueopen(dev_t dev __unused, int flag __unused, int fmt __unused,
128     struct lwp *l __unused)
129 {
130 	/* everything will be done when the queueing scheme is attached. */
131 	return 0;
132 }
133 
134 int
135 blueclose(dev_t dev __unused, int flag __unused, int fmt __unused,
136     struct lwp *l __unused)
137 {
138 	blue_queue_t *rqp;
139 	int err, error = 0;
140 
141 	while ((rqp = blue_list) != NULL) {
142 		/* destroy all */
143 		err = blue_detach(rqp);
144 		if (err != 0 && error == 0)
145 			error = err;
146 	}
147 
148 	return error;
149 }
150 
/*
 * blueioctl: character-device ioctl entry for the blue discipline.
 *
 * Supported commands: attach/detach blue to a network interface,
 * enable/disable the discipline, fetch statistics, and set parameters.
 * Every command except BLUE_GETSTATS requires super-user privilege.
 */
int
blueioctl(dev_t dev __unused, ioctlcmd_t cmd, caddr_t addr, int flag __unused,
    struct lwp *l)
{
	blue_queue_t *rqp;
	struct blue_interface *ifacep;
	struct ifnet *ifp;
	int	error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case BLUE_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		/*
		 * NOTE(review): this FreeBSD branch references 'p', which is
		 * not a parameter of this function (the lwp argument is 'l');
		 * it would not compile if __FreeBSD_version were defined --
		 * confirm against the KAME original.
		 */
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case BLUE_ENABLE:
		/* start running blue on an interface that is already attached */
		ifacep = (struct blue_interface *)addr;
		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case BLUE_DISABLE:
		/* stop running blue; the attach state itself is kept */
		ifacep = (struct blue_interface *)addr;
		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case BLUE_IF_ATTACH:
		ifp = ifunit(((struct blue_interface *)addr)->blue_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize blue_state_t */
		rqp = malloc(sizeof(blue_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}

		rqp->rq_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_blue = malloc(sizeof(blue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_blue == NULL) {
			/* unwind the two earlier allocations */
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = BLUE_LIMIT;

		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
		blue_init(rqp->rq_blue, 0, 800, 1000, 50000);

		/*
		 * set BLUE to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_BLUE, rqp,
				    blue_enqueue, blue_dequeue, blue_request,
				    NULL, NULL);
		if (error) {
			/* attach failed: release everything allocated above */
			free(rqp->rq_blue, M_DEVBUF);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the blue list */
		rqp->rq_next = blue_list;
		blue_list = rqp;
		break;

	case BLUE_IF_DETACH:
		ifacep = (struct blue_interface *)addr;
		if ((rqp = altq_lookup(ifacep->blue_ifname, ALTQT_BLUE)) == NULL) {
			error = EBADF;
			break;
		}
		error = blue_detach(rqp);
		break;

	case BLUE_GETSTATS:
		/* copy the current counters out to the caller's buffer */
		do {
			struct blue_stats *q_stats;
			blue_t *rp;

			q_stats = (struct blue_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.blue_ifname,
					     ALTQT_BLUE)) == NULL) {
				error = EBADF;
				break;
			}

			q_stats->q_len 	   = qlen(rqp->rq_q);
			q_stats->q_limit   = qlimit(rqp->rq_q);

			rp = rqp->rq_blue;
			q_stats->q_pmark = rp->blue_pmark;
			q_stats->xmit_packets  = rp->blue_stats.xmit_packets;
			q_stats->xmit_bytes    = rp->blue_stats.xmit_bytes;
			q_stats->drop_packets  = rp->blue_stats.drop_packets;
			q_stats->drop_bytes    = rp->blue_stats.drop_bytes;
			q_stats->drop_forced   = rp->blue_stats.drop_forced;
			q_stats->drop_unforced = rp->blue_stats.drop_unforced;
			q_stats->marked_packets = rp->blue_stats.marked_packets;

		} while (/*CONSTCOND*/ 0);
		break;

	case BLUE_CONFIG:
		/*
		 * update tunables, then re-run blue_init() so derived state
		 * (idle flag, timestamps) is reset consistently.
		 */
		do {
			struct blue_conf *fc;
			int limit;

			fc = (struct blue_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.blue_ifname,
					       ALTQT_BLUE)) == NULL) {
				error = EBADF;
				break;
			}
			limit = fc->blue_limit;
			qlimit(rqp->rq_q) = limit;
			fc->blue_limit = limit;	/* write back the new value */
			if (fc->blue_pkttime > 0)
				rqp->rq_blue->blue_pkttime = fc->blue_pkttime;
			if (fc->blue_max_pmark > 0)
				rqp->rq_blue->blue_max_pmark = fc->blue_max_pmark;
			if (fc->blue_hold_time > 0)
				rqp->rq_blue->blue_hold_time = fc->blue_hold_time;
			rqp->rq_blue->blue_flags = fc->blue_flags;

			blue_init(rqp->rq_blue, rqp->rq_blue->blue_flags,
				  rqp->rq_blue->blue_pkttime,
				  rqp->rq_blue->blue_max_pmark,
				  rqp->rq_blue->blue_hold_time);
		} while (/*CONSTCOND*/ 0);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}
325 
326 static int
327 blue_detach(blue_queue_t *rqp)
328 {
329 	blue_queue_t *tmp;
330 	int error = 0;
331 
332 	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
333 		altq_disable(rqp->rq_ifq);
334 
335 	if ((error = altq_detach(rqp->rq_ifq)))
336 		return (error);
337 
338 	if (blue_list == rqp)
339 		blue_list = rqp->rq_next;
340 	else {
341 		for (tmp = blue_list; tmp != NULL; tmp = tmp->rq_next)
342 			if (tmp->rq_next == rqp) {
343 				tmp->rq_next = rqp->rq_next;
344 				break;
345 			}
346 		if (tmp == NULL)
347 			printf("blue_detach: no state found in blue_list!\n");
348 	}
349 
350 	free(rqp->rq_q, M_DEVBUF);
351 	free(rqp->rq_blue, M_DEVBUF);
352 	free(rqp, M_DEVBUF);
353 	return (error);
354 }
355 
356 /*
357  * blue support routines
358  */
359 
360 int
361 blue_init(blue_t *rp, int flags, int pkttime, int blue_max_pmark,
362     int blue_hold_time)
363 {
364 	int npkts_per_sec;
365 
366 	rp->blue_idle = 1;
367 	rp->blue_flags = flags;
368 	rp->blue_pkttime = pkttime;
369 	rp->blue_max_pmark = blue_max_pmark;
370 	rp->blue_hold_time = blue_hold_time;
371 	if (pkttime == 0)
372 		rp->blue_pkttime = 1;
373 
374 	/* when the link is very slow, adjust blue parameters */
375 	npkts_per_sec = 1000000 / rp->blue_pkttime;
376 	if (npkts_per_sec < 50) {
377 	}
378 	else if (npkts_per_sec < 300) {
379 	}
380 
381 	microtime(&rp->blue_last);
382 	return (0);
383 }
384 
385 /*
386  * enqueue routine:
387  *
388  *	returns: 0 when successfully queued.
389  *		 ENOBUFS when drop occurs.
390  */
391 static int
392 blue_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
393 {
394 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
395 	int error = 0;
396 
397 	if (blue_addq(rqp->rq_blue, rqp->rq_q, m, pktattr) == 0)
398 		ifq->ifq_len++;
399 	else
400 		error = ENOBUFS;
401 	return error;
402 }
403 
404 #define	DTYPE_NODROP	0	/* no drop */
405 #define	DTYPE_FORCED	1	/* a "forced" drop */
406 #define	DTYPE_EARLY	2	/* an "unforced" (early) drop */
407 
/*
 * blue_addq: enqueue a packet, applying the blue drop/mark algorithm.
 *
 * Returns 0 when the packet was queued, -1 when a packet was dropped
 * (the dropped mbuf is freed here).  On a forced drop (queue full) a
 * random victim already in the queue is dropped instead of the
 * incoming packet.
 */
int
blue_addq(blue_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	int droptype;

	/*
	 * if we were idle, this is an enqueue onto an empty queue
	 * and we should decrement marking probability
	 *
	 */
	if (rp->blue_idle) {
		struct timeval now;
		int t;
		rp->blue_idle = 0;
		microtime(&now);
		t = (now.tv_sec - rp->blue_last.tv_sec);
		if ( t > 1) {
			/* idle for over a second: reset pmark to minimum */
			rp->blue_pmark = 1;
			microtime(&rp->blue_last);
		} else {
			/* idle interval in microseconds */
			t = t * 1000000 + (now.tv_usec - rp->blue_last.tv_usec);
			if (t > rp->blue_hold_time) {
				/* decrement pmark, clamped at zero */
				rp->blue_pmark--;
				if (rp->blue_pmark < 0) rp->blue_pmark = 0;
				microtime(&rp->blue_last);
			}
		}
	}

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (drop_early(rp) && qlen(q) > 1) {
		/* mark or drop by blue */
		if ((rp->blue_flags & BLUEF_ECN) &&
		    mark_ecn(m, pktattr, rp->blue_flags)) {
			/* successfully marked.  do not drop. */
#ifdef BLUE_STATS
			rp->blue_stats.marked_packets++;
#endif
		} else {
			/* unforced drop by blue */
			droptype = DTYPE_EARLY;
		}
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	/* if successful or forced drop, enqueue this packet. */
	if (droptype != DTYPE_EARLY)
		_addq(q, m);

	if (droptype != DTYPE_NODROP) {
		if (droptype == DTYPE_EARLY) {
			/* drop the incoming packet */
#ifdef BLUE_STATS
			rp->blue_stats.drop_unforced++;
#endif
		} else {
			struct timeval now;
			int t;
			/* forced drop, select a victim packet in the queue. */
			m = _getq_random(q);
			microtime(&now);
			t = (now.tv_sec - rp->blue_last.tv_sec);
			t = t * 1000000 + (now.tv_usec - rp->blue_last.tv_usec);
			if (t > rp->blue_hold_time) {
				/* raise pmark by 1/8 of its maximum, clamped */
				rp->blue_pmark += rp->blue_max_pmark >> 3;
				if (rp->blue_pmark > rp->blue_max_pmark)
					rp->blue_pmark = rp->blue_max_pmark;
				microtime(&rp->blue_last);
			}
#ifdef BLUE_STATS
			rp->blue_stats.drop_forced++;
#endif
		}
#ifdef BLUE_STATS
		rp->blue_stats.drop_packets++;
		rp->blue_stats.drop_bytes += m->m_pkthdr.len;
#endif
		m_freem(m);
		return (-1);
	}
	/* successfully queued */
	return (0);
}
498 
499 /*
500  * early-drop probability is kept in blue_pmark
501  *
502  */
503 static int
504 drop_early(blue_t *rp)
505 {
506 	if ((arc4random() % rp->blue_max_pmark) < rp->blue_pmark) {
507 		/* drop or mark */
508 		return (1);
509 	}
510 	/* no drop/mark */
511 	return (0);
512 }
513 
514 /*
515  * try to mark CE bit to the packet.
516  *    returns 1 if successfully marked, 0 otherwise.
517  */
/*
 * try to mark CE bit to the packet.
 *    returns 1 if successfully marked, 0 otherwise.
 *
 * The packet attribute must carry a valid pointer to the IP/IPv6
 * header (pattr_hdr) inside the mbuf chain; if the pointer is stale
 * the attribute is invalidated and 0 is returned.
 */
static int
mark_ecn(struct mbuf *m, struct altq_pktattr *pktattr, int flags)
{
	struct mbuf *m0;

	/* only IPv4 and IPv6 packets can carry ECN marks */
	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return (0);

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
		return (0);
	}

	switch (pktattr->pattr_af) {
	case AF_INET:
		if (flags & BLUEF_ECN4) {
			struct ip *ip = (struct ip *)pktattr->pattr_hdr;
			u_int8_t otos;
			int sum;

			if (ip->ip_v != 4)
				return (0);	/* version mismatch! */
			if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
				return (0);	/* not-ECT */
			if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
				return (1);	/* already marked */

			/*
			 * ecn-capable but not marked,
			 * mark CE and update checksum
			 */
			otos = ip->ip_tos;
			ip->ip_tos |= IPTOS_ECN_CE;
			/*
			 * update checksum (from RFC1624) only for the
			 * changed TOS byte:
			 *	   HC' = ~(~HC + ~m + m')
			 */
			sum = ~ntohs(ip->ip_sum) & 0xffff;
			sum += (~otos & 0xffff) + ip->ip_tos;
			sum = (sum >> 16) + (sum & 0xffff);
			sum += (sum >> 16);  /* add carry */
			ip->ip_sum = htons(~sum & 0xffff);
			return (1);
		}
		break;
#ifdef INET6
	case AF_INET6:
		if (flags & BLUEF_ECN6) {
			struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
			u_int32_t flowlabel;

			/* the ECN bits live in the traffic-class field,
			 * bits 20-27 of the flow word */
			flowlabel = ntohl(ip6->ip6_flow);
			if ((flowlabel >> 28) != 6)
				return (0);	/* version mismatch! */
			if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
			    (IPTOS_ECN_NOTECT << 20))
				return (0);	/* not-ECT */
			if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
			    (IPTOS_ECN_CE << 20))
				return (1);	/* already marked */
			/*
			 * ecn-capable but not marked,  mark CE
			 */
			flowlabel |= (IPTOS_ECN_CE << 20);
			ip6->ip6_flow = htonl(flowlabel);
			return (1);
		}
		break;
#endif  /* INET6 */
	}

	/* not marked */
	return (0);
}
599 
600 /*
601  * dequeue routine:
602  *	must be called in splnet.
603  *
604  *	returns: mbuf dequeued.
605  *		 NULL when no packet is available in the queue.
606  */
607 
608 static struct mbuf *
609 blue_dequeue(struct ifaltq * ifq, int op)
610 {
611 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
612 	struct mbuf *m = NULL;
613 
614 	if (op == ALTDQ_POLL)
615 		return (qhead(rqp->rq_q));
616 
617 	m = blue_getq(rqp->rq_blue, rqp->rq_q);
618 	if (m != NULL)
619 		ifq->ifq_len--;
620 	return m;
621 }
622 
623 struct mbuf *
624 blue_getq(blue_t *rp, class_queue_t *q)
625 {
626 	struct mbuf *m;
627 
628 	if ((m = _getq(q)) == NULL) {
629 		if (rp->blue_idle == 0) {
630 			rp->blue_idle = 1;
631 			microtime(&rp->blue_last);
632 		}
633 		return NULL;
634 	}
635 
636 	rp->blue_idle = 0;
637 #ifdef BLUE_STATS
638 	rp->blue_stats.xmit_packets++;
639 	rp->blue_stats.xmit_bytes += m->m_pkthdr.len;
640 #endif
641 	return (m);
642 }
643 
644 static int
645 blue_request(struct ifaltq *ifq, int req, void *arg __unused)
646 {
647 	blue_queue_t *rqp = (blue_queue_t *)ifq->altq_disc;
648 
649 	switch (req) {
650 	case ALTRQ_PURGE:
651 		_flushq(rqp->rq_q);
652 		if (ALTQ_IS_ENABLED(ifq))
653 			ifq->ifq_len = 0;
654 		break;
655 	}
656 	return (0);
657 }
658 
659 
660 #ifdef KLD_MODULE
661 
662 static struct altqsw blue_sw =
663 	{"blue", blueopen, blueclose, blueioctl};
664 
665 ALTQ_MODULE(altq_blue, ALTQT_BLUE, &blue_sw);
666 
667 #endif /* KLD_MODULE */
668 
669 #endif /* ALTQ3_COMPAT */
670 #endif /* ALTQ_BLUE */
671