/*	@(#)rm_class.c  1.48     97/12/05 SMI */
/*	$KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $	*/

/*
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Network Research
 *	Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1977.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <net/altq/altq.h>
#include <net/altq/altq_rmclass.h>
#include <net/altq/altq_rmclass_debug.h>
#include <net/altq/altq_red.h>
#include <net/altq/altq_rio.h>

#ifdef CBQ_TRACE
static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
static struct cbqtrace *cbqtrace_ptr = NULL;
static int cbqtrace_count;
#endif

/*
 * Local Macros
 */

#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timeval *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, struct mbuf *);
static void	_rmc_dropq(rm_class_t *);
static struct mbuf *_rmc_getq(rm_class_t *);
static struct mbuf *_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timeval *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(void *);
static void	rmc_restart_dispatch(netmsg_t);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);

#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * Borrow the offtime of the class being borrowed from.
 * The reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * But when the borrowed-from class is overloaded (avgidle is close to
 * minidle), use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * If no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * Also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * The heuristics for setting the top-level variable (cutoff_) become:
 * 1. if a packet arrives for a not-overlimit class, set cutoff
 *    to the depth of the class.
 * 2. if cutoff is i, and a packet arrives for an overlimit class
 *    with an underlimit ancestor at a lower level than i (say j),
 *    then set cutoff to j.
 * 3. at scheduling a packet, if there is no underlimit class
 *    due to the current cutoff level, increase cutoff by 1 and
 *    then try to schedule again.
 *    (An illustrative walk-through of these rules follows below.)
 */
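
/*
 * Illustrative walk-through of the rules above (a hypothetical 3-level
 * tree: leaves at depth 0, agency classes at depth 1, root at depth 2):
 *  - a packet arrives for an underlimit leaf: rule 1 sets cutoff_ to 0,
 *    effectively disabling borrowing for the moment;
 *  - a packet arrives for an overlimit leaf whose agency parent is
 *    underlimit: rule 2 sets cutoff_ to 1, permitting one level of
 *    borrowing;
 *  - the scheduler finds nothing to send at the current cutoff_:
 *    rule 3 raises cutoff_ one level at a time until a sender is found,
 *    or the scan gives up and resets cutoff_ to RM_MAXDEPTH.
 */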

/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte	is the data rate of the interface in nanoseconds/byte.
 *		E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *		than 100% of the bandwidth, this number should be the
 *		'effective' rate for the class.  Let f be the
 *		bandwidth fraction allocated to this class, and let
 *		nsPerByte be the data rate of the output link in
 *		nanoseconds/byte.  Then nsecPerByte is set to
 *		nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *		for a class that gets 50% of an ethernet's bandwidth.
 *
 * action	the routine to call when the class is over limit.
 *
 * maxq		max allowable queue size for class (in packets).
 *
 * parent	parent class pointer.
 *
 * borrow	class to borrow from (should be either 'parent' or null).
 *
 * maxidle	max value allowed for class 'idle' time estimate (this
 *		parameter determines how large an initial burst of packets
 *		can be before overlimit action is invoked).
 *
 * offtime	how long 'delay' action will delay when class goes over
 *		limit (this parameter determines the steady-state burst
 *		size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *	ptime = s * nsPerByte * (1 - f) / f
 *	maxidle = ptime * (1 - g^b) / g^b
 *	minidle = -ptime * (1 / (f - 1))
 *	offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 *	maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 *	offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 *	maxidle = maxidle * (8.0 / nsecPerByte);
 *	offtime = offtime * (8.0 / nsecPerByte);
 */
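
/*
 * Worked example of the scaling above (illustrative numbers only,
 * assuming RM_FILTER_GAIN = 5, i.e. g = 31/32): for a 10Mb/s link shared
 * 50/50 (f = 0.5), nsPerByte = 800 and nsecPerByte = 800 / 0.5 = 1600.
 * With an average packet size s = 1000 bytes and a burst allowance of
 * b = 2 packets:
 *
 *	ptime   = 1000 * 800 * (1 - 0.5) / 0.5            =  800000 ns
 *	maxidle = 800000 * (1 - g^2) / g^2               ~=   52445 ns
 *	offtime = 800000 * (1 + (1/(1-g)) * (1-g)/g)     ~= 1625806 ns
 *
 * Scaled as described above (by 2^RM_FILTER_GAIN * 8 / (1000*nsecPerByte)
 * for maxidle and by 8 / (1000*nsecPerByte) for offtime), the integer
 * values actually passed to rmc_newclass() both come out to roughly 8.
 */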
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
	     void (*action)(rm_class_t *, rm_class_t *), int maxq,
	     struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
	     int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class *cl;
	struct rm_class *peer;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cl->callout_);
	netmsg_init(&cl->callout_nmsg_, NULL, &netisr_adone_rport,
	    MSGF_PRIORITY, rmc_restart_dispatch);
	cl->callout_nmsg_.lmsg.u.ms_resultp = cl;

	cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/*
	 * put the class into the class tree
	 */
	crit_enter();
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	crit_exit();
	return (cl);
}

int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
	     int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat *ifd;
	u_int old_allotment;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

	crit_enter();
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	crit_exit();
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 * the appropriate round robin weights for the CBQ weighted round robin
 * algorithm.
 *
 * Returns:	NONE
 */
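
/*
 * Illustrative arithmetic (hypothetical numbers): with maxpkt_ = 1500
 * bytes and two classes at priority i whose allotments are 600000 and
 * 200000 bytes/sec, alloc_[i] = 800000 and num_[i] = 2, so
 * M_[i] = 800000 / (2 * 1500) = 266.  The per-class weighted allotments
 * then become 600000/266 ~= 2255 and 200000/266 ~= 751 bytes per WRR
 * round, i.e. roughly a 3:1 split of each round between the two classes.
 */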

static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int i;
	struct rm_class *cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
			    (ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
					    ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 * appropriate depth of class 'cl' and its ancestors.
 *
 * Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t *t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 * the depth of the tree after a class has been deleted.
 *
 * Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t *p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t *t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}

/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 * function deletes a class from the link-sharing structure and frees
 * all resources associated with the class.
 *
 * Returns:	NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class *p, *head, *previous;
	struct netmsg_base smsg;
	struct ifaltq_subque *ifsq =
	    &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

	KKASSERT(cl->children_ == NULL);

	ALTQ_SQ_ASSERT_LOCKED(ifsq);
	ALTQ_SQ_UNLOCK(ifsq);
	callout_cancel(&cl->callout_);
	/* Make sure that cl->callout_nmsg_ stops. */
	netmsg_init(&smsg, NULL, &curthread->td_msgport, 0,
	    netmsg_sync_handler);
	lwkt_domsg(netisr_cpuport(0), &smsg.lmsg, 0);
	callout_terminate(&cl->callout_);
	ALTQ_SQ_LOCK(ifsq);

	crit_enter();

	if (ifd->pollcache_ == cl)
		ifd->pollcache_ = NULL;

	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *	 re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			KKASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			KKASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	crit_exit();

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
	}
	kfree(cl->q_, M_ALTQ);
	kfree(cl, M_ALTQ);
}

/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 * associated with the output portion of interface 'ifp'.  'ifd' is
 * where the structures will be built (for backwards compatibility, the
 * structures aren't kept in the ifnet struct).  'nsecPerByte'
 * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 * 'restart' is the driver-specific routine that the generic 'delay
 * until under limit' action will call to restart output.  `maxq'
 * is the queue size of the 'link' & 'default' classes.  'maxqueued'
 * is the maximum number of packets that the resource management
 * code will allow to be queued 'downstream' (this is typically 1).
 *
 * Returns:	NONE
 */
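
/*
 * Note on nsecPerByte (illustrative arithmetic): for a link of bandwidth
 * B bits/sec, nsecPerByte = 8 * 10^9 / B.  E.g. a 10Mb/s ethernet gives
 * 8e9 / 10e6 = 800 ns/byte, and a 100Mb/s link gives 80 ns/byte.
 * RM_NS_PER_SEC / nsecPerByte (used for allotment_ above) is then simply
 * the link rate expressed in bytes per second.
 */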

void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
	 void (*restart)(struct ifaltq *), int maxq, int maxqueued,
	 u_int maxidle, int minidle, u_int offtime, int flags)
{
	int i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero(ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
	    maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
	if (ifd->root_ == NULL) {
		kprintf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}

/*
 * void
 * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
 * mbuf 'm' to queue for resource class 'cl'.  This routine is called
 * by a driver's if_output routine.  This routine must be called with
 * output packet completion interrupts locked out (to avoid racing with
 * rmc_dequeue_next).
 *
 * Returns:	0 on successful queueing
 *		-1 when packet drop occurs
 */
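
/*
 * Typical call pattern (a sketch only; in this tree the actual caller is
 * the CBQ discipline's enqueue path, which lives outside this file):
 *
 *	cl = <classify the mbuf to an rm_class_t>;
 *	if (rmc_queue_packet(cl, m) != 0)
 *		return (ENOBUFS);
 *
 * Freeing of whatever packet gets dropped is handled inside the rm
 * machinery (see _rmc_addq()/rmc_drop_action()), so on failure the caller
 * only needs to report the error.
 */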
int
rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
{
	struct timeval now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int cpri = cl->pri_;
	int is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			    borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
				    cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 * classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int i;
	rm_class_t *p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}

/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */
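
/*
 * Sketch of the borrow walk below (descriptive only): starting at 'cl',
 * the loop climbs the borrow_ chain as long as the current class is
 * still overlimit (now is before its undertime_).  It stops either at an
 * underlimit ancestor, which is recorded in ifd->borrowed_[ifd->qi_] and
 * makes the class eligible to send, or when borrowing is exhausted or
 * blocked by cutoff_, in which case 0 is returned (the overlimit action
 * is invoked only when the cutoff is not what blocked us).
 */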

static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p = cl;
	rm_class_t *top;
	struct rm_ifdat *ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		callout_stop(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 * packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */
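
/*
 * Illustration of the deficit handling below (descriptive only): on the
 * first pass over a priority level, a class that is underlimit but has
 * already used up its bytes_alloc_ only marks deficit = 1.  The level is
 * then rescanned with deficit = 2, and on that pass such a class may send
 * anyway, so a class whose w_allotment_ is smaller than its largest packet
 * still makes progress instead of being starved.
 */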

static struct mbuf *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class *cl = NULL, *first = NULL;
	u_int deficit;
	int cpri;
	struct mbuf *m;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	}
	/* mode == ALTDQ_POLL || pollcache == NULL */
	ifd->pollcache_ = NULL;
	ifd->borrowed_[ifd->qi_] = NULL;
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_]" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		KKASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				}
				else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit.  */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0 /* too time-consuming for nothing */
	if (cl->sleeping_)
		callout_stop(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

10614d723e5aSJoerg Sonnenberger /*
10624d723e5aSJoerg Sonnenberger * Dequeue the packet and do the bookkeeping...
10634d723e5aSJoerg Sonnenberger */
10644d723e5aSJoerg Sonnenberger _wrr_out:
10654d723e5aSJoerg Sonnenberger if (op == ALTDQ_REMOVE) {
10664d723e5aSJoerg Sonnenberger m = _rmc_getq(cl);
10674d723e5aSJoerg Sonnenberger if (m == NULL)
10684d723e5aSJoerg Sonnenberger panic("_rmc_wrr_dequeue_next");
10694d723e5aSJoerg Sonnenberger if (qempty(cl->q_))
10704d723e5aSJoerg Sonnenberger ifd->na_[cpri]--;
10714d723e5aSJoerg Sonnenberger
10724d723e5aSJoerg Sonnenberger /*
10734d723e5aSJoerg Sonnenberger * Update class statistics and link data.
10744d723e5aSJoerg Sonnenberger */
10754d723e5aSJoerg Sonnenberger if (cl->bytes_alloc_ > 0)
10764d723e5aSJoerg Sonnenberger cl->bytes_alloc_ -= m_pktlen(m);
10774d723e5aSJoerg Sonnenberger
10784d723e5aSJoerg Sonnenberger if ((cl->bytes_alloc_ <= 0) || first == cl)
10794d723e5aSJoerg Sonnenberger ifd->active_[cl->pri_] = cl->peer_;
10804d723e5aSJoerg Sonnenberger else
10814d723e5aSJoerg Sonnenberger ifd->active_[cl->pri_] = cl;
10824d723e5aSJoerg Sonnenberger
10834d723e5aSJoerg Sonnenberger ifd->class_[ifd->qi_] = cl;
10844d723e5aSJoerg Sonnenberger ifd->curlen_[ifd->qi_] = m_pktlen(m);
10854d723e5aSJoerg Sonnenberger ifd->now_[ifd->qi_] = now;
10864d723e5aSJoerg Sonnenberger ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
10874d723e5aSJoerg Sonnenberger ifd->queued_++;
10884d723e5aSJoerg Sonnenberger } else {
10894d723e5aSJoerg Sonnenberger /* mode == ALTDQ_POLL */
10904d723e5aSJoerg Sonnenberger m = _rmc_pollq(cl);
1091893c125dSSepherosa Ziehau #ifdef foo
1092893c125dSSepherosa Ziehau /*
1093893c125dSSepherosa Ziehau * Don't use poll cache; the poll/dequeue
1094893c125dSSepherosa Ziehau * model is no longer applicable to an SMP
1095893c125dSSepherosa Ziehau * system, e.g.:
1096893c125dSSepherosa Ziehau * CPU-A CPU-B
1097893c125dSSepherosa Ziehau * : :
1098893c125dSSepherosa Ziehau * poll :
1099893c125dSSepherosa Ziehau * : poll
1100893c125dSSepherosa Ziehau * dequeue (+) :
1101893c125dSSepherosa Ziehau *
1102893c125dSSepherosa Ziehau * The dequeue at (+) will hit the poll
1103893c125dSSepherosa Ziehau * cache set by CPU-B.
1104893c125dSSepherosa Ziehau */
11054d723e5aSJoerg Sonnenberger ifd->pollcache_ = cl;
1106893c125dSSepherosa Ziehau #endif
11074d723e5aSJoerg Sonnenberger }
11084d723e5aSJoerg Sonnenberger return (m);
11094d723e5aSJoerg Sonnenberger }
11104d723e5aSJoerg Sonnenberger
11114d723e5aSJoerg Sonnenberger /*
11124d723e5aSJoerg Sonnenberger * Dequeue & return next packet from the highest priority class that
11134d723e5aSJoerg Sonnenberger * has a packet to send & has enough allocation to send it. This
11144d723e5aSJoerg Sonnenberger * routine is called by a driver whenever it needs a new packet to
11154d723e5aSJoerg Sonnenberger * output.
11164d723e5aSJoerg Sonnenberger */
11174d723e5aSJoerg Sonnenberger static struct mbuf *
11184d723e5aSJoerg Sonnenberger _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
11194d723e5aSJoerg Sonnenberger {
11204d723e5aSJoerg Sonnenberger struct mbuf *m;
11214d723e5aSJoerg Sonnenberger int cpri;
11224d723e5aSJoerg Sonnenberger struct rm_class *cl, *first = NULL;
11234d723e5aSJoerg Sonnenberger struct timeval now;
11244d723e5aSJoerg Sonnenberger
11254d723e5aSJoerg Sonnenberger RM_GETTIME(now);
11264d723e5aSJoerg Sonnenberger
11274d723e5aSJoerg Sonnenberger /*
11284d723e5aSJoerg Sonnenberger * if the driver polls the top of the queue and then removes
11294d723e5aSJoerg Sonnenberger * the polled packet, we must return the same packet.
11304d723e5aSJoerg Sonnenberger */
11314d723e5aSJoerg Sonnenberger if (op == ALTDQ_REMOVE && ifd->pollcache_) {
11324d723e5aSJoerg Sonnenberger cl = ifd->pollcache_;
11334d723e5aSJoerg Sonnenberger cpri = cl->pri_;
11344d723e5aSJoerg Sonnenberger ifd->pollcache_ = NULL;
11354d723e5aSJoerg Sonnenberger goto _prr_out;
11360c77d800SSascha Wildner }
11374d723e5aSJoerg Sonnenberger /* mode == ALTDQ_POLL || pollcache == NULL */
11384d723e5aSJoerg Sonnenberger ifd->pollcache_ = NULL;
11394d723e5aSJoerg Sonnenberger ifd->borrowed_[ifd->qi_] = NULL;
11404d723e5aSJoerg Sonnenberger #ifdef ADJUST_CUTOFF
11414d723e5aSJoerg Sonnenberger _again:
11424d723e5aSJoerg Sonnenberger #endif
11434d723e5aSJoerg Sonnenberger for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
11444d723e5aSJoerg Sonnenberger if (ifd->na_[cpri] == 0)
11454d723e5aSJoerg Sonnenberger continue;
11464d723e5aSJoerg Sonnenberger cl = ifd->active_[cpri];
11474d723e5aSJoerg Sonnenberger KKASSERT(cl != NULL);
11484d723e5aSJoerg Sonnenberger do {
11494d723e5aSJoerg Sonnenberger if (!qempty(cl->q_)) {
11504d723e5aSJoerg Sonnenberger if ((cl->undertime_.tv_sec == 0) ||
11514d723e5aSJoerg Sonnenberger rmc_under_limit(cl, &now))
11524d723e5aSJoerg Sonnenberger goto _prr_out;
11534d723e5aSJoerg Sonnenberger if (first == NULL && cl->borrow_ != NULL)
11544d723e5aSJoerg Sonnenberger first = cl;
11554d723e5aSJoerg Sonnenberger }
11564d723e5aSJoerg Sonnenberger cl = cl->peer_;
11574d723e5aSJoerg Sonnenberger } while (cl != ifd->active_[cpri]);
11584d723e5aSJoerg Sonnenberger }
11594d723e5aSJoerg Sonnenberger
11604d723e5aSJoerg Sonnenberger #ifdef ADJUST_CUTOFF
11614d723e5aSJoerg Sonnenberger /*
11624d723e5aSJoerg Sonnenberger * no underlimit class found. if cutoff is taking effect, increase
11634d723e5aSJoerg Sonnenberger * cutoff and try again.
11644d723e5aSJoerg Sonnenberger */
11654d723e5aSJoerg Sonnenberger if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
11664d723e5aSJoerg Sonnenberger ifd->cutoff_++;
11674d723e5aSJoerg Sonnenberger goto _again;
11684d723e5aSJoerg Sonnenberger }
11694d723e5aSJoerg Sonnenberger #endif /* ADJUST_CUTOFF */
11704d723e5aSJoerg Sonnenberger /*
11714d723e5aSJoerg Sonnenberger * If LINK_EFFICIENCY is turned on, then the first overlimit
11724d723e5aSJoerg Sonnenberger * class we encounter will send a packet if all the classes
11734d723e5aSJoerg Sonnenberger * of the link-sharing structure are overlimit.
11744d723e5aSJoerg Sonnenberger */
11754d723e5aSJoerg Sonnenberger reset_cutoff(ifd);
11764d723e5aSJoerg Sonnenberger if (!ifd->efficient_ || first == NULL)
11774d723e5aSJoerg Sonnenberger return (NULL);
11784d723e5aSJoerg Sonnenberger
11794d723e5aSJoerg Sonnenberger cl = first;
11804d723e5aSJoerg Sonnenberger cpri = cl->pri_;
11814d723e5aSJoerg Sonnenberger #if 0 /* too time-consuming for nothing */
11824d723e5aSJoerg Sonnenberger if (cl->sleeping_)
11834d723e5aSJoerg Sonnenberger callout_stop(&cl->callout_);
11844d723e5aSJoerg Sonnenberger cl->sleeping_ = 0;
11854d723e5aSJoerg Sonnenberger cl->undertime_.tv_sec = 0;
11864d723e5aSJoerg Sonnenberger #endif
11874d723e5aSJoerg Sonnenberger ifd->borrowed_[ifd->qi_] = cl->borrow_;
11884d723e5aSJoerg Sonnenberger ifd->cutoff_ = cl->borrow_->depth_;
11894d723e5aSJoerg Sonnenberger
11904d723e5aSJoerg Sonnenberger /*
11914d723e5aSJoerg Sonnenberger * Dequeue the packet and do the bookkeeping...
11924d723e5aSJoerg Sonnenberger */
11934d723e5aSJoerg Sonnenberger _prr_out:
11944d723e5aSJoerg Sonnenberger if (op == ALTDQ_REMOVE) {
11954d723e5aSJoerg Sonnenberger m = _rmc_getq(cl);
11964d723e5aSJoerg Sonnenberger if (m == NULL)
11974d723e5aSJoerg Sonnenberger panic("_rmc_prr_dequeue_next");
11984d723e5aSJoerg Sonnenberger if (qempty(cl->q_))
11994d723e5aSJoerg Sonnenberger ifd->na_[cpri]--;
12004d723e5aSJoerg Sonnenberger
12014d723e5aSJoerg Sonnenberger ifd->active_[cpri] = cl->peer_;
12024d723e5aSJoerg Sonnenberger
12034d723e5aSJoerg Sonnenberger ifd->class_[ifd->qi_] = cl;
12044d723e5aSJoerg Sonnenberger ifd->curlen_[ifd->qi_] = m_pktlen(m);
12054d723e5aSJoerg Sonnenberger ifd->now_[ifd->qi_] = now;
12064d723e5aSJoerg Sonnenberger ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
12074d723e5aSJoerg Sonnenberger ifd->queued_++;
12084d723e5aSJoerg Sonnenberger } else {
12094d723e5aSJoerg Sonnenberger /* mode == ALTDQ_POLL */
12104d723e5aSJoerg Sonnenberger m = _rmc_pollq(cl);
1211893c125dSSepherosa Ziehau #ifdef foo
1212893c125dSSepherosa Ziehau /*
1213893c125dSSepherosa Ziehau * Don't use poll cache; the poll/dequeue
1214893c125dSSepherosa Ziehau * model is no longer applicable to an SMP
1215893c125dSSepherosa Ziehau * system, e.g.:
1216893c125dSSepherosa Ziehau * CPU-A CPU-B
1217893c125dSSepherosa Ziehau * : :
1218893c125dSSepherosa Ziehau * poll :
1219893c125dSSepherosa Ziehau * : poll
1220893c125dSSepherosa Ziehau * dequeue (+) :
1221893c125dSSepherosa Ziehau *
1222893c125dSSepherosa Ziehau * The dequeue at (+) will hit the poll
1223893c125dSSepherosa Ziehau * cache set by CPU-B.
1224893c125dSSepherosa Ziehau */
12254d723e5aSJoerg Sonnenberger ifd->pollcache_ = cl;
1226893c125dSSepherosa Ziehau #endif
12274d723e5aSJoerg Sonnenberger }
12284d723e5aSJoerg Sonnenberger return (m);
12294d723e5aSJoerg Sonnenberger }
12304d723e5aSJoerg Sonnenberger
12314d723e5aSJoerg Sonnenberger /*
12324d723e5aSJoerg Sonnenberger * struct mbuf *
12334d723e5aSJoerg Sonnenberger * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
12344d723e5aSJoerg Sonnenberger * is invoked by the packet driver to get the next packet to be
12354d723e5aSJoerg Sonnenberger * dequeued and output on the link. If WRR is enabled, then the
12364d723e5aSJoerg Sonnenberger * WRR dequeue next routine will determine the next packet to be sent.
12374d723e5aSJoerg Sonnenberger * Otherwise, packet-by-packet round robin is invoked.
12384d723e5aSJoerg Sonnenberger *
12394d723e5aSJoerg Sonnenberger * Returns: NULL, if a packet is not available or if all
12404d723e5aSJoerg Sonnenberger * classes are overlimit.
12414d723e5aSJoerg Sonnenberger *
12424d723e5aSJoerg Sonnenberger * Otherwise, Pointer to the next packet.
12434d723e5aSJoerg Sonnenberger */
12444d723e5aSJoerg Sonnenberger
12454d723e5aSJoerg Sonnenberger struct mbuf *
12464d723e5aSJoerg Sonnenberger rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
12474d723e5aSJoerg Sonnenberger {
12484d723e5aSJoerg Sonnenberger if (ifd->queued_ >= ifd->maxqueued_)
12494d723e5aSJoerg Sonnenberger return (NULL);
12504d723e5aSJoerg Sonnenberger else if (ifd->wrr_)
12514d723e5aSJoerg Sonnenberger return (_rmc_wrr_dequeue_next(ifd, mode));
12524d723e5aSJoerg Sonnenberger else
12534d723e5aSJoerg Sonnenberger return (_rmc_prr_dequeue_next(ifd, mode));
12544d723e5aSJoerg Sonnenberger }
12554d723e5aSJoerg Sonnenberger
12564d723e5aSJoerg Sonnenberger /*
12574d723e5aSJoerg Sonnenberger * Update the utilization estimate for the packet that just completed.
12584d723e5aSJoerg Sonnenberger * The packet's class & the parent(s) of that class all get their
12594d723e5aSJoerg Sonnenberger * estimators updated. This routine is called by the driver's output-
12604d723e5aSJoerg Sonnenberger * packet-completion interrupt service routine.
12614d723e5aSJoerg Sonnenberger */
12624d723e5aSJoerg Sonnenberger
12634d723e5aSJoerg Sonnenberger /*
12644d723e5aSJoerg Sonnenberger * a macro to approximate "divide by 1000"; it effectively multiplies
12654d723e5aSJoerg Sonnenberger * by about 0.000999 when the value has enough effective digits.
12664d723e5aSJoerg Sonnenberger * (on pentium, mul takes 9 cycles but div takes 46!)
12674d723e5aSJoerg Sonnenberger */
12684d723e5aSJoerg Sonnenberger #define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
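/*
 * Worked example: for t = 1000000 (1 ms expressed in nanoseconds) the
 * macro yields (976 + 15 + 7) = 998, close to the exact 1000 us; the
 * combined factor 1/1024 + 1/65536 + 1/131072 is about 0.0009995.
 */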
12694d723e5aSJoerg Sonnenberger void
12704d723e5aSJoerg Sonnenberger rmc_update_class_util(struct rm_ifdat *ifd)
12714d723e5aSJoerg Sonnenberger {
12724d723e5aSJoerg Sonnenberger int idle, avgidle, pktlen;
12734d723e5aSJoerg Sonnenberger int pkt_time, tidle;
12744d723e5aSJoerg Sonnenberger rm_class_t *cl, *borrowed;
12754d723e5aSJoerg Sonnenberger rm_class_t *borrows;
12764d723e5aSJoerg Sonnenberger struct timeval *nowp;
12774d723e5aSJoerg Sonnenberger
12784d723e5aSJoerg Sonnenberger /*
12794d723e5aSJoerg Sonnenberger * Get the most recent completed class.
12804d723e5aSJoerg Sonnenberger */
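/*
 * The dequeue routines record each outstanding packet's class, length,
 * borrow pointer and timestamp in the slot indexed by ifd->qi_; this
 * routine consumes those slots in order through ifd->qo_, so the two
 * indices form a ring of at most maxqueued_ in-flight packets.
 */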
12814d723e5aSJoerg Sonnenberger if ((cl = ifd->class_[ifd->qo_]) == NULL)
12824d723e5aSJoerg Sonnenberger return;
12834d723e5aSJoerg Sonnenberger
12844d723e5aSJoerg Sonnenberger pktlen = ifd->curlen_[ifd->qo_];
12854d723e5aSJoerg Sonnenberger borrowed = ifd->borrowed_[ifd->qo_];
12864d723e5aSJoerg Sonnenberger borrows = borrowed;
12874d723e5aSJoerg Sonnenberger
12884d723e5aSJoerg Sonnenberger PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
12894d723e5aSJoerg Sonnenberger
12904d723e5aSJoerg Sonnenberger /*
12914d723e5aSJoerg Sonnenberger * Run estimator on class and its ancestors.
12924d723e5aSJoerg Sonnenberger */
12934d723e5aSJoerg Sonnenberger /*
12944d723e5aSJoerg Sonnenberger * rmc_update_class_util is designed to be called when a
12954d723e5aSJoerg Sonnenberger * transfer completes, from an xmit-complete interrupt,
12964d723e5aSJoerg Sonnenberger * but most drivers don't implement an upcall for that.
12974d723e5aSJoerg Sonnenberger * So we just use the estimated completion time.
12984d723e5aSJoerg Sonnenberger * As a result, ifd->qi_ and ifd->qo_ are always synced.
12994d723e5aSJoerg Sonnenberger */
13004d723e5aSJoerg Sonnenberger nowp = &ifd->now_[ifd->qo_];
13014d723e5aSJoerg Sonnenberger /* get pkt_time (for link) in usec */
13024d723e5aSJoerg Sonnenberger #if 1 /* use approximation */
13034d723e5aSJoerg Sonnenberger pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
13044d723e5aSJoerg Sonnenberger pkt_time = NSEC_TO_USEC(pkt_time);
13054d723e5aSJoerg Sonnenberger #else
13064d723e5aSJoerg Sonnenberger pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
13074d723e5aSJoerg Sonnenberger #endif
13084d723e5aSJoerg Sonnenberger #if 1 /* ALTQ4PPP */
13094d723e5aSJoerg Sonnenberger if (TV_LT(nowp, &ifd->ifnow_)) {
13104d723e5aSJoerg Sonnenberger int iftime;
13114d723e5aSJoerg Sonnenberger
13124d723e5aSJoerg Sonnenberger /*
13134d723e5aSJoerg Sonnenberger * make sure the estimated completion time does not run
13144d723e5aSJoerg Sonnenberger * too far ahead. this can happen when the link layer supports
13154d723e5aSJoerg Sonnenberger * data compression or the interface speed is set to
13164d723e5aSJoerg Sonnenberger * a much lower value.
13174d723e5aSJoerg Sonnenberger */
13184d723e5aSJoerg Sonnenberger TV_DELTA(&ifd->ifnow_, nowp, iftime);
13194d723e5aSJoerg Sonnenberger if (iftime+pkt_time < ifd->maxiftime_) {
13204d723e5aSJoerg Sonnenberger TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
13214d723e5aSJoerg Sonnenberger } else {
13224d723e5aSJoerg Sonnenberger TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
13234d723e5aSJoerg Sonnenberger }
13244d723e5aSJoerg Sonnenberger } else {
13254d723e5aSJoerg Sonnenberger TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
13264d723e5aSJoerg Sonnenberger }
13274d723e5aSJoerg Sonnenberger #else
13284d723e5aSJoerg Sonnenberger if (TV_LT(nowp, &ifd->ifnow_)) {
13294d723e5aSJoerg Sonnenberger TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
13304d723e5aSJoerg Sonnenberger } else {
13314d723e5aSJoerg Sonnenberger TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
13324d723e5aSJoerg Sonnenberger }
13334d723e5aSJoerg Sonnenberger #endif
13344d723e5aSJoerg Sonnenberger
13354d723e5aSJoerg Sonnenberger while (cl != NULL) {
13364d723e5aSJoerg Sonnenberger TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
13374d723e5aSJoerg Sonnenberger if (idle >= 2000000)
13384d723e5aSJoerg Sonnenberger /*
13394d723e5aSJoerg Sonnenberger * this class is idle enough, reset avgidle.
13404d723e5aSJoerg Sonnenberger * (TV_DELTA returns 2000000 us when delta is large.)
13414d723e5aSJoerg Sonnenberger */
13424d723e5aSJoerg Sonnenberger cl->avgidle_ = cl->maxidle_;
13434d723e5aSJoerg Sonnenberger
13444d723e5aSJoerg Sonnenberger /* get pkt_time (for class) in usec */
13454d723e5aSJoerg Sonnenberger #if 1 /* use approximation */
13464d723e5aSJoerg Sonnenberger pkt_time = pktlen * cl->ns_per_byte_;
13474d723e5aSJoerg Sonnenberger pkt_time = NSEC_TO_USEC(pkt_time);
13484d723e5aSJoerg Sonnenberger #else
13494d723e5aSJoerg Sonnenberger pkt_time = pktlen * cl->ns_per_byte_ / 1000;
13504d723e5aSJoerg Sonnenberger #endif
13514d723e5aSJoerg Sonnenberger idle -= pkt_time;
13524d723e5aSJoerg Sonnenberger
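/*
 * avgidle_ is an exponentially weighted moving average of the class's
 * idle time: the gap since its previous (estimated) completion minus
 * the time this packet itself needed on the wire.  Each update decays
 * the old average by avgidle/2^RM_FILTER_GAIN before adding the new
 * sample, so a class that keeps sending faster than its allotted rate
 * drifts negative and trips the overlimit test below.
 */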
13534d723e5aSJoerg Sonnenberger avgidle = cl->avgidle_;
13544d723e5aSJoerg Sonnenberger avgidle += idle - (avgidle >> RM_FILTER_GAIN);
13554d723e5aSJoerg Sonnenberger cl->avgidle_ = avgidle;
13564d723e5aSJoerg Sonnenberger
13574d723e5aSJoerg Sonnenberger /* Are we overlimit ? */
13584d723e5aSJoerg Sonnenberger if (avgidle <= 0) {
13594d723e5aSJoerg Sonnenberger CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
13604d723e5aSJoerg Sonnenberger #if 1 /* ALTQ */
13614d723e5aSJoerg Sonnenberger /*
13624d723e5aSJoerg Sonnenberger * need some lower bound for avgidle, otherwise
13634d723e5aSJoerg Sonnenberger * a borrowing class gets unbounded penalty.
13644d723e5aSJoerg Sonnenberger */
13654d723e5aSJoerg Sonnenberger if (avgidle < cl->minidle_)
13664d723e5aSJoerg Sonnenberger avgidle = cl->avgidle_ = cl->minidle_;
13674d723e5aSJoerg Sonnenberger #endif
13684d723e5aSJoerg Sonnenberger /* set next idle to make avgidle 0 */
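/*
 * Assuming RM_POWER is (1 << RM_FILTER_GAIN), the second term equals
 * (2^gain - 1)/2^gain times -avgidle, which is positive here since
 * avgidle <= 0.  tidle is thus the idle time the class must see before
 * its next send for the filter above to bring avgidle_ back to zero,
 * with this packet's pkt_time standing in for the next packet's
 * transmission time.
 */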
13694d723e5aSJoerg Sonnenberger tidle = pkt_time +
13704d723e5aSJoerg Sonnenberger (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
13714d723e5aSJoerg Sonnenberger TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
13724d723e5aSJoerg Sonnenberger ++cl->stats_.over;
13734d723e5aSJoerg Sonnenberger } else {
13744d723e5aSJoerg Sonnenberger cl->avgidle_ =
13754d723e5aSJoerg Sonnenberger (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
13764d723e5aSJoerg Sonnenberger cl->undertime_.tv_sec = 0;
13774d723e5aSJoerg Sonnenberger if (cl->sleeping_) {
13784d723e5aSJoerg Sonnenberger callout_stop(&cl->callout_);
13794d723e5aSJoerg Sonnenberger cl->sleeping_ = 0;
13804d723e5aSJoerg Sonnenberger }
13814d723e5aSJoerg Sonnenberger }
13824d723e5aSJoerg Sonnenberger
13834d723e5aSJoerg Sonnenberger if (borrows != NULL) {
13844d723e5aSJoerg Sonnenberger if (borrows != cl)
13854d723e5aSJoerg Sonnenberger ++cl->stats_.borrows;
13864d723e5aSJoerg Sonnenberger else
13874d723e5aSJoerg Sonnenberger borrows = NULL;
13884d723e5aSJoerg Sonnenberger }
13894d723e5aSJoerg Sonnenberger cl->last_ = ifd->ifnow_;
13904d723e5aSJoerg Sonnenberger cl->last_pkttime_ = pkt_time;
13914d723e5aSJoerg Sonnenberger
13924d723e5aSJoerg Sonnenberger #if 1
13934d723e5aSJoerg Sonnenberger if (cl->parent_ == NULL) {
13944d723e5aSJoerg Sonnenberger /* take stats of root class */
13954d723e5aSJoerg Sonnenberger PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
13964d723e5aSJoerg Sonnenberger }
13974d723e5aSJoerg Sonnenberger #endif
13984d723e5aSJoerg Sonnenberger
13994d723e5aSJoerg Sonnenberger cl = cl->parent_;
14004d723e5aSJoerg Sonnenberger }
14014d723e5aSJoerg Sonnenberger
14024d723e5aSJoerg Sonnenberger /*
14034d723e5aSJoerg Sonnenberger * Check to see if cutoff needs to be set to a new level.
14044d723e5aSJoerg Sonnenberger */
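/*
 * cutoff_ bounds the depth at which borrowing is permitted (see the
 * ADJUST_CUTOFF handling in the dequeue routines).  When this packet
 * went out on borrowed bandwidth, either let rmc_tl_satisfied() relax
 * the cutoff again (the queue has drained or the borrowed-from class
 * is no longer overlimit), or clamp cutoff_ to the depth of the class
 * that was borrowed from.
 */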
14054d723e5aSJoerg Sonnenberger cl = ifd->class_[ifd->qo_];
14064d723e5aSJoerg Sonnenberger if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
14074d723e5aSJoerg Sonnenberger #if 1 /* ALTQ */
14084d723e5aSJoerg Sonnenberger if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
14094d723e5aSJoerg Sonnenberger rmc_tl_satisfied(ifd, nowp);
14104d723e5aSJoerg Sonnenberger CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
14114d723e5aSJoerg Sonnenberger } else {
14124d723e5aSJoerg Sonnenberger ifd->cutoff_ = borrowed->depth_;
14134d723e5aSJoerg Sonnenberger CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
14144d723e5aSJoerg Sonnenberger }
14154d723e5aSJoerg Sonnenberger #else /* !ALTQ */
14164d723e5aSJoerg Sonnenberger if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
14174d723e5aSJoerg Sonnenberger reset_cutoff(ifd);
14184d723e5aSJoerg Sonnenberger #ifdef notdef
14194d723e5aSJoerg Sonnenberger rmc_tl_satisfied(ifd, &now);
14204d723e5aSJoerg Sonnenberger #endif
14214d723e5aSJoerg Sonnenberger CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
14224d723e5aSJoerg Sonnenberger } else {
14234d723e5aSJoerg Sonnenberger ifd->cutoff_ = borrowed->depth_;
14244d723e5aSJoerg Sonnenberger CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
14254d723e5aSJoerg Sonnenberger }
14264d723e5aSJoerg Sonnenberger #endif /* !ALTQ */
14274d723e5aSJoerg Sonnenberger }
14284d723e5aSJoerg Sonnenberger
14294d723e5aSJoerg Sonnenberger /*
14304d723e5aSJoerg Sonnenberger * Release class slot
14314d723e5aSJoerg Sonnenberger */
14324d723e5aSJoerg Sonnenberger ifd->borrowed_[ifd->qo_] = NULL;
14334d723e5aSJoerg Sonnenberger ifd->class_[ifd->qo_] = NULL;
14344d723e5aSJoerg Sonnenberger ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
14354d723e5aSJoerg Sonnenberger ifd->queued_--;
14364d723e5aSJoerg Sonnenberger }
14374d723e5aSJoerg Sonnenberger
14384d723e5aSJoerg Sonnenberger /*
14394d723e5aSJoerg Sonnenberger * void
14404d723e5aSJoerg Sonnenberger * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
14414d723e5aSJoerg Sonnenberger * over-limit action routines. These get invoked by rmc_under_limit()
14424d723e5aSJoerg Sonnenberger * if a class with packets to send is over its bandwidth limit & can't
14434d723e5aSJoerg Sonnenberger * borrow from a parent class.
14444d723e5aSJoerg Sonnenberger *
14454d723e5aSJoerg Sonnenberger * Returns: NONE
14464d723e5aSJoerg Sonnenberger */
14474d723e5aSJoerg Sonnenberger
14484d723e5aSJoerg Sonnenberger static void
14494d723e5aSJoerg Sonnenberger rmc_drop_action(struct rm_class *cl)
14504d723e5aSJoerg Sonnenberger {
14514d723e5aSJoerg Sonnenberger struct rm_ifdat *ifd = cl->ifdat_;
14524d723e5aSJoerg Sonnenberger
14534d723e5aSJoerg Sonnenberger KKASSERT(qlen(cl->q_) > 0);
14544d723e5aSJoerg Sonnenberger _rmc_dropq(cl);
14554d723e5aSJoerg Sonnenberger if (qempty(cl->q_))
14564d723e5aSJoerg Sonnenberger ifd->na_[cl->pri_]--;
14574d723e5aSJoerg Sonnenberger }
14584d723e5aSJoerg Sonnenberger
14593bf25ce1SSascha Wildner void
14603bf25ce1SSascha Wildner rmc_dropall(struct rm_class *cl)
14614d723e5aSJoerg Sonnenberger {
14624d723e5aSJoerg Sonnenberger struct rm_ifdat *ifd = cl->ifdat_;
14634d723e5aSJoerg Sonnenberger
14644d723e5aSJoerg Sonnenberger if (!qempty(cl->q_)) {
14654d723e5aSJoerg Sonnenberger _flushq(cl->q_);
14664d723e5aSJoerg Sonnenberger
14674d723e5aSJoerg Sonnenberger ifd->na_[cl->pri_]--;
14684d723e5aSJoerg Sonnenberger }
14694d723e5aSJoerg Sonnenberger }
14704d723e5aSJoerg Sonnenberger
14714d723e5aSJoerg Sonnenberger /*
14724d723e5aSJoerg Sonnenberger * void
14734d723e5aSJoerg Sonnenberger * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
14744d723e5aSJoerg Sonnenberger * delay action routine. It is invoked via rmc_under_limit when the
14754d723e5aSJoerg Sonnenberger * packet is discovered to be overlimit.
14764d723e5aSJoerg Sonnenberger *
14774d723e5aSJoerg Sonnenberger * If the delay action is the result of the borrow class being overlimit,
14784d723e5aSJoerg Sonnenberger * then delay for the offtime of the overlimit borrow class.
14794d723e5aSJoerg Sonnenberger *
14804d723e5aSJoerg Sonnenberger * Returns: NONE
14814d723e5aSJoerg Sonnenberger */
14824d723e5aSJoerg Sonnenberger
14834d723e5aSJoerg Sonnenberger void
14844d723e5aSJoerg Sonnenberger rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
14854d723e5aSJoerg Sonnenberger {
14864d723e5aSJoerg Sonnenberger int delay, t, extradelay;
14874d723e5aSJoerg Sonnenberger
14884d723e5aSJoerg Sonnenberger cl->stats_.overactions++;
14894d723e5aSJoerg Sonnenberger TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
14904d723e5aSJoerg Sonnenberger #ifndef BORROW_OFFTIME
14914d723e5aSJoerg Sonnenberger delay += cl->offtime_;
14924d723e5aSJoerg Sonnenberger #endif
14934d723e5aSJoerg Sonnenberger
14944d723e5aSJoerg Sonnenberger if (!cl->sleeping_) {
14954d723e5aSJoerg Sonnenberger CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
14964d723e5aSJoerg Sonnenberger #ifdef BORROW_OFFTIME
14974d723e5aSJoerg Sonnenberger if (borrow != NULL)
14984d723e5aSJoerg Sonnenberger extradelay = borrow->offtime_;
14994d723e5aSJoerg Sonnenberger else
15004d723e5aSJoerg Sonnenberger #endif
15014d723e5aSJoerg Sonnenberger extradelay = cl->offtime_;
15024d723e5aSJoerg Sonnenberger
15034d723e5aSJoerg Sonnenberger #ifdef ALTQ
15044d723e5aSJoerg Sonnenberger /*
15054d723e5aSJoerg Sonnenberger * XXX recalculate suspend time:
15064d723e5aSJoerg Sonnenberger * current undertime is (tidle + pkt_time) calculated
15074d723e5aSJoerg Sonnenberger * from the last transmission.
15084d723e5aSJoerg Sonnenberger * tidle: time required to bring avgidle back to 0
15094d723e5aSJoerg Sonnenberger * pkt_time: target waiting time for this class
15104d723e5aSJoerg Sonnenberger * we need to replace pkt_time by offtime
15114d723e5aSJoerg Sonnenberger */
15124d723e5aSJoerg Sonnenberger extradelay -= cl->last_pkttime_;
15134d723e5aSJoerg Sonnenberger #endif
15144d723e5aSJoerg Sonnenberger if (extradelay > 0) {
15154d723e5aSJoerg Sonnenberger TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
15164d723e5aSJoerg Sonnenberger delay += extradelay;
15174d723e5aSJoerg Sonnenberger }
15184d723e5aSJoerg Sonnenberger
15194d723e5aSJoerg Sonnenberger cl->sleeping_ = 1;
15204d723e5aSJoerg Sonnenberger cl->stats_.delays++;
15214d723e5aSJoerg Sonnenberger
15224d723e5aSJoerg Sonnenberger /*
15234d723e5aSJoerg Sonnenberger * Since packets are phased randomly with respect to the
15244d723e5aSJoerg Sonnenberger * clock, 1 tick (the next clock tick) can be an arbitrarily
15254d723e5aSJoerg Sonnenberger * short time so we have to wait for at least two ticks.
15264d723e5aSJoerg Sonnenberger * NOTE: If there's no other traffic, we need the timer as
15274d723e5aSJoerg Sonnenberger * a 'backstop' to restart this class.
15284d723e5aSJoerg Sonnenberger */
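/*
 * Convert the delay from microseconds to clock ticks, rounding up;
 * ustick is the length of one tick in microseconds.  The floor of two
 * ticks implements the minimum wait described above.
 */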
1529a591f597SMatthew Dillon if (delay > ustick * 2)
1530a591f597SMatthew Dillon t = (delay + ustick - 1) / ustick;
15314d723e5aSJoerg Sonnenberger else
15324d723e5aSJoerg Sonnenberger t = 2;
1533ef3829f0SSepherosa Ziehau callout_reset_bycpu(&cl->callout_, t, rmc_restart, cl, 0);
15344d723e5aSJoerg Sonnenberger }
15354d723e5aSJoerg Sonnenberger }
15364d723e5aSJoerg Sonnenberger
15374d723e5aSJoerg Sonnenberger /*
15384d723e5aSJoerg Sonnenberger * void
15394d723e5aSJoerg Sonnenberger * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
15404d723e5aSJoerg Sonnenberger * called by the system timer code & is responsible for checking if the
15414d723e5aSJoerg Sonnenberger * class is still sleeping (it might have been restarted as a side
15424d723e5aSJoerg Sonnenberger * effect of the queue scan on a packet arrival) and, if so, restarting
15434d723e5aSJoerg Sonnenberger * output for the class. Inspecting the class state & restarting output
15444d723e5aSJoerg Sonnenberger * require locking the class structure. In general the driver is
15454d723e5aSJoerg Sonnenberger * responsible for locking but this is the only routine that is not
15464d723e5aSJoerg Sonnenberger * called directly or indirectly from the interface driver so it has
15474d723e5aSJoerg Sonnenberger * to know about system locking conventions. Under BSD, locking is done
15484d723e5aSJoerg Sonnenberger * by raising IPL to splimp so that's what's implemented here. On a
15494d723e5aSJoerg Sonnenberger * different system this would probably need to be changed.
15504d723e5aSJoerg Sonnenberger *
155178195a76SMatthew Dillon * Since this function is called from an independent timeout, we
155278195a76SMatthew Dillon * have to set up the lock conditions expected for the ALTQ operation.
155378195a76SMatthew Dillon * Note that the restart will probably fall through to an if_start.
155478195a76SMatthew Dillon *
15554d723e5aSJoerg Sonnenberger * Returns: NONE
15564d723e5aSJoerg Sonnenberger */
15574d723e5aSJoerg Sonnenberger
15584d723e5aSJoerg Sonnenberger static void
1559ef3829f0SSepherosa Ziehau rmc_restart_dispatch(netmsg_t nmsg)
15604d723e5aSJoerg Sonnenberger {
1561ef3829f0SSepherosa Ziehau struct rm_class *cl = nmsg->lmsg.u.ms_resultp;
15624d723e5aSJoerg Sonnenberger struct rm_ifdat *ifd = cl->ifdat_;
1563bb0b9a4eSSepherosa Ziehau struct ifaltq_subque *ifsq =
1564bb0b9a4eSSepherosa Ziehau &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
15654d723e5aSJoerg Sonnenberger
15665204e13cSSepherosa Ziehau ASSERT_NETISR0;
1567ef3829f0SSepherosa Ziehau
1568ef3829f0SSepherosa Ziehau crit_enter();
1569ef3829f0SSepherosa Ziehau lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */
1570ef3829f0SSepherosa Ziehau crit_exit();
1571ef3829f0SSepherosa Ziehau
1572f0a26983SSepherosa Ziehau ALTQ_SQ_LOCK(ifsq);
15734d723e5aSJoerg Sonnenberger if (cl->sleeping_) {
15744d723e5aSJoerg Sonnenberger cl->sleeping_ = 0;
15754d723e5aSJoerg Sonnenberger cl->undertime_.tv_sec = 0;
15764d723e5aSJoerg Sonnenberger
15774d723e5aSJoerg Sonnenberger if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
15784d723e5aSJoerg Sonnenberger CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
15794d723e5aSJoerg Sonnenberger (ifd->restart)(ifd->ifq_);
15804d723e5aSJoerg Sonnenberger }
15814d723e5aSJoerg Sonnenberger }
1582f0a26983SSepherosa Ziehau ALTQ_SQ_UNLOCK(ifsq);
15834d723e5aSJoerg Sonnenberger }
15844d723e5aSJoerg Sonnenberger
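/*
 * rmc_restart() is the callout handler itself: the callout is armed on
 * cpu0 by rmc_delay_action(), and all this handler does is hand the
 * class's pre-allocated netmsg to netisr0, where rmc_restart_dispatch()
 * above performs the actual restart.  The MSGF_DONE check avoids
 * re-sending a message that is still being processed.
 */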
1585ef3829f0SSepherosa Ziehau static void
1586ef3829f0SSepherosa Ziehau rmc_restart(void *xcl)
1587ef3829f0SSepherosa Ziehau {
1588ef3829f0SSepherosa Ziehau struct rm_class *cl = xcl;
1589ef3829f0SSepherosa Ziehau struct lwkt_msg *lmsg = &cl->callout_nmsg_.lmsg;
1590ef3829f0SSepherosa Ziehau
1591ef3829f0SSepherosa Ziehau KASSERT(mycpuid == 0, ("not on cpu0"));
1592ef3829f0SSepherosa Ziehau crit_enter();
1593ef3829f0SSepherosa Ziehau if (lmsg->ms_flags & MSGF_DONE)
1594ef3829f0SSepherosa Ziehau lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);
1595ef3829f0SSepherosa Ziehau crit_exit();
1596ef3829f0SSepherosa Ziehau }
1597ef3829f0SSepherosa Ziehau
15984d723e5aSJoerg Sonnenberger /*
15994d723e5aSJoerg Sonnenberger * void
16004d723e5aSJoerg Sonnenberger * rmc_root_overlimit(struct rm_class *cl) - This the generic overlimit
16014d723e5aSJoerg Sonnenberger * handling routine for the root class of the link sharing structure.
16024d723e5aSJoerg Sonnenberger *
16034d723e5aSJoerg Sonnenberger * Returns: NONE
16044d723e5aSJoerg Sonnenberger */
16054d723e5aSJoerg Sonnenberger
16064d723e5aSJoerg Sonnenberger static void
16074d723e5aSJoerg Sonnenberger rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
16084d723e5aSJoerg Sonnenberger {
16094d723e5aSJoerg Sonnenberger panic("rmc_root_overlimit");
16104d723e5aSJoerg Sonnenberger }
16114d723e5aSJoerg Sonnenberger
16124d723e5aSJoerg Sonnenberger /*
16134d723e5aSJoerg Sonnenberger * Packet queue handling routines. Their purpose is to localize the
16144d723e5aSJoerg Sonnenberger * effect on the rest of the code of whether the queues are RED queues
16154d723e5aSJoerg Sonnenberger * or droptail queues.
16164d723e5aSJoerg Sonnenberger */
16174d723e5aSJoerg Sonnenberger
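/*
 * Enqueue through whatever discipline is active on the class queue:
 * RIO if configured, else RED, otherwise a plain tail enqueue (with the
 * DS field cleared first when RMCF_CLEARDSCP is set).
 */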
16184d723e5aSJoerg Sonnenberger static int
16194d723e5aSJoerg Sonnenberger _rmc_addq(rm_class_t *cl, struct mbuf *m)
16204d723e5aSJoerg Sonnenberger {
16214d723e5aSJoerg Sonnenberger #ifdef ALTQ_RIO
16224d723e5aSJoerg Sonnenberger if (q_is_rio(cl->q_))
16234d723e5aSJoerg Sonnenberger return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
16244d723e5aSJoerg Sonnenberger #endif
16254d723e5aSJoerg Sonnenberger #ifdef ALTQ_RED
16264d723e5aSJoerg Sonnenberger if (q_is_red(cl->q_))
16274d723e5aSJoerg Sonnenberger return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
16284d723e5aSJoerg Sonnenberger #endif /* ALTQ_RED */
16294d723e5aSJoerg Sonnenberger
16304d723e5aSJoerg Sonnenberger if (cl->flags_ & RMCF_CLEARDSCP)
16314d723e5aSJoerg Sonnenberger write_dsfield(m, cl->pktattr_, 0);
16324d723e5aSJoerg Sonnenberger
16334d723e5aSJoerg Sonnenberger _addq(cl->q_, m);
16344d723e5aSJoerg Sonnenberger return (0);
16354d723e5aSJoerg Sonnenberger }
16364d723e5aSJoerg Sonnenberger
16374d723e5aSJoerg Sonnenberger /* note: _rmc_dropq is not called for red */
16384d723e5aSJoerg Sonnenberger static void
16394d723e5aSJoerg Sonnenberger _rmc_dropq(rm_class_t *cl)
16404d723e5aSJoerg Sonnenberger {
16414d723e5aSJoerg Sonnenberger struct mbuf *m;
16424d723e5aSJoerg Sonnenberger
16434d723e5aSJoerg Sonnenberger if ((m = _getq(cl->q_)) != NULL)
16444d723e5aSJoerg Sonnenberger m_freem(m);
16454d723e5aSJoerg Sonnenberger }
16464d723e5aSJoerg Sonnenberger
16474d723e5aSJoerg Sonnenberger static struct mbuf *
16484d723e5aSJoerg Sonnenberger _rmc_getq(rm_class_t *cl)
16494d723e5aSJoerg Sonnenberger {
16504d723e5aSJoerg Sonnenberger #ifdef ALTQ_RIO
16514d723e5aSJoerg Sonnenberger if (q_is_rio(cl->q_))
16524d723e5aSJoerg Sonnenberger return rio_getq((rio_t *)cl->red_, cl->q_);
16534d723e5aSJoerg Sonnenberger #endif
16544d723e5aSJoerg Sonnenberger #ifdef ALTQ_RED
16554d723e5aSJoerg Sonnenberger if (q_is_red(cl->q_))
16564d723e5aSJoerg Sonnenberger return red_getq(cl->red_, cl->q_);
16574d723e5aSJoerg Sonnenberger #endif
16584d723e5aSJoerg Sonnenberger return _getq(cl->q_);
16594d723e5aSJoerg Sonnenberger }
16604d723e5aSJoerg Sonnenberger
16614d723e5aSJoerg Sonnenberger static struct mbuf *
16624d723e5aSJoerg Sonnenberger _rmc_pollq(rm_class_t *cl)
16634d723e5aSJoerg Sonnenberger {
16644d723e5aSJoerg Sonnenberger return qhead(cl->q_);
16654d723e5aSJoerg Sonnenberger }
16664d723e5aSJoerg Sonnenberger
16674d723e5aSJoerg Sonnenberger #ifdef CBQ_TRACE
16684d723e5aSJoerg Sonnenberger /*
16694d723e5aSJoerg Sonnenberger * DDB hook to trace cbq events:
16704d723e5aSJoerg Sonnenberger * the last 1024 events are held in a circular buffer.
16714d723e5aSJoerg Sonnenberger * use "call cbqtrace_dump(N)" to display 20 events starting from the Nth event.
16724d723e5aSJoerg Sonnenberger */
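/*
 * Each record dumped below consists of a hex word, the recording
 * function, a four-character event code and an integer argument.  The
 * codes passed to CBQTRACE() (e.g. 'ojda', 'ffob') appear to be spelled
 * backwards so that they read naturally ("adjo", "boff") when the
 * stored int is printed byte by byte on a little-endian machine.
 */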
16734d723e5aSJoerg Sonnenberger void cbqtrace_dump(int);
16744d723e5aSJoerg Sonnenberger static char *rmc_funcname(void *);
16754d723e5aSJoerg Sonnenberger
16764d723e5aSJoerg Sonnenberger static struct rmc_funcs {
16774d723e5aSJoerg Sonnenberger void *func;
16784d723e5aSJoerg Sonnenberger char *name;
16794d723e5aSJoerg Sonnenberger } rmc_funcs[] = {
16804d723e5aSJoerg Sonnenberger rmc_init, "rmc_init",
16814d723e5aSJoerg Sonnenberger rmc_queue_packet, "rmc_queue_packet",
16824d723e5aSJoerg Sonnenberger rmc_under_limit, "rmc_under_limit",
16834d723e5aSJoerg Sonnenberger rmc_update_class_util, "rmc_update_class_util",
16844d723e5aSJoerg Sonnenberger rmc_delay_action, "rmc_delay_action",
16854d723e5aSJoerg Sonnenberger rmc_restart, "rmc_restart",
16864d723e5aSJoerg Sonnenberger _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
16874d723e5aSJoerg Sonnenberger NULL, NULL
16884d723e5aSJoerg Sonnenberger };
16894d723e5aSJoerg Sonnenberger
1690c1a682daSSepherosa Ziehau static char *
16913bf25ce1SSascha Wildner rmc_funcname(void *func)
16924d723e5aSJoerg Sonnenberger {
16934d723e5aSJoerg Sonnenberger struct rmc_funcs *fp;
16944d723e5aSJoerg Sonnenberger
16954d723e5aSJoerg Sonnenberger for (fp = rmc_funcs; fp->func != NULL; fp++) {
16964d723e5aSJoerg Sonnenberger if (fp->func == func)
16974d723e5aSJoerg Sonnenberger return (fp->name);
16984d723e5aSJoerg Sonnenberger }
16994d723e5aSJoerg Sonnenberger
17004d723e5aSJoerg Sonnenberger return ("unknown");
17014d723e5aSJoerg Sonnenberger }
17024d723e5aSJoerg Sonnenberger
17034d723e5aSJoerg Sonnenberger void
17044d723e5aSJoerg Sonnenberger cbqtrace_dump(int counter)
17054d723e5aSJoerg Sonnenberger {
17064d723e5aSJoerg Sonnenberger int i, *p;
17074d723e5aSJoerg Sonnenberger char *cp;
17084d723e5aSJoerg Sonnenberger
17094d723e5aSJoerg Sonnenberger counter = counter % NCBQTRACE;
17104d723e5aSJoerg Sonnenberger p = (int *)&cbqtrace_buffer[counter];
17114d723e5aSJoerg Sonnenberger
17124d723e5aSJoerg Sonnenberger for (i=0; i<20; i++) {
17134b1cf444SSascha Wildner kprintf("[0x%x] ", *p++);
17144b1cf444SSascha Wildner kprintf("%s: ", rmc_funcname((void *)*p++));
17154d723e5aSJoerg Sonnenberger cp = (char *)p++;
17164b1cf444SSascha Wildner kprintf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
17174b1cf444SSascha Wildner kprintf("%d\n",*p++);
17184d723e5aSJoerg Sonnenberger
17194d723e5aSJoerg Sonnenberger if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
17204d723e5aSJoerg Sonnenberger p = (int *)cbqtrace_buffer;
17214d723e5aSJoerg Sonnenberger }
17224d723e5aSJoerg Sonnenberger }
17234d723e5aSJoerg Sonnenberger #endif /* CBQ_TRACE */
17244d723e5aSJoerg Sonnenberger #endif /* ALTQ_CBQ */
1725