xref: /onnv-gate/usr/src/uts/common/io/mac/mac_sched.c (revision 11878:ac93462db6d7)
18275SEric Cheng /*
28275SEric Cheng  * CDDL HEADER START
38275SEric Cheng  *
48275SEric Cheng  * The contents of this file are subject to the terms of the
58275SEric Cheng  * Common Development and Distribution License (the "License").
68275SEric Cheng  * You may not use this file except in compliance with the License.
78275SEric Cheng  *
88275SEric Cheng  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
98275SEric Cheng  * or http://www.opensolaris.org/os/licensing.
108275SEric Cheng  * See the License for the specific language governing permissions
118275SEric Cheng  * and limitations under the License.
128275SEric Cheng  *
138275SEric Cheng  * When distributing Covered Code, include this CDDL HEADER in each
148275SEric Cheng  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
158275SEric Cheng  * If applicable, add the following below this CDDL HEADER, with the
168275SEric Cheng  * fields enclosed by brackets "[]" replaced with your own identifying
178275SEric Cheng  * information: Portions Copyright [yyyy] [name of copyright owner]
188275SEric Cheng  *
198275SEric Cheng  * CDDL HEADER END
208275SEric Cheng  */
218275SEric Cheng /*
2211528SBaban.Kenkre@Sun.COM  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
238275SEric Cheng  * Use is subject to license terms.
248275SEric Cheng  */
258275SEric Cheng 
268275SEric Cheng #include <sys/types.h>
278275SEric Cheng #include <sys/callb.h>
288275SEric Cheng #include <sys/sdt.h>
298275SEric Cheng #include <sys/strsubr.h>
308275SEric Cheng #include <sys/strsun.h>
318275SEric Cheng #include <sys/vlan.h>
328275SEric Cheng #include <inet/ipsec_impl.h>
338275SEric Cheng #include <inet/ip_impl.h>
348275SEric Cheng #include <inet/sadb.h>
358275SEric Cheng #include <inet/ipsecesp.h>
368275SEric Cheng #include <inet/ipsecah.h>
378275SEric Cheng #include <inet/ip6.h>
388275SEric Cheng 
398275SEric Cheng #include <sys/mac_impl.h>
408275SEric Cheng #include <sys/mac_client_impl.h>
418275SEric Cheng #include <sys/mac_client_priv.h>
428275SEric Cheng #include <sys/mac_soft_ring.h>
438275SEric Cheng #include <sys/mac_flow_impl.h>
448275SEric Cheng 
458275SEric Cheng static mac_tx_cookie_t mac_tx_single_ring_mode(mac_soft_ring_set_t *, mblk_t *,
468275SEric Cheng     uintptr_t, uint16_t, mblk_t **);
478275SEric Cheng static mac_tx_cookie_t mac_tx_serializer_mode(mac_soft_ring_set_t *, mblk_t *,
488275SEric Cheng     uintptr_t, uint16_t, mblk_t **);
498275SEric Cheng static mac_tx_cookie_t mac_tx_fanout_mode(mac_soft_ring_set_t *, mblk_t *,
508275SEric Cheng     uintptr_t, uint16_t, mblk_t **);
518275SEric Cheng static mac_tx_cookie_t mac_tx_bw_mode(mac_soft_ring_set_t *, mblk_t *,
528275SEric Cheng     uintptr_t, uint16_t, mblk_t **);
53*11878SVenu.Iyer@Sun.COM static mac_tx_cookie_t mac_tx_aggr_mode(mac_soft_ring_set_t *, mblk_t *,
54*11878SVenu.Iyer@Sun.COM     uintptr_t, uint16_t, mblk_t **);
558275SEric Cheng 
568275SEric Cheng typedef struct mac_tx_mode_s {
578275SEric Cheng 	mac_tx_srs_mode_t	mac_tx_mode;
588275SEric Cheng 	mac_tx_func_t		mac_tx_func;
598275SEric Cheng } mac_tx_mode_t;
608275SEric Cheng 
618275SEric Cheng /*
62*11878SVenu.Iyer@Sun.COM  * There are seven modes of operation on the Tx side. These modes get set
638275SEric Cheng  * in mac_tx_srs_setup(). Except for the experimental TX_SERIALIZE mode,
648275SEric Cheng  * none of the other modes are user configurable. They get selected by
658275SEric Cheng  * the system depending upon whether the link (or flow) has multiple Tx
66*11878SVenu.Iyer@Sun.COM  * rings or a bandwidth configured, or if the link is an aggr, etc.
67*11878SVenu.Iyer@Sun.COM  *
68*11878SVenu.Iyer@Sun.COM  * When the Tx SRS is operating in aggr mode (st_mode) or if there are
69*11878SVenu.Iyer@Sun.COM  * multiple Tx rings owned by Tx SRS, then each Tx ring (pseudo or
70*11878SVenu.Iyer@Sun.COM  * otherwise) will have a soft ring associated with it. These soft rings
71*11878SVenu.Iyer@Sun.COM  * are stored in srs_tx_soft_rings[] array.
72*11878SVenu.Iyer@Sun.COM  *
73*11878SVenu.Iyer@Sun.COM  * Additionally in the case of aggr, there is the st_soft_rings[] array
74*11878SVenu.Iyer@Sun.COM  * in the mac_srs_tx_t structure. This array is used to store the same
75*11878SVenu.Iyer@Sun.COM  * set of soft rings that are present in srs_tx_soft_rings[] array but
76*11878SVenu.Iyer@Sun.COM  * in a different manner. The soft ring associated with the pseudo Tx
77*11878SVenu.Iyer@Sun.COM  * ring is saved at mr_index (of the pseudo ring) in st_soft_rings[]
78*11878SVenu.Iyer@Sun.COM  * array. This helps in quickly getting the soft ring associated with the
79*11878SVenu.Iyer@Sun.COM  * Tx ring when aggr_find_tx_ring() returns the pseudo Tx ring that is to
80*11878SVenu.Iyer@Sun.COM  * be used for transmit.
818275SEric Cheng  */
828275SEric Cheng mac_tx_mode_t mac_tx_mode_list[] = {
838275SEric Cheng 	{SRS_TX_DEFAULT,	mac_tx_single_ring_mode},
848275SEric Cheng 	{SRS_TX_SERIALIZE,	mac_tx_serializer_mode},
858275SEric Cheng 	{SRS_TX_FANOUT,		mac_tx_fanout_mode},
868275SEric Cheng 	{SRS_TX_BW,		mac_tx_bw_mode},
87*11878SVenu.Iyer@Sun.COM 	{SRS_TX_BW_FANOUT,	mac_tx_bw_mode},
88*11878SVenu.Iyer@Sun.COM 	{SRS_TX_AGGR,		mac_tx_aggr_mode},
89*11878SVenu.Iyer@Sun.COM 	{SRS_TX_BW_AGGR,	mac_tx_bw_mode}
908275SEric Cheng };
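/*
 * Illustrative sketch (not part of the original code): how the aggr Tx
 * path can map the pseudo Tx ring returned by aggr_find_tx_ring() back
 * to its soft ring via the st_soft_rings[] array described in the block
 * comment above. The field names (st_soft_rings, mr_index) are taken
 * from that comment; the helper itself is hypothetical and kept under
 * #if 0 so it is never compiled.
 */
#if 0
static mac_soft_ring_t *
mac_tx_srs_ring_to_soft_ring(mac_soft_ring_set_t *mac_srs,
    mac_ring_t *pseudo_ring)
{
	mac_srs_tx_t	*srs_tx = &mac_srs->srs_tx;

	/* The pseudo ring's mr_index picks the matching soft ring. */
	return (srs_tx->st_soft_rings[pseudo_ring->mr_index]);
}
#endif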
918275SEric Cheng 
928275SEric Cheng /*
938275SEric Cheng  * Soft Ring Set (SRS) - The Run time code that deals with
948275SEric Cheng  * dynamic polling from the hardware, bandwidth enforcement,
958275SEric Cheng  * fanout etc.
968275SEric Cheng  *
978275SEric Cheng  * We try to use H/W classification on NIC and assign traffic for
988275SEric Cheng  * a MAC address to a particular Rx ring or ring group. There is a
998275SEric Cheng  * 1-1 mapping between a SRS and a Rx ring. The SRS dynamically
1008275SEric Cheng  * switches the underlying Rx ring between interrupt and
1018275SEric Cheng  * polling mode and enforces any specified B/W control.
1028275SEric Cheng  *
1038275SEric Cheng  * There is always a SRS created and tied to each H/W and S/W rule.
1048275SEric Cheng  * Whenever we create a H/W rule, we always add the same rule to the
1058275SEric Cheng  * S/W classifier and tie a SRS to it.
1068275SEric Cheng  *
1078275SEric Cheng  * In case a B/W control is specified, it is broken into bytes
1088275SEric Cheng  * per ticks and as soon as the quota for a tick is exhausted,
1098275SEric Cheng  * the underlying Rx ring is forced into poll mode for remainder of
1108275SEric Cheng  * the tick. The SRS poll thread only polls for bytes that are
1118275SEric Cheng  * allowed to come in the SRS. We typically let 4x the configured
1128275SEric Cheng  * B/W worth of packets come into the SRS (to prevent unnecessary
1138275SEric Cheng  * drops due to bursts) but only process the specified amount.
1148275SEric Cheng  *
1158275SEric Cheng  * A MAC client (e.g. a VNIC or aggr) can have 1 or more
1168275SEric Cheng  * Rx rings (and corresponding SRSs) assigned to it. The SRS
1178275SEric Cheng  * in turn can have softrings to do protocol level fanout or
1188275SEric Cheng  * softrings to do S/W based fanout or both. In case the NIC
1198275SEric Cheng  * has no Rx rings, we do S/W classification to the respective SRS.
1208275SEric Cheng  * The S/W classification rule is always set up and ready. This
1218275SEric Cheng  * allows the MAC layer to reassign Rx rings whenever needed
1228275SEric Cheng  * but packets still continue to flow via the default path and
1238275SEric Cheng  * get S/W classified to the correct SRS.
1248275SEric Cheng  *
1258275SEric Cheng  * The SRS's are used on both Tx and Rx side. They use the same
1268275SEric Cheng  * data structure but the processing routines have slightly different
1278275SEric Cheng  * semantics due to the fact that Rx side needs to do dynamic
1288275SEric Cheng  * polling etc.
1298275SEric Cheng  *
1308275SEric Cheng  * Dynamic Polling Notes
1318275SEric Cheng  * =====================
1328275SEric Cheng  *
1338275SEric Cheng  * Each Soft ring set is capable of switching its Rx ring between
1348275SEric Cheng  * interrupt and poll mode and actively 'polls' for packets in
1358275SEric Cheng  * poll mode. If the SRS is implementing a B/W limit, it makes
1368275SEric Cheng  * sure that only the max allowed packets are pulled in poll mode
1378275SEric Cheng  * and switches to poll mode as soon as the B/W limit is exceeded. As
1388275SEric Cheng  * such, there is no extra overhead to implement B/W limits.
1398275SEric Cheng  *
1408275SEric Cheng  * In poll mode, it's better to keep the pipeline going where the
1418275SEric Cheng  * SRS worker thread keeps processing packets and the poll thread
1428275SEric Cheng  * keeps bringing in more packets (especially if they get to run
1438275SEric Cheng  * on different CPUs). This also prevents the overheads associated
1448275SEric Cheng  * with excessive signalling (on NUMA machines, this can be
1458275SEric Cheng  * pretty devastating). The exception is the latency-optimized case
1468275SEric Cheng  * where the worker thread does no work and the interrupt and poll
1478275SEric Cheng  * threads are allowed to do their own drain.
1488275SEric Cheng  *
1498275SEric Cheng  * We use the following policy to control Dynamic Polling:
1508275SEric Cheng  * 1) We switch to poll mode anytime the processing
1518275SEric Cheng  *    thread causes a backlog to build up in SRS and
1528275SEric Cheng  *    its associated Soft Rings (sr_poll_pkt_cnt > 0).
1538275SEric Cheng  * 2) As long as the backlog stays under the low water
1548275SEric Cheng  *    mark (sr_lowat), we poll the H/W for more packets.
1558275SEric Cheng  * 3) If the backlog (sr_poll_pkt_cnt) exceeds low
1568275SEric Cheng  *    water mark, we stay in poll mode but don't poll
1578275SEric Cheng  *    the H/W for more packets.
1588275SEric Cheng  * 4) Anytime in polling mode, if we poll the H/W for
1598275SEric Cheng  *    packets and find nothing plus we have an existing
1608275SEric Cheng  *    backlog (sr_poll_pkt_cnt > 0), we stay in polling
1618275SEric Cheng  *    mode but don't poll the H/W for packets anymore
1628275SEric Cheng  *    (let the polling thread go to sleep).
1638275SEric Cheng  * 5) Once the backlog is relieved (packets are processed)
1648275SEric Cheng  *    we reenable polling (by signalling the poll thread)
1658275SEric Cheng  *    only when the backlog dips below sr_poll_thres.
1668275SEric Cheng  * 6) sr_hiwat is used exclusively when we are not
1678275SEric Cheng  *    polling capable and is used to decide when to
1688275SEric Cheng  *    drop packets so the SRS queue length doesn't grow
1698275SEric Cheng  *    infinitely.
1708275SEric Cheng  *
1718275SEric Cheng  * NOTE: Also see the block level comment on top of mac_soft_ring.c
1728275SEric Cheng  */
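
/*
 * Illustrative sketch (not part of the original code) of the dynamic
 * polling policy described above.  It condenses rules 2) and 3) into a
 * single predicate over sr_poll_pkt_cnt and sr_lowat (both from
 * mac_srs_rx_t); the helper is hypothetical and kept under #if 0 so it
 * is never compiled.
 */
#if 0
static boolean_t
mac_srs_keep_polling_hw(mac_soft_ring_set_t *mac_srs)
{
	mac_srs_rx_t	*srs_rx = &mac_srs->srs_rx;

	/*
	 * Keep asking the poll thread for more packets only while the
	 * backlog queued in the SRS and its soft rings stays under the
	 * low water mark; above it we stay in poll mode but stop
	 * pulling from the H/W.
	 */
	return (srs_rx->sr_poll_pkt_cnt < srs_rx->sr_lowat);
}
#endif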
1738275SEric Cheng 
1748275SEric Cheng /*
1758275SEric Cheng  * mac_latency_optimize
1768275SEric Cheng  *
1778275SEric Cheng  * Controls whether the poll thread can process the packets inline
1788275SEric Cheng  * or let the SRS worker thread do the processing. This applies if
1798275SEric Cheng  * the SRS was not being processed. For latency sensitive traffic,
1808275SEric Cheng  * this needs to be true to allow inline processing. For throughput
1818275SEric Cheng  * under load, this should be false.
1828275SEric Cheng  *
1838275SEric Cheng  * This (and other similar) tunable should be rolled into a link
1848275SEric Cheng  * or flow specific workload hint that can be set using dladm
1858275SEric Cheng  * linkprop (instead of multiple such tunables).
1868275SEric Cheng  */
1878275SEric Cheng boolean_t mac_latency_optimize = B_TRUE;
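
/*
 * Note (editorial, not from the original source): as a global kernel
 * variable, mac_latency_optimize can typically be overridden the same
 * way as other mac module tunables, e.g. via /etc/system:
 *
 *	set mac:mac_latency_optimize = 0
 *
 * pending the dladm linkprop based workload hint mentioned above.
 */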
1888275SEric Cheng 
1898275SEric Cheng /*
1908275SEric Cheng  * MAC_RX_SRS_ENQUEUE_CHAIN and MAC_TX_SRS_ENQUEUE_CHAIN
1918275SEric Cheng  *
1928275SEric Cheng  * Queue an mp or chain in the soft ring set and increment the
1938275SEric Cheng  * local count (srs_count) for the SRS and the shared counter
1948275SEric Cheng  * (srs_poll_pkt_cnt - shared between SRS and its soft rings
1958275SEric Cheng  * to track the total unprocessed packets for polling to work
1968275SEric Cheng  * correctly).
1978275SEric Cheng  *
1988275SEric Cheng  * The size (total bytes queued) counters are incremented only
1998275SEric Cheng  * if we are doing B/W control.
2008275SEric Cheng  */
2018275SEric Cheng #define	MAC_SRS_ENQUEUE_CHAIN(mac_srs, head, tail, count, sz) {		\
2028275SEric Cheng 	ASSERT(MUTEX_HELD(&(mac_srs)->srs_lock));			\
2038275SEric Cheng 	if ((mac_srs)->srs_last != NULL)				\
2048275SEric Cheng 		(mac_srs)->srs_last->b_next = (head);			\
2058275SEric Cheng 	else								\
2068275SEric Cheng 		(mac_srs)->srs_first = (head);				\
2078275SEric Cheng 	(mac_srs)->srs_last = (tail);					\
2088275SEric Cheng 	(mac_srs)->srs_count += count;					\
2098275SEric Cheng }
2108275SEric Cheng 
2118275SEric Cheng #define	MAC_RX_SRS_ENQUEUE_CHAIN(mac_srs, head, tail, count, sz) {	\
2128275SEric Cheng 	mac_srs_rx_t	*srs_rx = &(mac_srs)->srs_rx;			\
2138275SEric Cheng 									\
2148275SEric Cheng 	MAC_SRS_ENQUEUE_CHAIN(mac_srs, head, tail, count, sz);		\
2158275SEric Cheng 	srs_rx->sr_poll_pkt_cnt += count;				\
2168275SEric Cheng 	ASSERT(srs_rx->sr_poll_pkt_cnt > 0);				\
2178275SEric Cheng 	if ((mac_srs)->srs_type & SRST_BW_CONTROL) {			\
2188275SEric Cheng 		(mac_srs)->srs_size += (sz);				\
2198275SEric Cheng 		mutex_enter(&(mac_srs)->srs_bw->mac_bw_lock);		\
2208275SEric Cheng 		(mac_srs)->srs_bw->mac_bw_sz += (sz);			\
2218275SEric Cheng 		mutex_exit(&(mac_srs)->srs_bw->mac_bw_lock);		\
2228275SEric Cheng 	}								\
2238275SEric Cheng }
2248275SEric Cheng 
2258275SEric Cheng #define	MAC_TX_SRS_ENQUEUE_CHAIN(mac_srs, head, tail, count, sz) {	\
2268275SEric Cheng 	mac_srs->srs_state |= SRS_ENQUEUED;				\
2278275SEric Cheng 	MAC_SRS_ENQUEUE_CHAIN(mac_srs, head, tail, count, sz);		\
2288275SEric Cheng 	if ((mac_srs)->srs_type & SRST_BW_CONTROL) {			\
2298275SEric Cheng 		(mac_srs)->srs_size += (sz);				\
2308275SEric Cheng 		(mac_srs)->srs_bw->mac_bw_sz += (sz);			\
2318275SEric Cheng 	}								\
2328275SEric Cheng }
2338275SEric Cheng 
2348275SEric Cheng /*
2358275SEric Cheng  * Turn polling on routines
2368275SEric Cheng  */
2378275SEric Cheng #define	MAC_SRS_POLLING_ON(mac_srs) {					\
2388275SEric Cheng 	ASSERT(MUTEX_HELD(&(mac_srs)->srs_lock));			\
2398275SEric Cheng 	if (((mac_srs)->srs_state &					\
2408275SEric Cheng 	    (SRS_POLLING_CAPAB|SRS_POLLING)) == SRS_POLLING_CAPAB) {	\
2418275SEric Cheng 		(mac_srs)->srs_state |= SRS_POLLING;			\
2428275SEric Cheng 		(void) mac_hwring_disable_intr((mac_ring_handle_t)	\
2438275SEric Cheng 		    (mac_srs)->srs_ring);				\
2448275SEric Cheng 		(mac_srs)->srs_rx.sr_poll_on++;				\
2458275SEric Cheng 	}								\
2468275SEric Cheng }
2478275SEric Cheng 
2488275SEric Cheng #define	MAC_SRS_WORKER_POLLING_ON(mac_srs) {				\
2498275SEric Cheng 	ASSERT(MUTEX_HELD(&(mac_srs)->srs_lock));			\
2508275SEric Cheng 	if (((mac_srs)->srs_state &					\
2518275SEric Cheng 	    (SRS_POLLING_CAPAB|SRS_WORKER|SRS_POLLING)) == 		\
2528275SEric Cheng 	    (SRS_POLLING_CAPAB|SRS_WORKER)) {				\
2538275SEric Cheng 		(mac_srs)->srs_state |= SRS_POLLING;			\
2548275SEric Cheng 		(void) mac_hwring_disable_intr((mac_ring_handle_t)	\
2558275SEric Cheng 		    (mac_srs)->srs_ring);				\
2568275SEric Cheng 		(mac_srs)->srs_rx.sr_worker_poll_on++;			\
2578275SEric Cheng 	}								\
2588275SEric Cheng }
2598275SEric Cheng 
2608275SEric Cheng /*
2618275SEric Cheng  * MAC_SRS_POLL_RING
2628275SEric Cheng  *
2638275SEric Cheng  * Signal the SRS poll thread to poll the underlying H/W ring
2648275SEric Cheng  * provided it wasn't already polling (SRS_GET_PKTS was set).
2658275SEric Cheng  *
2668275SEric Cheng  * Poll thread gets to run only from mac_rx_srs_drain() and only
2678275SEric Cheng  * if the drain was being done by the worker thread.
2688275SEric Cheng  */
2698275SEric Cheng #define	MAC_SRS_POLL_RING(mac_srs) {					\
2708275SEric Cheng 	mac_srs_rx_t	*srs_rx = &(mac_srs)->srs_rx;			\
2718275SEric Cheng 									\
2728275SEric Cheng 	ASSERT(MUTEX_HELD(&(mac_srs)->srs_lock));			\
2738275SEric Cheng 	srs_rx->sr_poll_thr_sig++;					\
2748275SEric Cheng 	if (((mac_srs)->srs_state & 					\
2758275SEric Cheng 	    (SRS_POLLING_CAPAB|SRS_WORKER|SRS_GET_PKTS)) ==		\
2768275SEric Cheng 		(SRS_WORKER|SRS_POLLING_CAPAB)) {			\
2778275SEric Cheng 		(mac_srs)->srs_state |= SRS_GET_PKTS;			\
2788275SEric Cheng 		cv_signal(&(mac_srs)->srs_cv);   			\
2798275SEric Cheng 	} else {							\
2808275SEric Cheng 		srs_rx->sr_poll_thr_busy++;				\
2818275SEric Cheng 	}								\
2828275SEric Cheng }
2838275SEric Cheng 
2848275SEric Cheng /*
2858275SEric Cheng  * MAC_SRS_CHECK_BW_CONTROL
2868275SEric Cheng  *
2878275SEric Cheng  * Check to see if the next tick has started so we can reset the
2888275SEric Cheng  * SRS_BW_ENFORCED flag and allow more packets to come in the
2898275SEric Cheng  * system.
2908275SEric Cheng  */
2918275SEric Cheng #define	MAC_SRS_CHECK_BW_CONTROL(mac_srs) {				\
2928275SEric Cheng 	ASSERT(MUTEX_HELD(&(mac_srs)->srs_lock));			\
2938275SEric Cheng 	ASSERT(((mac_srs)->srs_type & SRST_TX) ||			\
2948275SEric Cheng 	    MUTEX_HELD(&(mac_srs)->srs_bw->mac_bw_lock));		\
29511066Srafael.vanoni@sun.com 	clock_t now = ddi_get_lbolt();					\
29611066Srafael.vanoni@sun.com 	if ((mac_srs)->srs_bw->mac_bw_curr_time != now) {		\
29711066Srafael.vanoni@sun.com 		(mac_srs)->srs_bw->mac_bw_curr_time = now;		\
2988275SEric Cheng 		(mac_srs)->srs_bw->mac_bw_used = 0;	       		\
2998275SEric Cheng 		if ((mac_srs)->srs_bw->mac_bw_state & SRS_BW_ENFORCED)	\
3008275SEric Cheng 			(mac_srs)->srs_bw->mac_bw_state &= ~SRS_BW_ENFORCED; \
3018275SEric Cheng 	}								\
3028275SEric Cheng }
3038275SEric Cheng 
3048275SEric Cheng /*
3058275SEric Cheng  * MAC_SRS_WORKER_WAKEUP
3068275SEric Cheng  *
3078275SEric Cheng  * Wake up the SRS worker thread to process the queue as long as
3088275SEric Cheng  * no one else is processing the queue. If we are optimizing for
3098275SEric Cheng  * latency, we wake up the worker thread immediately or else we
3108275SEric Cheng  * wait mac_srs_worker_wakeup_ticks before the worker thread gets
3118275SEric Cheng  * woken up.
3128275SEric Cheng  */
3138275SEric Cheng int mac_srs_worker_wakeup_ticks = 0;
3148275SEric Cheng #define	MAC_SRS_WORKER_WAKEUP(mac_srs) {				\
3158275SEric Cheng 	ASSERT(MUTEX_HELD(&(mac_srs)->srs_lock));			\
3168275SEric Cheng 	if (!((mac_srs)->srs_state & SRS_PROC) &&			\
3178275SEric Cheng 		(mac_srs)->srs_tid == NULL) {				\
3189618SRajagopal.Kunhappan@Sun.COM 		if (((mac_srs)->srs_state & SRS_LATENCY_OPT) ||		\
3198275SEric Cheng 			(mac_srs_worker_wakeup_ticks == 0))		\
3208275SEric Cheng 			cv_signal(&(mac_srs)->srs_async);		\
3218275SEric Cheng 		else							\
3228275SEric Cheng 			(mac_srs)->srs_tid =				\
3238275SEric Cheng 				timeout(mac_srs_fire, (mac_srs),	\
3248275SEric Cheng 					mac_srs_worker_wakeup_ticks);	\
3258275SEric Cheng 	}								\
3268275SEric Cheng }
3278275SEric Cheng 
3288275SEric Cheng #define	TX_BANDWIDTH_MODE(mac_srs)				\
3298275SEric Cheng 	((mac_srs)->srs_tx.st_mode == SRS_TX_BW ||		\
330*11878SVenu.Iyer@Sun.COM 	    (mac_srs)->srs_tx.st_mode == SRS_TX_BW_FANOUT ||	\
331*11878SVenu.Iyer@Sun.COM 	    (mac_srs)->srs_tx.st_mode == SRS_TX_BW_AGGR)
3328275SEric Cheng 
3338275SEric Cheng #define	TX_SRS_TO_SOFT_RING(mac_srs, head, hint) {			\
334*11878SVenu.Iyer@Sun.COM 	if (tx_mode == SRS_TX_BW_FANOUT)				\
335*11878SVenu.Iyer@Sun.COM 		(void) mac_tx_fanout_mode(mac_srs, head, hint, 0, NULL);\
336*11878SVenu.Iyer@Sun.COM 	else								\
337*11878SVenu.Iyer@Sun.COM 		(void) mac_tx_aggr_mode(mac_srs, head, hint, 0, NULL);	\
3388275SEric Cheng }
3398275SEric Cheng 
3408275SEric Cheng /*
3418275SEric Cheng  * MAC_TX_SRS_BLOCK
3428275SEric Cheng  *
3438275SEric Cheng  * Always called from mac_tx_srs_drain() function. SRS_TX_BLOCKED
3448275SEric Cheng  * will be set only if srs_tx_woken_up is FALSE. If
3458275SEric Cheng  * srs_tx_woken_up is TRUE, it indicates that the wakeup arrived
3468275SEric Cheng  * before we grabbed srs_lock to set SRS_TX_BLOCKED. We need to
3478275SEric Cheng  * attempt to transmit again and not setting SRS_TX_BLOCKED does
3488275SEric Cheng  * that.
3498275SEric Cheng  */
3508275SEric Cheng #define	MAC_TX_SRS_BLOCK(srs, mp)	{			\
3518275SEric Cheng 	ASSERT(MUTEX_HELD(&(srs)->srs_lock));			\
3528275SEric Cheng 	if ((srs)->srs_tx.st_woken_up) {			\
3538275SEric Cheng 		(srs)->srs_tx.st_woken_up = B_FALSE;		\
3548275SEric Cheng 	} else {						\
3558275SEric Cheng 		ASSERT(!((srs)->srs_state & SRS_TX_BLOCKED));	\
3568275SEric Cheng 		(srs)->srs_state |= SRS_TX_BLOCKED;		\
357*11878SVenu.Iyer@Sun.COM 		(srs)->srs_tx.st_stat.mts_blockcnt++;		\
3588275SEric Cheng 	}							\
3598275SEric Cheng }
3608275SEric Cheng 
3618275SEric Cheng /*
3628275SEric Cheng  * MAC_TX_SRS_TEST_HIWAT
3638275SEric Cheng  *
3648275SEric Cheng  * Called before queueing a packet onto Tx SRS to test and set
3658275SEric Cheng  * SRS_TX_HIWAT if srs_count exceeds srs_tx_hiwat.
3668275SEric Cheng  */
3678275SEric Cheng #define	MAC_TX_SRS_TEST_HIWAT(srs, mp, tail, cnt, sz, cookie) {		\
3688275SEric Cheng 	boolean_t enqueue = 1;						\
3698275SEric Cheng 									\
3708275SEric Cheng 	if ((srs)->srs_count > (srs)->srs_tx.st_hiwat) {		\
3718275SEric Cheng 		/*							\
3728275SEric Cheng 		 * flow-controlled. Store srs in cookie so that it	\
3738275SEric Cheng 		 * can be returned as mac_tx_cookie_t to client		\
3748275SEric Cheng 		 */							\
3758275SEric Cheng 		(srs)->srs_state |= SRS_TX_HIWAT;			\
3768275SEric Cheng 		cookie = (mac_tx_cookie_t)srs;				\
3778275SEric Cheng 		(srs)->srs_tx.st_hiwat_cnt++;				\
3788275SEric Cheng 		if ((srs)->srs_count > (srs)->srs_tx.st_max_q_cnt) {	\
3798275SEric Cheng 			/* increment freed stats */			\
380*11878SVenu.Iyer@Sun.COM 			(srs)->srs_tx.st_stat.mts_sdrops += cnt;	\
3818275SEric Cheng 			/*						\
3828275SEric Cheng 			 * b_prev may be set to the fanout hint		\
3838275SEric Cheng 			 * hence can't use freemsg directly		\
3848275SEric Cheng 			 */						\
3858275SEric Cheng 			mac_pkt_drop(NULL, NULL, mp_chain, B_FALSE);	\
3868275SEric Cheng 			DTRACE_PROBE1(tx_queued_hiwat,			\
3878275SEric Cheng 			    mac_soft_ring_set_t *, srs);		\
3888275SEric Cheng 			enqueue = 0;					\
3898275SEric Cheng 		}							\
3908275SEric Cheng 	}								\
3918275SEric Cheng 	if (enqueue)							\
3928275SEric Cheng 		MAC_TX_SRS_ENQUEUE_CHAIN(srs, mp, tail, cnt, sz);	\
3938275SEric Cheng }
3948275SEric Cheng 
3958275SEric Cheng /* Some utility macros */
3968275SEric Cheng #define	MAC_SRS_BW_LOCK(srs)						\
3978275SEric Cheng 	if (!(srs->srs_type & SRST_TX))					\
3988275SEric Cheng 		mutex_enter(&srs->srs_bw->mac_bw_lock);
3998275SEric Cheng 
4008275SEric Cheng #define	MAC_SRS_BW_UNLOCK(srs)						\
4018275SEric Cheng 	if (!(srs->srs_type & SRST_TX))					\
4028275SEric Cheng 		mutex_exit(&srs->srs_bw->mac_bw_lock);
4038275SEric Cheng 
4048275SEric Cheng #define	MAC_TX_SRS_DROP_MESSAGE(srs, mp, cookie) {		\
4058275SEric Cheng 	mac_pkt_drop(NULL, NULL, mp, B_FALSE);			\
4068275SEric Cheng 	/* increment freed stats */				\
407*11878SVenu.Iyer@Sun.COM 	mac_srs->srs_tx.st_stat.mts_sdrops++;			\
4088275SEric Cheng 	cookie = (mac_tx_cookie_t)srs;				\
4098275SEric Cheng }
4108275SEric Cheng 
4118275SEric Cheng #define	MAC_TX_SET_NO_ENQUEUE(srs, mp_chain, ret_mp, cookie) {		\
4128275SEric Cheng 	mac_srs->srs_state |= SRS_TX_WAKEUP_CLIENT;			\
4138275SEric Cheng 	cookie = (mac_tx_cookie_t)srs;					\
4148275SEric Cheng 	*ret_mp = mp_chain;						\
4158275SEric Cheng }
4168275SEric Cheng 
4178275SEric Cheng /*
4188275SEric Cheng  * Drop the rx packet and advance to the next one in the chain.
4198275SEric Cheng  */
4208275SEric Cheng static void
4218275SEric Cheng mac_rx_drop_pkt(mac_soft_ring_set_t *srs, mblk_t *mp)
4228275SEric Cheng {
4238275SEric Cheng 	mac_srs_rx_t	*srs_rx = &srs->srs_rx;
4248275SEric Cheng 
4258275SEric Cheng 	ASSERT(mp->b_next == NULL);
4268275SEric Cheng 	mutex_enter(&srs->srs_lock);
4278275SEric Cheng 	MAC_UPDATE_SRS_COUNT_LOCKED(srs, 1);
4288275SEric Cheng 	MAC_UPDATE_SRS_SIZE_LOCKED(srs, msgdsize(mp));
4298275SEric Cheng 	mutex_exit(&srs->srs_lock);
4308275SEric Cheng 
431*11878SVenu.Iyer@Sun.COM 	srs_rx->sr_stat.mrs_sdrops++;
4328275SEric Cheng 	freemsg(mp);
4338275SEric Cheng }
4348275SEric Cheng 
4358275SEric Cheng /* DATAPATH RUNTIME ROUTINES */
4368275SEric Cheng 
4378275SEric Cheng /*
4388275SEric Cheng  * mac_srs_fire
4398275SEric Cheng  *
4408275SEric Cheng  * Timer callback routine for waking up the SRS worker thread.
4418275SEric Cheng  */
4428275SEric Cheng static void
4438275SEric Cheng mac_srs_fire(void *arg)
4448275SEric Cheng {
4458275SEric Cheng 	mac_soft_ring_set_t *mac_srs = (mac_soft_ring_set_t *)arg;
4468275SEric Cheng 
4478275SEric Cheng 	mutex_enter(&mac_srs->srs_lock);
4488275SEric Cheng 	if (mac_srs->srs_tid == 0) {
4498275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
4508275SEric Cheng 		return;
4518275SEric Cheng 	}
4528275SEric Cheng 
4538275SEric Cheng 	mac_srs->srs_tid = 0;
4548275SEric Cheng 	if (!(mac_srs->srs_state & SRS_PROC))
4558275SEric Cheng 		cv_signal(&mac_srs->srs_async);
4568275SEric Cheng 
4578275SEric Cheng 	mutex_exit(&mac_srs->srs_lock);
4588275SEric Cheng }
4598275SEric Cheng 
4608275SEric Cheng /*
4618275SEric Cheng  * 'hint' is the fanout_hint (a uint64_t) given by the TCP/IP stack,
4628275SEric Cheng  * and it is used on the Tx path.
4638275SEric Cheng  */
464*11878SVenu.Iyer@Sun.COM #define	HASH_HINT(hint)	\
46511608SRao.Shoaib@Sun.COM 	((hint) ^ ((hint) >> 24) ^ ((hint) >> 16) ^ ((hint) >> 8))
46611608SRao.Shoaib@Sun.COM 
4678275SEric Cheng 
4688275SEric Cheng /*
4698275SEric Cheng  * hash based on the src address and the port information.
4708275SEric Cheng  */
4718275SEric Cheng #define	HASH_ADDR(src, ports)					\
4728275SEric Cheng 	(ntohl((src)) ^ ((ports) >> 24) ^ ((ports) >> 16) ^	\
4738275SEric Cheng 	((ports) >> 8) ^ (ports))
4748275SEric Cheng 
4758275SEric Cheng #define	COMPUTE_INDEX(key, sz)	(key % sz)
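
/*
 * Usage sketch (editorial, not part of the original code): the Rx fanout
 * routines below derive a soft ring index by hashing the IPv4 source
 * address together with the 2x16-bit port pair and folding the result
 * over the soft ring count, e.g.
 *
 *	hash = HASH_ADDR(ipha->ipha_src,
 *	    *(uint32_t *)(mp->b_rptr + ports_offset));
 *	indx = COMPUTE_INDEX(hash, mac_srs->srs_tcp_ring_count);
 *
 * which is how mac_rx_srs_fanout() picks a V4_TCP soft ring.
 */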
4768275SEric Cheng 
4778275SEric Cheng #define	FANOUT_ENQUEUE_MP(head, tail, cnt, bw_ctl, sz, sz0, mp) {	\
4788275SEric Cheng 	if ((tail) != NULL) {						\
4798275SEric Cheng 		ASSERT((tail)->b_next == NULL);				\
4808275SEric Cheng 		(tail)->b_next = (mp);					\
4818275SEric Cheng 	} else {							\
4828275SEric Cheng 		ASSERT((head) == NULL);					\
4838275SEric Cheng 		(head) = (mp);						\
4848275SEric Cheng 	}								\
4858275SEric Cheng 	(tail) = (mp);							\
4868275SEric Cheng 	(cnt)++;							\
4878275SEric Cheng 	if ((bw_ctl))							\
4888275SEric Cheng 		(sz) += (sz0);						\
4898275SEric Cheng }
4908275SEric Cheng 
4918275SEric Cheng #define	MAC_FANOUT_DEFAULT	0
4928275SEric Cheng #define	MAC_FANOUT_RND_ROBIN	1
4938275SEric Cheng int mac_fanout_type = MAC_FANOUT_DEFAULT;
4948275SEric Cheng 
4958275SEric Cheng #define	MAX_SR_TYPES	3
4968275SEric Cheng /* fanout types for port based hashing */
4978275SEric Cheng enum pkt_type {
4988275SEric Cheng 	V4_TCP = 0,
4998275SEric Cheng 	V4_UDP,
5008275SEric Cheng 	OTH,
5018275SEric Cheng 	UNDEF
5028275SEric Cheng };
5038275SEric Cheng 
5048275SEric Cheng /*
5058275SEric Cheng  * In general we do port-based hashing to spread traffic over different
5068275SEric Cheng  * softrings. The tunable below allows that behavior to be overridden. Setting
5078275SEric Cheng  * it to B_TRUE does the fanout based on the src IPv6 address. This behavior
5088275SEric Cheng  * also applies to IPv6 packets carrying multiple optional headers
5098275SEric Cheng  * and other uncommon packet types.
5108275SEric Cheng  */
5118275SEric Cheng boolean_t mac_src_ipv6_fanout = B_FALSE;
5128275SEric Cheng 
5138275SEric Cheng /*
5148275SEric Cheng  * Pair of local and remote ports in the transport header
5158275SEric Cheng  */
5168275SEric Cheng #define	PORTS_SIZE 4
5178275SEric Cheng 
5188275SEric Cheng /*
5198275SEric Cheng  * mac_rx_srs_proto_fanout
5208275SEric Cheng  *
5218275SEric Cheng  * This routine delivers packets destined to an SRS into one of the
5228275SEric Cheng  * protocol soft rings.
5238275SEric Cheng  *
5248275SEric Cheng  * Given a chain of packets we need to split it up into multiple sub chains
5258275SEric Cheng  * destined into TCP, UDP or OTH soft ring. Instead of entering
5268275SEric Cheng  * destined for the TCP, UDP or OTH soft ring. Instead of entering
5278275SEric Cheng  * the soft ring one packet at a time, we want to enter it in the form of a
5288275SEric Cheng  * chain; otherwise we get a start/stop behaviour where the worker thread
5298275SEric Cheng  * goes to sleep and then the next packet comes in, forcing it to wake up, etc.
5308275SEric Cheng static void
5318275SEric Cheng mac_rx_srs_proto_fanout(mac_soft_ring_set_t *mac_srs, mblk_t *head)
5328275SEric Cheng {
5338275SEric Cheng 	struct ether_header		*ehp;
5348833SVenu.Iyer@Sun.COM 	struct ether_vlan_header	*evhp;
5358833SVenu.Iyer@Sun.COM 	uint32_t			sap;
5368275SEric Cheng 	ipha_t				*ipha;
5378833SVenu.Iyer@Sun.COM 	uint8_t				*dstaddr;
5388833SVenu.Iyer@Sun.COM 	size_t				hdrsize;
5398275SEric Cheng 	mblk_t				*mp;
5408275SEric Cheng 	mblk_t				*headmp[MAX_SR_TYPES];
5418275SEric Cheng 	mblk_t				*tailmp[MAX_SR_TYPES];
5428275SEric Cheng 	int				cnt[MAX_SR_TYPES];
5438275SEric Cheng 	size_t				sz[MAX_SR_TYPES];
5448275SEric Cheng 	size_t				sz1;
5458833SVenu.Iyer@Sun.COM 	boolean_t			bw_ctl;
5468275SEric Cheng 	boolean_t			hw_classified;
5478833SVenu.Iyer@Sun.COM 	boolean_t			dls_bypass;
5488833SVenu.Iyer@Sun.COM 	boolean_t			is_ether;
5498833SVenu.Iyer@Sun.COM 	boolean_t			is_unicast;
5508833SVenu.Iyer@Sun.COM 	enum pkt_type			type;
5518275SEric Cheng 	mac_client_impl_t		*mcip = mac_srs->srs_mcip;
5528833SVenu.Iyer@Sun.COM 
5538833SVenu.Iyer@Sun.COM 	is_ether = (mcip->mci_mip->mi_info.mi_nativemedia == DL_ETHER);
5548833SVenu.Iyer@Sun.COM 	bw_ctl = ((mac_srs->srs_type & SRST_BW_CONTROL) != 0);
5558275SEric Cheng 
5568275SEric Cheng 	/*
5578275SEric Cheng 	 * If we don't have a Rx ring, S/W classification would have done
5588275SEric Cheng 	 * its job and it's a packet meant for us. If we were polling on
5598275SEric Cheng 	 * the default ring (i.e. there was a ring assigned to this SRS),
5608275SEric Cheng 	 * then we need to make sure that the mac address really belongs
5618275SEric Cheng 	 * to us.
5628275SEric Cheng 	 */
5638275SEric Cheng 	hw_classified = mac_srs->srs_ring != NULL &&
5648275SEric Cheng 	    mac_srs->srs_ring->mr_classify_type == MAC_HW_CLASSIFIER;
5658275SEric Cheng 
5668275SEric Cheng 	/*
5678275SEric Cheng 	 * Special clients (eg. VLAN, non ether, etc) need DLS
5688275SEric Cheng 	 * processing in the Rx path. SRST_DLS_BYPASS will be clear for
56911021SEric.Cheng@Sun.COM 	 * such SRSs. Another way of disabling bypass is to set the
57011021SEric.Cheng@Sun.COM 	 * MCIS_RX_BYPASS_DISABLE flag.
5718275SEric Cheng 	 */
57211021SEric.Cheng@Sun.COM 	dls_bypass = ((mac_srs->srs_type & SRST_DLS_BYPASS) != 0) &&
57311021SEric.Cheng@Sun.COM 	    ((mcip->mci_state_flags & MCIS_RX_BYPASS_DISABLE) == 0);
5748275SEric Cheng 
5758275SEric Cheng 	bzero(headmp, MAX_SR_TYPES * sizeof (mblk_t *));
5768275SEric Cheng 	bzero(tailmp, MAX_SR_TYPES * sizeof (mblk_t *));
5778275SEric Cheng 	bzero(cnt, MAX_SR_TYPES * sizeof (int));
5788275SEric Cheng 	bzero(sz, MAX_SR_TYPES * sizeof (size_t));
5798275SEric Cheng 
5808275SEric Cheng 	/*
5818275SEric Cheng 	 * We got a chain from SRS that we need to send to the soft rings.
5828275SEric Cheng 	 * Since squeues for TCP & IPv4 sap poll their soft rings (for
5838275SEric Cheng 	 * performance reasons), we need to separate out v4_tcp, v4_udp
5848275SEric Cheng 	 * and the rest goes in other.
5858275SEric Cheng 	 */
5868275SEric Cheng 	while (head != NULL) {
5878275SEric Cheng 		mp = head;
5888275SEric Cheng 		head = head->b_next;
5898275SEric Cheng 		mp->b_next = NULL;
5908275SEric Cheng 
5918275SEric Cheng 		type = OTH;
5928833SVenu.Iyer@Sun.COM 		sz1 = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);
5938833SVenu.Iyer@Sun.COM 
5948833SVenu.Iyer@Sun.COM 		if (is_ether) {
5958833SVenu.Iyer@Sun.COM 			/*
5968833SVenu.Iyer@Sun.COM 			 * At this point we can be sure the packet at least
5978833SVenu.Iyer@Sun.COM 			 * has an ether header.
5988833SVenu.Iyer@Sun.COM 			 */
5998833SVenu.Iyer@Sun.COM 			if (sz1 < sizeof (struct ether_header)) {
6008833SVenu.Iyer@Sun.COM 				mac_rx_drop_pkt(mac_srs, mp);
6018833SVenu.Iyer@Sun.COM 				continue;
6028833SVenu.Iyer@Sun.COM 			}
6038275SEric Cheng 			ehp = (struct ether_header *)mp->b_rptr;
6048275SEric Cheng 
6058275SEric Cheng 			/*
6068833SVenu.Iyer@Sun.COM 			 * Determine if this is a VLAN or non-VLAN packet.
6078275SEric Cheng 			 */
6088833SVenu.Iyer@Sun.COM 			if ((sap = ntohs(ehp->ether_type)) == VLAN_TPID) {
6098833SVenu.Iyer@Sun.COM 				evhp = (struct ether_vlan_header *)mp->b_rptr;
6108833SVenu.Iyer@Sun.COM 				sap = ntohs(evhp->ether_type);
6118833SVenu.Iyer@Sun.COM 				hdrsize = sizeof (struct ether_vlan_header);
6128275SEric Cheng 				/*
6138833SVenu.Iyer@Sun.COM 				 * Check if the VID of the packet, if any,
6148833SVenu.Iyer@Sun.COM 				 * belongs to this client.
6158275SEric Cheng 				 */
6168275SEric Cheng 				if (!mac_client_check_flow_vid(mcip,
6178275SEric Cheng 				    VLAN_ID(ntohs(evhp->ether_tci)))) {
6188275SEric Cheng 					mac_rx_drop_pkt(mac_srs, mp);
6198275SEric Cheng 					continue;
6208275SEric Cheng 				}
6218833SVenu.Iyer@Sun.COM 			} else {
6228833SVenu.Iyer@Sun.COM 				hdrsize = sizeof (struct ether_header);
6238275SEric Cheng 			}
6248833SVenu.Iyer@Sun.COM 			is_unicast =
6258833SVenu.Iyer@Sun.COM 			    ((((uint8_t *)&ehp->ether_dhost)[0] & 0x01) == 0);
6268833SVenu.Iyer@Sun.COM 			dstaddr = (uint8_t *)&ehp->ether_dhost;
6278833SVenu.Iyer@Sun.COM 		} else {
6288833SVenu.Iyer@Sun.COM 			mac_header_info_t		mhi;
6298833SVenu.Iyer@Sun.COM 
6308833SVenu.Iyer@Sun.COM 			if (mac_header_info((mac_handle_t)mcip->mci_mip,
6318833SVenu.Iyer@Sun.COM 			    mp, &mhi) != 0) {
6328833SVenu.Iyer@Sun.COM 				mac_rx_drop_pkt(mac_srs, mp);
6338833SVenu.Iyer@Sun.COM 				continue;
6348833SVenu.Iyer@Sun.COM 			}
6358833SVenu.Iyer@Sun.COM 			hdrsize = mhi.mhi_hdrsize;
6368833SVenu.Iyer@Sun.COM 			sap = mhi.mhi_bindsap;
6378833SVenu.Iyer@Sun.COM 			is_unicast = (mhi.mhi_dsttype == MAC_ADDRTYPE_UNICAST);
6388833SVenu.Iyer@Sun.COM 			dstaddr = (uint8_t *)mhi.mhi_daddr;
6398833SVenu.Iyer@Sun.COM 		}
6408833SVenu.Iyer@Sun.COM 
6418833SVenu.Iyer@Sun.COM 		if (!dls_bypass) {
6428275SEric Cheng 			FANOUT_ENQUEUE_MP(headmp[type], tailmp[type],
6438275SEric Cheng 			    cnt[type], bw_ctl, sz[type], sz1, mp);
6448275SEric Cheng 			continue;
6458275SEric Cheng 		}
6468275SEric Cheng 
6478833SVenu.Iyer@Sun.COM 		if (sap == ETHERTYPE_IP) {
6488275SEric Cheng 			/*
6498275SEric Cheng 			 * If we are H/W classified, but we have promisc
6508275SEric Cheng 			 * on, then we need to check for the unicast address.
6518275SEric Cheng 			 */
6528275SEric Cheng 			if (hw_classified && mcip->mci_promisc_list != NULL) {
6538275SEric Cheng 				mac_address_t		*map;
6548275SEric Cheng 
6558275SEric Cheng 				rw_enter(&mcip->mci_rw_lock, RW_READER);
6568275SEric Cheng 				map = mcip->mci_unicast;
6578833SVenu.Iyer@Sun.COM 				if (bcmp(dstaddr, map->ma_addr,
6588275SEric Cheng 				    map->ma_len) == 0)
6598275SEric Cheng 					type = UNDEF;
6608275SEric Cheng 				rw_exit(&mcip->mci_rw_lock);
6618833SVenu.Iyer@Sun.COM 			} else if (is_unicast) {
6628275SEric Cheng 				type = UNDEF;
6638275SEric Cheng 			}
6648275SEric Cheng 		}
6658275SEric Cheng 
6668275SEric Cheng 		/*
6678275SEric Cheng 		 * This needs to become a contract with the driver for
6688275SEric Cheng 		 * the fast path.
6698275SEric Cheng 		 *
6708275SEric Cheng 		 * In the normal case the packet will have at least the L2
6718275SEric Cheng 		 * header and the IP + Transport header in the same mblk.
6728275SEric Cheng 		 * This is usually the case when the NIC driver sends up
6738275SEric Cheng 		 * the packet. This is also true when the stack generates
6748275SEric Cheng 		 * a packet that is looped back and when the stack uses the
6758275SEric Cheng 		 * fastpath mechanism. The normal case is optimized for
6768275SEric Cheng 		 * performance and may bypass DLS. All other cases go through
6778275SEric Cheng 		 * the 'OTH' type path without DLS bypass.
6788275SEric Cheng 		 */
6798275SEric Cheng 
6808833SVenu.Iyer@Sun.COM 		ipha = (ipha_t *)(mp->b_rptr + hdrsize);
6818275SEric Cheng 		if ((type != OTH) && MBLK_RX_FANOUT_SLOWPATH(mp, ipha))
6828275SEric Cheng 			type = OTH;
6838275SEric Cheng 
6848275SEric Cheng 		if (type == OTH) {
6858275SEric Cheng 			FANOUT_ENQUEUE_MP(headmp[type], tailmp[type],
6868275SEric Cheng 			    cnt[type], bw_ctl, sz[type], sz1, mp);
6878275SEric Cheng 			continue;
6888275SEric Cheng 		}
6898275SEric Cheng 
6908275SEric Cheng 		ASSERT(type == UNDEF);
6918275SEric Cheng 		/*
6928275SEric Cheng 		 * We look for at least 4 bytes past the IP header to get
6938275SEric Cheng 		 * the port information. If we get an IP fragment, we don't
6948275SEric Cheng 		 * have the port information, and we use just the protocol
6958275SEric Cheng 		 * information.
6968275SEric Cheng 		 */
6978275SEric Cheng 		switch (ipha->ipha_protocol) {
6988275SEric Cheng 		case IPPROTO_TCP:
6998275SEric Cheng 			type = V4_TCP;
7008833SVenu.Iyer@Sun.COM 			mp->b_rptr += hdrsize;
7018275SEric Cheng 			break;
7028275SEric Cheng 		case IPPROTO_UDP:
7038275SEric Cheng 			type = V4_UDP;
7048833SVenu.Iyer@Sun.COM 			mp->b_rptr += hdrsize;
7058275SEric Cheng 			break;
7068275SEric Cheng 		default:
7078275SEric Cheng 			type = OTH;
7088275SEric Cheng 			break;
7098275SEric Cheng 		}
7108275SEric Cheng 
7118275SEric Cheng 		FANOUT_ENQUEUE_MP(headmp[type], tailmp[type], cnt[type],
7128275SEric Cheng 		    bw_ctl, sz[type], sz1, mp);
7138275SEric Cheng 	}
7148275SEric Cheng 
7158275SEric Cheng 	for (type = V4_TCP; type < UNDEF; type++) {
7168275SEric Cheng 		if (headmp[type] != NULL) {
7178833SVenu.Iyer@Sun.COM 			mac_soft_ring_t			*softring;
7188833SVenu.Iyer@Sun.COM 
7198275SEric Cheng 			ASSERT(tailmp[type]->b_next == NULL);
7208275SEric Cheng 			switch (type) {
7218275SEric Cheng 			case V4_TCP:
7228275SEric Cheng 				softring = mac_srs->srs_tcp_soft_rings[0];
7238275SEric Cheng 				break;
7248275SEric Cheng 			case V4_UDP:
7258275SEric Cheng 				softring = mac_srs->srs_udp_soft_rings[0];
7268275SEric Cheng 				break;
7278275SEric Cheng 			case OTH:
7288275SEric Cheng 				softring = mac_srs->srs_oth_soft_rings[0];
7298275SEric Cheng 			}
7308833SVenu.Iyer@Sun.COM 			mac_rx_soft_ring_process(mcip, softring,
7318275SEric Cheng 			    headmp[type], tailmp[type], cnt[type], sz[type]);
7328275SEric Cheng 		}
7338275SEric Cheng 	}
7348275SEric Cheng }
7358275SEric Cheng 
7368275SEric Cheng int	fanout_unalligned = 0;
7378275SEric Cheng 
7388275SEric Cheng /*
7398275SEric Cheng  * mac_rx_srs_long_fanout
7408275SEric Cheng  *
7418275SEric Cheng  * The fanout routine for IPv6
7428275SEric Cheng  */
7438275SEric Cheng static int
7448275SEric Cheng mac_rx_srs_long_fanout(mac_soft_ring_set_t *mac_srs, mblk_t *mp,
7458833SVenu.Iyer@Sun.COM     uint32_t sap, size_t hdrsize, enum pkt_type *type, uint_t *indx)
7468275SEric Cheng {
7478275SEric Cheng 	ip6_t		*ip6h;
7488275SEric Cheng 	uint8_t		*whereptr;
7498275SEric Cheng 	uint_t		hash;
7508275SEric Cheng 	uint16_t	remlen;
7518275SEric Cheng 	uint8_t		nexthdr;
7528275SEric Cheng 	uint16_t	hdr_len;
7538275SEric Cheng 
7548833SVenu.Iyer@Sun.COM 	if (sap == ETHERTYPE_IPV6) {
7558275SEric Cheng 		boolean_t	modifiable = B_TRUE;
7568275SEric Cheng 
7578833SVenu.Iyer@Sun.COM 		ASSERT(MBLKL(mp) >= hdrsize);
7588833SVenu.Iyer@Sun.COM 
7598833SVenu.Iyer@Sun.COM 		ip6h = (ip6_t *)(mp->b_rptr + hdrsize);
7608275SEric Cheng 		if ((unsigned char *)ip6h == mp->b_wptr) {
7618275SEric Cheng 			/*
7628833SVenu.Iyer@Sun.COM 			 * The first mblk_t only includes the mac header.
7638275SEric Cheng 			 * Note that it is safe to change the mp pointer here,
7648275SEric Cheng 			 * as the subsequent operation does not assume mp
7658833SVenu.Iyer@Sun.COM 			 * points to the start of the mac header.
7668275SEric Cheng 			 */
7678275SEric Cheng 			mp = mp->b_cont;
7688275SEric Cheng 
7698275SEric Cheng 			/*
7708275SEric Cheng 			 * Make sure ip6h holds the full ip6_t structure.
7718275SEric Cheng 			 */
7728275SEric Cheng 			if (mp == NULL)
7738275SEric Cheng 				return (-1);
7748275SEric Cheng 
7758275SEric Cheng 			if (MBLKL(mp) < IPV6_HDR_LEN) {
7768275SEric Cheng 				modifiable = (DB_REF(mp) == 1);
7778275SEric Cheng 
7788275SEric Cheng 				if (modifiable &&
7798275SEric Cheng 				    !pullupmsg(mp, IPV6_HDR_LEN)) {
7808275SEric Cheng 					return (-1);
7818275SEric Cheng 				}
7828275SEric Cheng 			}
7838275SEric Cheng 
7848275SEric Cheng 			ip6h = (ip6_t *)mp->b_rptr;
7858275SEric Cheng 		}
7868275SEric Cheng 
7878275SEric Cheng 		if (!modifiable || !(OK_32PTR((char *)ip6h)) ||
7888275SEric Cheng 		    ((unsigned char *)ip6h + IPV6_HDR_LEN > mp->b_wptr)) {
7898275SEric Cheng 			/*
7908275SEric Cheng 			 * If either ip6h is not aligned, or ip6h does not
7918275SEric Cheng 			 * hold the complete ip6_t structure (a pullupmsg()
7928275SEric Cheng 			 * is not an option since it would result in an
7938275SEric Cheng 			 * unaligned ip6h), fanout to the default ring. Note
7948275SEric Cheng 			 * that this may cause packet reordering.
7958275SEric Cheng 			 */
7968275SEric Cheng 			*indx = 0;
7978275SEric Cheng 			*type = OTH;
7988275SEric Cheng 			fanout_unalligned++;
7998275SEric Cheng 			return (0);
8008275SEric Cheng 		}
8018275SEric Cheng 
8028275SEric Cheng 		remlen = ntohs(ip6h->ip6_plen);
8038275SEric Cheng 		nexthdr = ip6h->ip6_nxt;
8048275SEric Cheng 
8058275SEric Cheng 		if (remlen < MIN_EHDR_LEN)
8068275SEric Cheng 			return (-1);
8078275SEric Cheng 		/*
8088275SEric Cheng 		 * Do src based fanout if the mac_src_ipv6_fanout tunable is
8098275SEric Cheng 		 * set to B_TRUE or when mac_ip_hdr_length_v6() fails because
8108275SEric Cheng 		 * of malformed packets or because mblks need to be concatenated using
8118275SEric Cheng 		 * pullupmsg().
8128275SEric Cheng 		 */
813*11878SVenu.Iyer@Sun.COM 		if (mac_src_ipv6_fanout || !mac_ip_hdr_length_v6(ip6h,
814*11878SVenu.Iyer@Sun.COM 		    mp->b_wptr, &hdr_len, &nexthdr, NULL)) {
8158275SEric Cheng 			goto src_based_fanout;
8168275SEric Cheng 		}
8178275SEric Cheng 		whereptr = (uint8_t *)ip6h + hdr_len;
8188275SEric Cheng 
8198275SEric Cheng 		/* If the transport is one of below, we do port based fanout */
8208275SEric Cheng 		switch (nexthdr) {
8218275SEric Cheng 		case IPPROTO_TCP:
8228275SEric Cheng 		case IPPROTO_UDP:
8238275SEric Cheng 		case IPPROTO_SCTP:
8248275SEric Cheng 		case IPPROTO_ESP:
8258275SEric Cheng 			/*
8268275SEric Cheng 			 * If the ports in the transport header are not part of
8278275SEric Cheng 			 * the mblk, do src_based_fanout instead of calling
8288275SEric Cheng 			 * pullupmsg().
8298275SEric Cheng 			 */
8308275SEric Cheng 			if (mp->b_cont != NULL &&
8318275SEric Cheng 			    whereptr + PORTS_SIZE > mp->b_wptr) {
8328275SEric Cheng 				goto src_based_fanout;
8338275SEric Cheng 			}
8348275SEric Cheng 			break;
8358275SEric Cheng 		default:
8368275SEric Cheng 			break;
8378275SEric Cheng 		}
8388275SEric Cheng 
8398275SEric Cheng 		switch (nexthdr) {
8408275SEric Cheng 		case IPPROTO_TCP:
8418275SEric Cheng 			hash = HASH_ADDR(V4_PART_OF_V6(ip6h->ip6_src),
8428275SEric Cheng 			    *(uint32_t *)whereptr);
8438275SEric Cheng 			*indx = COMPUTE_INDEX(hash,
8448275SEric Cheng 			    mac_srs->srs_tcp_ring_count);
8458275SEric Cheng 			*type = OTH;
8468275SEric Cheng 			break;
8478275SEric Cheng 
8488275SEric Cheng 		case IPPROTO_UDP:
8498275SEric Cheng 		case IPPROTO_SCTP:
8508275SEric Cheng 		case IPPROTO_ESP:
8518275SEric Cheng 			if (mac_fanout_type == MAC_FANOUT_DEFAULT) {
8528275SEric Cheng 				hash = HASH_ADDR(V4_PART_OF_V6(ip6h->ip6_src),
8538275SEric Cheng 				    *(uint32_t *)whereptr);
8548275SEric Cheng 				*indx = COMPUTE_INDEX(hash,
8558275SEric Cheng 				    mac_srs->srs_udp_ring_count);
8568275SEric Cheng 			} else {
8578275SEric Cheng 				*indx = mac_srs->srs_ind %
8588275SEric Cheng 				    mac_srs->srs_udp_ring_count;
8598275SEric Cheng 				mac_srs->srs_ind++;
8608275SEric Cheng 			}
8618275SEric Cheng 			*type = OTH;
8628275SEric Cheng 			break;
8638275SEric Cheng 
8648275SEric Cheng 			/* For all other protocol, do source based fanout */
8658275SEric Cheng 			/* For all other protocols, do source based fanout */
8668275SEric Cheng 			goto src_based_fanout;
8678275SEric Cheng 		}
8688275SEric Cheng 	} else {
8698275SEric Cheng 		*indx = 0;
8708275SEric Cheng 		*type = OTH;
8718275SEric Cheng 	}
8728275SEric Cheng 	return (0);
8738275SEric Cheng 
8748275SEric Cheng src_based_fanout:
8758275SEric Cheng 	hash = HASH_ADDR(V4_PART_OF_V6(ip6h->ip6_src), (uint32_t)0);
8768275SEric Cheng 	*indx = COMPUTE_INDEX(hash, mac_srs->srs_oth_ring_count);
8778275SEric Cheng 	*type = OTH;
8788275SEric Cheng 	return (0);
8798275SEric Cheng }
8808275SEric Cheng 
8818275SEric Cheng /*
8828275SEric Cheng  * mac_rx_srs_fanout
8838275SEric Cheng  *
8848275SEric Cheng  * This routine delivers packets destined to an SRS into a soft ring member
8858275SEric Cheng  * of the set.
8868275SEric Cheng  *
8878275SEric Cheng  * Given a chain of packets we need to split it up into multiple sub chains
8888275SEric Cheng  * destined for one of the TCP, UDP or OTH soft rings. Instead of entering
8898275SEric Cheng  * the soft ring one packet at a time, we want to enter it in the form of a
8908275SEric Cheng  * chain; otherwise we get a start/stop behaviour where the worker thread
8918275SEric Cheng  * goes to sleep and then the next packet comes in, forcing it to wake up, etc.
8928275SEric Cheng  *
8938275SEric Cheng  * Note:
8948275SEric Cheng  * Since we know the maximum fanout possible, we create a 2D array
8958275SEric Cheng  * of 'softring types * MAX_SR_FANOUT' for the head, tail, cnt and sz
8968275SEric Cheng  * variables so that we can enter the softrings with chain. We need the
8978275SEric Cheng  * MAX_SR_FANOUT so we can allocate the arrays on the stack (a kmem_alloc
8988275SEric Cheng  * for each packet would be expensive). If we ever want to have the
8998275SEric Cheng  * ability to have unlimited fanout, we should probably declare a head,
9008275SEric Cheng  * tail, cnt, sz with each soft ring (a data struct which contains a softring
9018275SEric Cheng  * along with these members) and create an array of this uber struct so we
9028275SEric Cheng  * don't have to do kmem_alloc.
9038275SEric Cheng  */
9048275SEric Cheng int	fanout_oth1 = 0;
9058275SEric Cheng int	fanout_oth2 = 0;
9068275SEric Cheng int	fanout_oth3 = 0;
9078275SEric Cheng int	fanout_oth4 = 0;
9088275SEric Cheng int	fanout_oth5 = 0;
9098275SEric Cheng 
9108275SEric Cheng static void
9118275SEric Cheng mac_rx_srs_fanout(mac_soft_ring_set_t *mac_srs, mblk_t *head)
9128275SEric Cheng {
9138275SEric Cheng 	struct ether_header		*ehp;
9148833SVenu.Iyer@Sun.COM 	struct ether_vlan_header	*evhp;
9158833SVenu.Iyer@Sun.COM 	uint32_t			sap;
9168275SEric Cheng 	ipha_t				*ipha;
9178833SVenu.Iyer@Sun.COM 	uint8_t				*dstaddr;
9188275SEric Cheng 	uint_t				indx;
9198833SVenu.Iyer@Sun.COM 	size_t				ports_offset;
9208833SVenu.Iyer@Sun.COM 	size_t				ipha_len;
9218833SVenu.Iyer@Sun.COM 	size_t				hdrsize;
9228275SEric Cheng 	uint_t				hash;
9238275SEric Cheng 	mblk_t				*mp;
9248275SEric Cheng 	mblk_t				*headmp[MAX_SR_TYPES][MAX_SR_FANOUT];
9258275SEric Cheng 	mblk_t				*tailmp[MAX_SR_TYPES][MAX_SR_FANOUT];
9268275SEric Cheng 	int				cnt[MAX_SR_TYPES][MAX_SR_FANOUT];
9278275SEric Cheng 	size_t				sz[MAX_SR_TYPES][MAX_SR_FANOUT];
9288275SEric Cheng 	size_t				sz1;
9298833SVenu.Iyer@Sun.COM 	boolean_t			bw_ctl;
9308275SEric Cheng 	boolean_t			hw_classified;
9318833SVenu.Iyer@Sun.COM 	boolean_t			dls_bypass;
9328833SVenu.Iyer@Sun.COM 	boolean_t			is_ether;
9338833SVenu.Iyer@Sun.COM 	boolean_t			is_unicast;
9348275SEric Cheng 	int				fanout_cnt;
9358833SVenu.Iyer@Sun.COM 	enum pkt_type			type;
9368275SEric Cheng 	mac_client_impl_t		*mcip = mac_srs->srs_mcip;
9378833SVenu.Iyer@Sun.COM 
9388833SVenu.Iyer@Sun.COM 	is_ether = (mcip->mci_mip->mi_info.mi_nativemedia == DL_ETHER);
9398833SVenu.Iyer@Sun.COM 	bw_ctl = ((mac_srs->srs_type & SRST_BW_CONTROL) != 0);
9408275SEric Cheng 
9418275SEric Cheng 	/*
9428275SEric Cheng 	 * If we don't have a Rx ring, S/W classification would have done
9438275SEric Cheng 	 * its job and it's a packet meant for us. If we were polling on
9448275SEric Cheng 	 * the default ring (i.e. there was a ring assigned to this SRS),
9458275SEric Cheng 	 * then we need to make sure that the mac address really belongs
9468275SEric Cheng 	 * to us.
9478275SEric Cheng 	 */
9488275SEric Cheng 	hw_classified = mac_srs->srs_ring != NULL &&
9498275SEric Cheng 	    mac_srs->srs_ring->mr_classify_type == MAC_HW_CLASSIFIER;
9508275SEric Cheng 
9518275SEric Cheng 	/*
9528275SEric Cheng 	 * Special clients (eg. VLAN, non ether, etc) need DLS
9538275SEric Cheng 	 * processing in the Rx path. SRST_DLS_BYPASS will be clear for
95411021SEric.Cheng@Sun.COM 	 * such SRSs. Another way of disabling bypass is to set the
95511021SEric.Cheng@Sun.COM 	 * MCIS_RX_BYPASS_DISABLE flag.
9568275SEric Cheng 	 */
95711021SEric.Cheng@Sun.COM 	dls_bypass = ((mac_srs->srs_type & SRST_DLS_BYPASS) != 0) &&
95811021SEric.Cheng@Sun.COM 	    ((mcip->mci_state_flags & MCIS_RX_BYPASS_DISABLE) == 0);
9598275SEric Cheng 
9608275SEric Cheng 	/*
9618275SEric Cheng 	 * Since the softrings are never destroyed and we always
9628275SEric Cheng 	 * create an equal number of softrings for TCP, UDP and the rest,
9638275SEric Cheng 	 * it's OK to check one of them for the count and use it without
9648275SEric Cheng 	 * any lock. In the future, if soft rings get destroyed because
9658275SEric Cheng 	 * of reduction in fanout, we will need to ensure that happens
9668275SEric Cheng 	 * behind the SRS_PROC.
9678275SEric Cheng 	 */
9688275SEric Cheng 	fanout_cnt = mac_srs->srs_tcp_ring_count;
9698275SEric Cheng 
9708275SEric Cheng 	bzero(headmp, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (mblk_t *));
9718275SEric Cheng 	bzero(tailmp, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (mblk_t *));
9728275SEric Cheng 	bzero(cnt, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (int));
9738275SEric Cheng 	bzero(sz, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (size_t));
9748275SEric Cheng 
9758275SEric Cheng 	/*
9768275SEric Cheng 	 * We got a chain from SRS that we need to send to the soft rings.
9778275SEric Cheng 	 * Since squeues for TCP & IPv4 sap poll their soft rings (for
9788275SEric Cheng 	 * performance reasons), we need to separate out v4_tcp, v4_udp
9798275SEric Cheng 	 * and the rest goes in other.
9808275SEric Cheng 	 */
9818275SEric Cheng 	while (head != NULL) {
9828275SEric Cheng 		mp = head;
9838275SEric Cheng 		head = head->b_next;
9848275SEric Cheng 		mp->b_next = NULL;
9858275SEric Cheng 
9868275SEric Cheng 		type = OTH;
9878833SVenu.Iyer@Sun.COM 		sz1 = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);
9888833SVenu.Iyer@Sun.COM 
9898833SVenu.Iyer@Sun.COM 		if (is_ether) {
9908833SVenu.Iyer@Sun.COM 			/*
9918833SVenu.Iyer@Sun.COM 			 * At this point we can be sure the packet at least
9928833SVenu.Iyer@Sun.COM 			 * has an ether header.
9938833SVenu.Iyer@Sun.COM 			 */
9948833SVenu.Iyer@Sun.COM 			if (sz1 < sizeof (struct ether_header)) {
9958833SVenu.Iyer@Sun.COM 				mac_rx_drop_pkt(mac_srs, mp);
9968833SVenu.Iyer@Sun.COM 				continue;
9978833SVenu.Iyer@Sun.COM 			}
9988833SVenu.Iyer@Sun.COM 			ehp = (struct ether_header *)mp->b_rptr;
9998833SVenu.Iyer@Sun.COM 
10008833SVenu.Iyer@Sun.COM 			/*
10018833SVenu.Iyer@Sun.COM 			 * Determine if this is a VLAN or non-VLAN packet.
10028833SVenu.Iyer@Sun.COM 			 */
10038833SVenu.Iyer@Sun.COM 			if ((sap = ntohs(ehp->ether_type)) == VLAN_TPID) {
10048833SVenu.Iyer@Sun.COM 				evhp = (struct ether_vlan_header *)mp->b_rptr;
10058833SVenu.Iyer@Sun.COM 				sap = ntohs(evhp->ether_type);
10068833SVenu.Iyer@Sun.COM 				hdrsize = sizeof (struct ether_vlan_header);
10078275SEric Cheng 				/*
10088833SVenu.Iyer@Sun.COM 				 * Check if the VID of the packet, if any,
10098833SVenu.Iyer@Sun.COM 				 * belongs to this client.
10108275SEric Cheng 				 */
10118833SVenu.Iyer@Sun.COM 				if (!mac_client_check_flow_vid(mcip,
10128833SVenu.Iyer@Sun.COM 				    VLAN_ID(ntohs(evhp->ether_tci)))) {
10138275SEric Cheng 					mac_rx_drop_pkt(mac_srs, mp);
10148275SEric Cheng 					continue;
10158275SEric Cheng 				}
10168833SVenu.Iyer@Sun.COM 			} else {
10178833SVenu.Iyer@Sun.COM 				hdrsize = sizeof (struct ether_header);
10188833SVenu.Iyer@Sun.COM 			}
10198833SVenu.Iyer@Sun.COM 			is_unicast =
10208833SVenu.Iyer@Sun.COM 			    ((((uint8_t *)&ehp->ether_dhost)[0] & 0x01) == 0);
10218833SVenu.Iyer@Sun.COM 			dstaddr = (uint8_t *)&ehp->ether_dhost;
10228833SVenu.Iyer@Sun.COM 		} else {
10238833SVenu.Iyer@Sun.COM 			mac_header_info_t		mhi;
10248833SVenu.Iyer@Sun.COM 
10258833SVenu.Iyer@Sun.COM 			if (mac_header_info((mac_handle_t)mcip->mci_mip,
10268833SVenu.Iyer@Sun.COM 			    mp, &mhi) != 0) {
10278833SVenu.Iyer@Sun.COM 				mac_rx_drop_pkt(mac_srs, mp);
10288833SVenu.Iyer@Sun.COM 				continue;
10298833SVenu.Iyer@Sun.COM 			}
10308833SVenu.Iyer@Sun.COM 			hdrsize = mhi.mhi_hdrsize;
10318833SVenu.Iyer@Sun.COM 			sap = mhi.mhi_bindsap;
10328833SVenu.Iyer@Sun.COM 			is_unicast = (mhi.mhi_dsttype == MAC_ADDRTYPE_UNICAST);
10338833SVenu.Iyer@Sun.COM 			dstaddr = (uint8_t *)mhi.mhi_daddr;
10348833SVenu.Iyer@Sun.COM 		}
10358833SVenu.Iyer@Sun.COM 
10368833SVenu.Iyer@Sun.COM 		if (!dls_bypass) {
10378833SVenu.Iyer@Sun.COM 			if (mac_rx_srs_long_fanout(mac_srs, mp, sap,
10388833SVenu.Iyer@Sun.COM 			    hdrsize, &type, &indx) == -1) {
10398833SVenu.Iyer@Sun.COM 				mac_rx_drop_pkt(mac_srs, mp);
10408833SVenu.Iyer@Sun.COM 				continue;
10418275SEric Cheng 			}
10428275SEric Cheng 
10438275SEric Cheng 			FANOUT_ENQUEUE_MP(headmp[type][indx],
10448275SEric Cheng 			    tailmp[type][indx], cnt[type][indx], bw_ctl,
10458275SEric Cheng 			    sz[type][indx], sz1, mp);
10468275SEric Cheng 			continue;
10478275SEric Cheng 		}
10488275SEric Cheng 
10498275SEric Cheng 
10508275SEric Cheng 		/*
10518275SEric Cheng 		 * If we are using the default Rx ring where H/W or S/W
10528275SEric Cheng 		 * classification has not happened, we need to verify if
10538275SEric Cheng 		 * this unicast packet really belongs to us.
10548275SEric Cheng 		 */
10558833SVenu.Iyer@Sun.COM 		if (sap == ETHERTYPE_IP) {
10568275SEric Cheng 			/*
10578275SEric Cheng 			 * If we are H/W classified, but we have promisc
10588275SEric Cheng 			 * on, then we need to check for the unicast address.
10598275SEric Cheng 			 */
10608275SEric Cheng 			if (hw_classified && mcip->mci_promisc_list != NULL) {
10618275SEric Cheng 				mac_address_t		*map;
10628275SEric Cheng 
10638275SEric Cheng 				rw_enter(&mcip->mci_rw_lock, RW_READER);
10648275SEric Cheng 				map = mcip->mci_unicast;
10658833SVenu.Iyer@Sun.COM 				if (bcmp(dstaddr, map->ma_addr,
10668275SEric Cheng 				    map->ma_len) == 0)
10678275SEric Cheng 					type = UNDEF;
10688275SEric Cheng 				rw_exit(&mcip->mci_rw_lock);
10698833SVenu.Iyer@Sun.COM 			} else if (is_unicast) {
10708275SEric Cheng 				type = UNDEF;
10718275SEric Cheng 			}
10728275SEric Cheng 		}
10738275SEric Cheng 
10748275SEric Cheng 		/*
10758275SEric Cheng 		 * This needs to become a contract with the driver for
10768275SEric Cheng 		 * the fast path.
10778275SEric Cheng 		 */
10788275SEric Cheng 
10798833SVenu.Iyer@Sun.COM 		ipha = (ipha_t *)(mp->b_rptr + hdrsize);
10808275SEric Cheng 		if ((type != OTH) && MBLK_RX_FANOUT_SLOWPATH(mp, ipha)) {
10818275SEric Cheng 			type = OTH;
10828275SEric Cheng 			fanout_oth1++;
10838275SEric Cheng 		}
10848275SEric Cheng 
10858275SEric Cheng 		if (type != OTH) {
10868833SVenu.Iyer@Sun.COM 			uint16_t	frag_offset_flags;
10878833SVenu.Iyer@Sun.COM 
10888275SEric Cheng 			switch (ipha->ipha_protocol) {
10898275SEric Cheng 			case IPPROTO_TCP:
10908275SEric Cheng 			case IPPROTO_UDP:
10918275SEric Cheng 			case IPPROTO_SCTP:
10928275SEric Cheng 			case IPPROTO_ESP:
10938275SEric Cheng 				ipha_len = IPH_HDR_LENGTH(ipha);
10948275SEric Cheng 				if ((uchar_t *)ipha + ipha_len + PORTS_SIZE >
10958275SEric Cheng 				    mp->b_wptr) {
10968275SEric Cheng 					type = OTH;
10978275SEric Cheng 					break;
10988275SEric Cheng 				}
10998275SEric Cheng 				frag_offset_flags =
11008275SEric Cheng 				    ntohs(ipha->ipha_fragment_offset_and_flags);
11018275SEric Cheng 				if ((frag_offset_flags &
11028275SEric Cheng 				    (IPH_MF | IPH_OFFSET)) != 0) {
11038275SEric Cheng 					type = OTH;
11048275SEric Cheng 					fanout_oth3++;
11058275SEric Cheng 					break;
11068275SEric Cheng 				}
11078833SVenu.Iyer@Sun.COM 				ports_offset = hdrsize + ipha_len;
11088275SEric Cheng 				break;
11098275SEric Cheng 			default:
11108275SEric Cheng 				type = OTH;
11118275SEric Cheng 				fanout_oth4++;
11128275SEric Cheng 				break;
11138275SEric Cheng 			}
11148275SEric Cheng 		}
11158275SEric Cheng 
11168275SEric Cheng 		if (type == OTH) {
11178833SVenu.Iyer@Sun.COM 			if (mac_rx_srs_long_fanout(mac_srs, mp, sap,
11188833SVenu.Iyer@Sun.COM 			    hdrsize, &type, &indx) == -1) {
11198275SEric Cheng 				mac_rx_drop_pkt(mac_srs, mp);
11208275SEric Cheng 				continue;
11218275SEric Cheng 			}
11228275SEric Cheng 
11238275SEric Cheng 			FANOUT_ENQUEUE_MP(headmp[type][indx],
11248275SEric Cheng 			    tailmp[type][indx], cnt[type][indx], bw_ctl,
11258275SEric Cheng 			    sz[type][indx], sz1, mp);
11268275SEric Cheng 			continue;
11278275SEric Cheng 		}
11288275SEric Cheng 
11298275SEric Cheng 		ASSERT(type == UNDEF);
11308275SEric Cheng 
11318275SEric Cheng 		/*
11328275SEric Cheng 		 * XXX-Sunay: We should hold srs_lock since ring_count
11338275SEric Cheng 		 * below can change. But if we are always called from
11348275SEric Cheng 		 * mac_rx_srs_drain and SRS_PROC is set, then we can
11358275SEric Cheng 		 * enforce that ring_count can't be changed i.e.
11368275SEric Cheng 		 * to change fanout type or ring count, the calling
11378275SEric Cheng 		 * thread needs to be behind SRS_PROC.
11388275SEric Cheng 		 */
11398275SEric Cheng 		switch (ipha->ipha_protocol) {
11408275SEric Cheng 		case IPPROTO_TCP:
11418275SEric Cheng 			/*
11428275SEric Cheng 			 * Note that for ESP, we fanout on SPI and it is at the
11438275SEric Cheng 			 * same offset as the 2x16-bit ports. So it is clumped
11448275SEric Cheng 			 * along with TCP, UDP and SCTP.
11458275SEric Cheng 			 */
11468275SEric Cheng 			hash = HASH_ADDR(ipha->ipha_src,
11478275SEric Cheng 			    *(uint32_t *)(mp->b_rptr + ports_offset));
11488275SEric Cheng 			indx = COMPUTE_INDEX(hash, mac_srs->srs_tcp_ring_count);
11498275SEric Cheng 			type = V4_TCP;
11508833SVenu.Iyer@Sun.COM 			mp->b_rptr += hdrsize;
11518275SEric Cheng 			break;
11528275SEric Cheng 		case IPPROTO_UDP:
11538275SEric Cheng 		case IPPROTO_SCTP:
11548275SEric Cheng 		case IPPROTO_ESP:
11558275SEric Cheng 			if (mac_fanout_type == MAC_FANOUT_DEFAULT) {
11568275SEric Cheng 				hash = HASH_ADDR(ipha->ipha_src,
11578275SEric Cheng 				    *(uint32_t *)(mp->b_rptr + ports_offset));
11588275SEric Cheng 				indx = COMPUTE_INDEX(hash,
11598275SEric Cheng 				    mac_srs->srs_udp_ring_count);
11608275SEric Cheng 			} else {
11618275SEric Cheng 				indx = mac_srs->srs_ind %
11628275SEric Cheng 				    mac_srs->srs_udp_ring_count;
11638275SEric Cheng 				mac_srs->srs_ind++;
11648275SEric Cheng 			}
11658275SEric Cheng 			type = V4_UDP;
11668833SVenu.Iyer@Sun.COM 			mp->b_rptr += hdrsize;
11678275SEric Cheng 			break;
11688833SVenu.Iyer@Sun.COM 		default:
11698833SVenu.Iyer@Sun.COM 			indx = 0;
11708833SVenu.Iyer@Sun.COM 			type = OTH;
11718275SEric Cheng 		}
11728275SEric Cheng 
11738275SEric Cheng 		FANOUT_ENQUEUE_MP(headmp[type][indx], tailmp[type][indx],
11748275SEric Cheng 		    cnt[type][indx], bw_ctl, sz[type][indx], sz1, mp);
11758275SEric Cheng 	}
11768275SEric Cheng 
11778275SEric Cheng 	for (type = V4_TCP; type < UNDEF; type++) {
11788833SVenu.Iyer@Sun.COM 		int	i;
11798833SVenu.Iyer@Sun.COM 
11808275SEric Cheng 		for (i = 0; i < fanout_cnt; i++) {
11818275SEric Cheng 			if (headmp[type][i] != NULL) {
11828833SVenu.Iyer@Sun.COM 				mac_soft_ring_t	*softring;
11838833SVenu.Iyer@Sun.COM 
11848275SEric Cheng 				ASSERT(tailmp[type][i]->b_next == NULL);
11858275SEric Cheng 				switch (type) {
11868275SEric Cheng 				case V4_TCP:
11878275SEric Cheng 					softring =
11888275SEric Cheng 					    mac_srs->srs_tcp_soft_rings[i];
11898275SEric Cheng 					break;
11908275SEric Cheng 				case V4_UDP:
11918275SEric Cheng 					softring =
11928275SEric Cheng 					    mac_srs->srs_udp_soft_rings[i];
11938275SEric Cheng 					break;
11948275SEric Cheng 				case OTH:
11958275SEric Cheng 					softring =
11968275SEric Cheng 					    mac_srs->srs_oth_soft_rings[i];
11978275SEric Cheng 					break;
11988275SEric Cheng 				}
11998833SVenu.Iyer@Sun.COM 				mac_rx_soft_ring_process(mcip,
12008275SEric Cheng 				    softring, headmp[type][i], tailmp[type][i],
12018275SEric Cheng 				    cnt[type][i], sz[type][i]);
12028275SEric Cheng 			}
12038275SEric Cheng 		}
12048275SEric Cheng 	}
12058275SEric Cheng }
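
/*
 * A simplified sketch of the fanout index selection used above (the real
 * HASH_ADDR/COMPUTE_INDEX macros are defined elsewhere and may mix the
 * bits differently; this only shows the idea):
 *
 *	hash = ipha->ipha_src ^ *(uint32_t *)(mp->b_rptr + ports_offset);
 *	indx = hash % mac_srs->srs_tcp_ring_count;
 *
 * i.e. the source address is combined with the first 4 bytes of the
 * transport header (the two 16-bit ports, or the SPI for ESP) and the
 * result is reduced modulo the soft ring count for that protocol type,
 * so packets of one connection always land on the same soft ring.
 */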
12068275SEric Cheng 
12078275SEric Cheng #define	SRS_BYTES_TO_PICKUP	150000
12088275SEric Cheng ssize_t	max_bytes_to_pickup = SRS_BYTES_TO_PICKUP;
12098275SEric Cheng 
12108275SEric Cheng /*
12118275SEric Cheng  * mac_rx_srs_poll_ring
12128275SEric Cheng  *
12138275SEric Cheng  * This SRS Poll thread uses this routine to poll the underlying hardware
12148275SEric Cheng  * Rx ring to get a chain of packets. It can inline process that chain
12158275SEric Cheng  * if mac_latency_optimize is set (default) or signal the SRS worker thread
12168275SEric Cheng  * to do the remaining processing.
12178275SEric Cheng  *
12188275SEric Cheng  * Since packets come into the system via the interrupt or poll path, we also
12198275SEric Cheng  * update the stats and deal with promiscuous clients here.
12208275SEric Cheng  */
12218275SEric Cheng void
12228275SEric Cheng mac_rx_srs_poll_ring(mac_soft_ring_set_t *mac_srs)
12238275SEric Cheng {
12248275SEric Cheng 	kmutex_t 		*lock = &mac_srs->srs_lock;
12258275SEric Cheng 	kcondvar_t 		*async = &mac_srs->srs_cv;
12268275SEric Cheng 	mac_srs_rx_t		*srs_rx = &mac_srs->srs_rx;
12278275SEric Cheng 	mblk_t 			*head, *tail, *mp;
12288275SEric Cheng 	callb_cpr_t 		cprinfo;
12298275SEric Cheng 	ssize_t 		bytes_to_pickup;
12308275SEric Cheng 	size_t 			sz;
12318275SEric Cheng 	int			count;
12328275SEric Cheng 	mac_client_impl_t	*smcip;
12338275SEric Cheng 
12348275SEric Cheng 	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "mac_srs_poll");
12358275SEric Cheng 	mutex_enter(lock);
12368275SEric Cheng 
12378275SEric Cheng start:
12388275SEric Cheng 	for (;;) {
12398275SEric Cheng 		if (mac_srs->srs_state & SRS_PAUSE)
12408275SEric Cheng 			goto done;
12418275SEric Cheng 
12428275SEric Cheng 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
12438275SEric Cheng 		cv_wait(async, lock);
12448275SEric Cheng 		CALLB_CPR_SAFE_END(&cprinfo, lock);
12458275SEric Cheng 
12468275SEric Cheng 		if (mac_srs->srs_state & SRS_PAUSE)
12478275SEric Cheng 			goto done;
12488275SEric Cheng 
12498275SEric Cheng check_again:
12508275SEric Cheng 		if (mac_srs->srs_type & SRST_BW_CONTROL) {
12518275SEric Cheng 			/*
12528275SEric Cheng 			 * It's possible that we will exceed the total
12538275SEric Cheng 			 * packets queued in case this SRS is part of the
12548275SEric Cheng 			 * Rx ring group, since more than one poll thread can
12558275SEric Cheng 			 * be pulling up to the max allowed packets at the
12568275SEric Cheng 			 * same time, but that should be OK.
12578275SEric Cheng 			 * but that should be OK.
12588275SEric Cheng 			 */
12598275SEric Cheng 			mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
12608275SEric Cheng 			bytes_to_pickup =
12618275SEric Cheng 			    mac_srs->srs_bw->mac_bw_drop_threshold -
12628275SEric Cheng 			    mac_srs->srs_bw->mac_bw_sz;
12638275SEric Cheng 			/*
12648275SEric Cheng 			 * We shouldn't have been signalled if we
12658275SEric Cheng 			 * have 0 or fewer bytes to pick, but since
12668275SEric Cheng 			 * some of the bytes accounting is driver
12678275SEric Cheng 			 * dependent, we do the safety check.
12688275SEric Cheng 			 */
12698275SEric Cheng 			if (bytes_to_pickup < 0)
12708275SEric Cheng 				bytes_to_pickup = 0;
12718275SEric Cheng 			mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
12728275SEric Cheng 		} else {
12738275SEric Cheng 			/*
12748275SEric Cheng 			 * TODO: Need to change the polling API
12758275SEric Cheng 			 * to add a packet count and a flag which
12768275SEric Cheng 			 * tells the driver whether we want packets
12778275SEric Cheng 			 * based on a count, or bytes, or all the
12788275SEric Cheng 			 * packets queued in the driver/HW. This
12798275SEric Cheng 			 * way, we never have to check the limits
12808275SEric Cheng 			 * on poll path. We truly let only as many
12818275SEric Cheng 			 * packets enter the system as we are willing
12828275SEric Cheng 			 * to process or queue.
12838275SEric Cheng 			 *
12848275SEric Cheng 			 * Something along the lines of
12858275SEric Cheng 			 * pkts_to_pickup = mac_soft_ring_max_q_cnt -
12868275SEric Cheng 			 *	mac_srs->srs_poll_pkt_cnt
12878275SEric Cheng 			 */
12888275SEric Cheng 
12898275SEric Cheng 			/*
12908275SEric Cheng 			 * Since we are not doing B/W control, pick
12918275SEric Cheng 			 * as many packets as allowed.
12928275SEric Cheng 			 */
12938275SEric Cheng 			bytes_to_pickup = max_bytes_to_pickup;
12948275SEric Cheng 		}
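
		/*
		 * A small worked example of the limit computed above (the
		 * numbers are hypothetical): with a mac_bw_drop_threshold of
		 * 120000 bytes and mac_bw_sz (bytes already queued) of 90000,
		 * the poll below asks the driver for at most 30000 bytes.
		 * Without B/W control we simply ask for max_bytes_to_pickup,
		 * which defaults to SRS_BYTES_TO_PICKUP (150000 bytes).
		 */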
12958275SEric Cheng 
12968275SEric Cheng 		/* Poll the underlying Hardware */
12978275SEric Cheng 		mutex_exit(lock);
12988275SEric Cheng 		head = MAC_HWRING_POLL(mac_srs->srs_ring, (int)bytes_to_pickup);
12998275SEric Cheng 		mutex_enter(lock);
13008275SEric Cheng 
13018275SEric Cheng 		ASSERT((mac_srs->srs_state & SRS_POLL_THR_OWNER) ==
13028275SEric Cheng 		    SRS_POLL_THR_OWNER);
13038275SEric Cheng 
13048275SEric Cheng 		mp = tail = head;
13058275SEric Cheng 		count = 0;
13068275SEric Cheng 		sz = 0;
13078275SEric Cheng 		while (mp != NULL) {
13088275SEric Cheng 			tail = mp;
13098275SEric Cheng 			sz += msgdsize(mp);
13108275SEric Cheng 			mp = mp->b_next;
13118275SEric Cheng 			count++;
13128275SEric Cheng 		}
13138275SEric Cheng 
13148275SEric Cheng 		if (head != NULL) {
13158275SEric Cheng 			tail->b_next = NULL;
13168275SEric Cheng 			smcip = mac_srs->srs_mcip;
13178275SEric Cheng 
1318*11878SVenu.Iyer@Sun.COM 			SRS_RX_STAT_UPDATE(mac_srs, pollbytes, sz);
1319*11878SVenu.Iyer@Sun.COM 			SRS_RX_STAT_UPDATE(mac_srs, pollcnt, count);
13208275SEric Cheng 
13218275SEric Cheng 			/*
13228275SEric Cheng 			 * If there are any promiscuous mode callbacks
13238275SEric Cheng 			 * defined for this MAC client, pass them a copy
13248275SEric Cheng 			 * if appropriate and also update the counters.
13258275SEric Cheng 			 */
13268275SEric Cheng 			if (smcip != NULL) {
13278275SEric Cheng 				if (smcip->mci_mip->mi_promisc_list != NULL) {
13288275SEric Cheng 					mutex_exit(lock);
13298275SEric Cheng 					mac_promisc_dispatch(smcip->mci_mip,
13308275SEric Cheng 					    head, NULL);
13318275SEric Cheng 					mutex_enter(lock);
13328275SEric Cheng 				}
13338275SEric Cheng 			}
13348275SEric Cheng 			if (mac_srs->srs_type & SRST_BW_CONTROL) {
13358275SEric Cheng 				mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
13368275SEric Cheng 				mac_srs->srs_bw->mac_bw_polled += sz;
13378275SEric Cheng 				mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
13388275SEric Cheng 			}
13398275SEric Cheng 			MAC_RX_SRS_ENQUEUE_CHAIN(mac_srs, head, tail,
13408275SEric Cheng 			    count, sz);
13418275SEric Cheng 			if (count <= 10)
1342*11878SVenu.Iyer@Sun.COM 				srs_rx->sr_stat.mrs_chaincntundr10++;
13438275SEric Cheng 			else if (count > 10 && count <= 50)
1344*11878SVenu.Iyer@Sun.COM 				srs_rx->sr_stat.mrs_chaincnt10to50++;
13458275SEric Cheng 			else
1346*11878SVenu.Iyer@Sun.COM 				srs_rx->sr_stat.mrs_chaincntover50++;
13478275SEric Cheng 		}
13488275SEric Cheng 
13498275SEric Cheng 		/*
13508275SEric Cheng 		 * We are guaranteed that SRS_PROC will be set if we
13518275SEric Cheng 		 * are here. Also, poll thread gets to run only if
13528275SEric Cheng 		 * the drain was being done by a worker thread, although
13538275SEric Cheng 		 * it's possible that the worker thread is still running
13548275SEric Cheng 		 * and poll thread was sent down to keep the pipeline
13558275SEric Cheng 		 * going instead of doing a complete drain and then
13568275SEric Cheng 		 * trying to poll the NIC.
13578275SEric Cheng 		 *
13588275SEric Cheng 		 * So we need to check SRS_WORKER flag to make sure
13598275SEric Cheng 		 * that the worker thread is not processing the queue
13608275SEric Cheng 		 * in parallel to us. The flags and conditions are
13618275SEric Cheng 		 * protected by the srs_lock to prevent any race. We
13628275SEric Cheng 		 * ensure that we don't drop the srs_lock from now
13638275SEric Cheng 		 * till the end and similarly we don't drop the srs_lock
13648275SEric Cheng 		 * in mac_rx_srs_drain() till similar condition checks
13658275SEric Cheng 		 * are complete. The mac_rx_srs_drain() needs to ensure
13668275SEric Cheng 		 * that the SRS_WORKER flag remains set as long as it's
13678275SEric Cheng 		 * processing the queue.
13688275SEric Cheng 		 */
13698275SEric Cheng 		if (!(mac_srs->srs_state & SRS_WORKER) &&
13708275SEric Cheng 		    (mac_srs->srs_first != NULL)) {
13718275SEric Cheng 			/*
13728275SEric Cheng 			 * We have packets to process and worker thread
13738833SVenu.Iyer@Sun.COM 			 * is not running. Check to see if poll thread is
13748833SVenu.Iyer@Sun.COM 			 * allowed to process.
13758275SEric Cheng 			 */
13768833SVenu.Iyer@Sun.COM 			if (mac_srs->srs_state & SRS_LATENCY_OPT) {
13778275SEric Cheng 				mac_srs->srs_drain_func(mac_srs, SRS_POLL_PROC);
13789209SEric Cheng 				if (!(mac_srs->srs_state & SRS_PAUSE) &&
13799209SEric Cheng 				    srs_rx->sr_poll_pkt_cnt <=
13808275SEric Cheng 				    srs_rx->sr_lowat) {
13818275SEric Cheng 					srs_rx->sr_poll_again++;
13828275SEric Cheng 					goto check_again;
13838833SVenu.Iyer@Sun.COM 				}
13848833SVenu.Iyer@Sun.COM 				/*
13858833SVenu.Iyer@Sun.COM 				 * We are already above low water mark
13868833SVenu.Iyer@Sun.COM 				 * so stay in the polling mode but no
13878833SVenu.Iyer@Sun.COM 				 * need to poll. Once we dip below
13888833SVenu.Iyer@Sun.COM 				 * the polling threshold, the processing
13898833SVenu.Iyer@Sun.COM 				 * thread (soft ring) will signal us
13908833SVenu.Iyer@Sun.COM 				 * to poll again (MAC_UPDATE_SRS_COUNT)
13918833SVenu.Iyer@Sun.COM 				 */
13928833SVenu.Iyer@Sun.COM 				srs_rx->sr_poll_drain_no_poll++;
13938833SVenu.Iyer@Sun.COM 				mac_srs->srs_state &= ~(SRS_PROC|SRS_GET_PKTS);
13948833SVenu.Iyer@Sun.COM 				/*
13958833SVenu.Iyer@Sun.COM 				 * In B/W control case, its possible
13968833SVenu.Iyer@Sun.COM 				 * that the backlog built up due to
13978833SVenu.Iyer@Sun.COM 				 * B/W limit being reached and packets
13988833SVenu.Iyer@Sun.COM 				 * are queued only in SRS. In this case,
13998833SVenu.Iyer@Sun.COM 				 * we should schedule worker thread
14008833SVenu.Iyer@Sun.COM 				 * since no one else will wake us up.
14018833SVenu.Iyer@Sun.COM 				 */
14028833SVenu.Iyer@Sun.COM 				if ((mac_srs->srs_type & SRST_BW_CONTROL) &&
14038833SVenu.Iyer@Sun.COM 				    (mac_srs->srs_tid == NULL)) {
14048833SVenu.Iyer@Sun.COM 					mac_srs->srs_tid =
14058833SVenu.Iyer@Sun.COM 					    timeout(mac_srs_fire, mac_srs, 1);
14068833SVenu.Iyer@Sun.COM 					srs_rx->sr_poll_worker_wakeup++;
14078275SEric Cheng 				}
14088275SEric Cheng 			} else {
14098275SEric Cheng 				/*
14108275SEric Cheng 				 * Wakeup the worker thread for more processing.
14118275SEric Cheng 				 * We optimize for throughput in this case.
14128275SEric Cheng 				 */
14138275SEric Cheng 				mac_srs->srs_state &= ~(SRS_PROC|SRS_GET_PKTS);
14148275SEric Cheng 				MAC_SRS_WORKER_WAKEUP(mac_srs);
14158275SEric Cheng 				srs_rx->sr_poll_sig_worker++;
14168275SEric Cheng 			}
14178275SEric Cheng 		} else if ((mac_srs->srs_first == NULL) &&
14188275SEric Cheng 		    !(mac_srs->srs_state & SRS_WORKER)) {
14198275SEric Cheng 			/*
14208275SEric Cheng 			 * There is nothing queued in SRS and
14218275SEric Cheng 			 * no worker thread running. Plus we
14228275SEric Cheng 			 * didn't get anything from the H/W
14238275SEric Cheng 			 * either (head == NULL).
14248275SEric Cheng 			 */
14258275SEric Cheng 			ASSERT(head == NULL);
14268275SEric Cheng 			mac_srs->srs_state &=
14278275SEric Cheng 			    ~(SRS_PROC|SRS_GET_PKTS);
14288275SEric Cheng 
14298275SEric Cheng 			/*
14308275SEric Cheng 			 * If we have packets in the soft ring, don't allow
14318275SEric Cheng 			 * more packets to come into this SRS by keeping the
14328275SEric Cheng 			 * interrupts off but not polling the H/W. The
14338275SEric Cheng 			 * poll thread will get signaled as soon as
14348275SEric Cheng 			 * srs_poll_pkt_cnt dips below poll threshold.
14358275SEric Cheng 			 */
14368275SEric Cheng 			if (srs_rx->sr_poll_pkt_cnt == 0) {
14378275SEric Cheng 				srs_rx->sr_poll_intr_enable++;
14388275SEric Cheng 				MAC_SRS_POLLING_OFF(mac_srs);
14398275SEric Cheng 			} else {
14408275SEric Cheng 				/*
14418275SEric Cheng 				 * We know nothing is queued in SRS
14428275SEric Cheng 				 * since we are here after checking
14438275SEric Cheng 				 * srs_first is NULL. The backlog
14448275SEric Cheng 				 * is entirely due to packets queued
14458275SEric Cheng 				 * in Soft ring which will wake us up
14468275SEric Cheng 				 * and get the interface out of polling
14478275SEric Cheng 				 * mode once the backlog dips below
14488275SEric Cheng 				 * sr_poll_thres.
14498275SEric Cheng 				 */
14508275SEric Cheng 				srs_rx->sr_poll_no_poll++;
14518275SEric Cheng 			}
14528275SEric Cheng 		} else {
14538275SEric Cheng 			/*
14548275SEric Cheng 			 * Worker thread is already running.
14558275SEric Cheng 			 * Nothing much to do. If the polling
14568275SEric Cheng 			 * was enabled, worker thread will deal
14578275SEric Cheng 			 * with that.
14588275SEric Cheng 			 */
14598275SEric Cheng 			mac_srs->srs_state &= ~SRS_GET_PKTS;
14608275SEric Cheng 			srs_rx->sr_poll_goto_sleep++;
14618275SEric Cheng 		}
14628275SEric Cheng 	}
14638275SEric Cheng done:
14648275SEric Cheng 	mac_srs->srs_state |= SRS_POLL_THR_QUIESCED;
14658275SEric Cheng 	cv_signal(&mac_srs->srs_async);
14668275SEric Cheng 	/*
14678275SEric Cheng 	 * If this is a temporary quiesce then wait for the restart signal
14688275SEric Cheng 	 * from the srs worker. Then clear the flags and signal the srs worker
14698275SEric Cheng 	 * to ensure a positive handshake and go back to start.
14708275SEric Cheng 	 */
14718275SEric Cheng 	while (!(mac_srs->srs_state & (SRS_CONDEMNED | SRS_POLL_THR_RESTART)))
14728275SEric Cheng 		cv_wait(async, lock);
14738275SEric Cheng 	if (mac_srs->srs_state & SRS_POLL_THR_RESTART) {
14748275SEric Cheng 		ASSERT(!(mac_srs->srs_state & SRS_CONDEMNED));
14758275SEric Cheng 		mac_srs->srs_state &=
14768275SEric Cheng 		    ~(SRS_POLL_THR_QUIESCED | SRS_POLL_THR_RESTART);
14778275SEric Cheng 		cv_signal(&mac_srs->srs_async);
14788275SEric Cheng 		goto start;
14798275SEric Cheng 	} else {
14808275SEric Cheng 		mac_srs->srs_state |= SRS_POLL_THR_EXITED;
14818275SEric Cheng 		cv_signal(&mac_srs->srs_async);
14828275SEric Cheng 		CALLB_CPR_EXIT(&cprinfo);
14838275SEric Cheng 		thread_exit();
14848275SEric Cheng 	}
14858275SEric Cheng }
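
/*
 * A condensed sketch of the poll thread's main loop above (simplified;
 * bandwidth control, stats and the pause/restart handshake are omitted):
 *
 *	for (;;) {
 *		cv_wait(&mac_srs->srs_cv, &mac_srs->srs_lock);
 *		head = MAC_HWRING_POLL(mac_srs->srs_ring, bytes_to_pickup);
 *		MAC_RX_SRS_ENQUEUE_CHAIN(mac_srs, head, tail, count, sz);
 *		if (SRS_LATENCY_OPT is set)
 *			drain inline via srs_drain_func, polling again
 *			while below sr_lowat;
 *		else
 *			wake the worker thread (MAC_SRS_WORKER_WAKEUP);
 *	}
 */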
14868275SEric Cheng 
14878275SEric Cheng /*
14888275SEric Cheng  * mac_srs_pick_chain
14898275SEric Cheng  *
14908275SEric Cheng  * In Bandwidth control case, checks how many packets can be processed
14918275SEric Cheng  * and return them in a sub chain.
14928275SEric Cheng  */
14938275SEric Cheng static mblk_t *
14948275SEric Cheng mac_srs_pick_chain(mac_soft_ring_set_t *mac_srs, mblk_t **chain_tail,
14958275SEric Cheng     size_t *chain_sz, int *chain_cnt)
14968275SEric Cheng {
14978275SEric Cheng 	mblk_t 			*head = NULL;
14988275SEric Cheng 	mblk_t 			*tail = NULL;
14998275SEric Cheng 	size_t			sz;
15008275SEric Cheng 	size_t 			tsz = 0;
15018275SEric Cheng 	int			cnt = 0;
15028275SEric Cheng 	mblk_t 			*mp;
15038275SEric Cheng 
15048275SEric Cheng 	ASSERT(MUTEX_HELD(&mac_srs->srs_lock));
15058275SEric Cheng 	mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
15068275SEric Cheng 	if (((mac_srs->srs_bw->mac_bw_used + mac_srs->srs_size) <=
15078275SEric Cheng 	    mac_srs->srs_bw->mac_bw_limit) ||
15088275SEric Cheng 	    (mac_srs->srs_bw->mac_bw_limit == 0)) {
15098275SEric Cheng 		mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
15108275SEric Cheng 		head = mac_srs->srs_first;
15118275SEric Cheng 		mac_srs->srs_first = NULL;
15128275SEric Cheng 		*chain_tail = mac_srs->srs_last;
15138275SEric Cheng 		mac_srs->srs_last = NULL;
15148275SEric Cheng 		*chain_sz = mac_srs->srs_size;
15158275SEric Cheng 		*chain_cnt = mac_srs->srs_count;
15168275SEric Cheng 		mac_srs->srs_count = 0;
15178275SEric Cheng 		mac_srs->srs_size = 0;
15188275SEric Cheng 		return (head);
15198275SEric Cheng 	}
15208275SEric Cheng 
15218275SEric Cheng 	/*
15228275SEric Cheng 	 * Can't clear the entire backlog.
15238275SEric Cheng 	 * Need to find how many packets to pick
15248275SEric Cheng 	 */
15258275SEric Cheng 	ASSERT(MUTEX_HELD(&mac_srs->srs_bw->mac_bw_lock));
15268275SEric Cheng 	while ((mp = mac_srs->srs_first) != NULL) {
15278275SEric Cheng 		sz = msgdsize(mp);
15288275SEric Cheng 		if ((tsz + sz + mac_srs->srs_bw->mac_bw_used) >
15298275SEric Cheng 		    mac_srs->srs_bw->mac_bw_limit) {
15308275SEric Cheng 			if (!(mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED))
15318275SEric Cheng 				mac_srs->srs_bw->mac_bw_state |=
15328275SEric Cheng 				    SRS_BW_ENFORCED;
15338275SEric Cheng 			break;
15348275SEric Cheng 		}
15358275SEric Cheng 
15368275SEric Cheng 		/*
15378275SEric Cheng 		 * The size & cnt are decremented by the softrings
15388275SEric Cheng 		 * when they send up the packet, for polling to work
15398275SEric Cheng 		 * properly.
15408275SEric Cheng 		 */
15418275SEric Cheng 		tsz += sz;
15428275SEric Cheng 		cnt++;
15438275SEric Cheng 		mac_srs->srs_count--;
15448275SEric Cheng 		mac_srs->srs_size -= sz;
15458275SEric Cheng 		if (tail != NULL)
15468275SEric Cheng 			tail->b_next = mp;
15478275SEric Cheng 		else
15488275SEric Cheng 			head = mp;
15498275SEric Cheng 		tail = mp;
15508275SEric Cheng 		mac_srs->srs_first = mac_srs->srs_first->b_next;
15518275SEric Cheng 	}
15528275SEric Cheng 	mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
15538275SEric Cheng 	if (mac_srs->srs_first == NULL)
15548275SEric Cheng 		mac_srs->srs_last = NULL;
15558275SEric Cheng 
15568275SEric Cheng 	if (tail != NULL)
15578275SEric Cheng 		tail->b_next = NULL;
15588275SEric Cheng 	*chain_tail = tail;
15598275SEric Cheng 	*chain_cnt = cnt;
15608275SEric Cheng 	*chain_sz = tsz;
15618275SEric Cheng 
15628275SEric Cheng 	return (head);
15638275SEric Cheng }
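
/*
 * Worked example of the selection above (the numbers are hypothetical):
 * with a mac_bw_limit of 10000 bytes, mac_bw_used of 7000 and a backlog of
 * 1500-byte packets, only the first two packets (3000 bytes) are picked;
 * adding a third would push usage past the limit, so SRS_BW_ENFORCED is
 * set and the rest of the chain stays on srs_first for a later tick.
 */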
15648275SEric Cheng 
15658275SEric Cheng /*
15668275SEric Cheng  * mac_rx_srs_drain
15678275SEric Cheng  *
15688275SEric Cheng  * The SRS drain routine. Gets to run to clear the queue. Any thread
15698275SEric Cheng  * (worker, interrupt, poll) can call this based on processing model.
15708275SEric Cheng  * The first thing we do is disable interrupts if possible and then
15718275SEric Cheng  * drain the queue. We also try to poll the underlying hardware if
15728275SEric Cheng  * there is a dedicated hardware Rx ring assigned to this SRS.
15738275SEric Cheng  *
15748275SEric Cheng  * There is an equivalent drain routine in bandwidth control mode,
15758275SEric Cheng  * mac_rx_srs_drain_bw. There is some code duplication between the two
15768275SEric Cheng  * routines but they are highly performance sensitive and are easier
15778275SEric Cheng  * to read/debug if they stay separate. Any code changes here might
15788275SEric Cheng  * also apply to mac_rx_srs_drain_bw as well.
15798275SEric Cheng  */
15808275SEric Cheng void
15818275SEric Cheng mac_rx_srs_drain(mac_soft_ring_set_t *mac_srs, uint_t proc_type)
15828275SEric Cheng {
15838275SEric Cheng 	mblk_t 			*head;
15848275SEric Cheng 	mblk_t			*tail;
15858275SEric Cheng 	timeout_id_t 		tid;
15868275SEric Cheng 	int			cnt = 0;
15878275SEric Cheng 	mac_client_impl_t	*mcip = mac_srs->srs_mcip;
15888275SEric Cheng 	mac_srs_rx_t		*srs_rx = &mac_srs->srs_rx;
15898275SEric Cheng 
15908275SEric Cheng 	ASSERT(MUTEX_HELD(&mac_srs->srs_lock));
15918275SEric Cheng 	ASSERT(!(mac_srs->srs_type & SRST_BW_CONTROL));
15928833SVenu.Iyer@Sun.COM 
15938275SEric Cheng 	/* If we are blanked i.e. can't do upcalls, then we are done */
15948275SEric Cheng 	if (mac_srs->srs_state & (SRS_BLANK | SRS_PAUSE)) {
15958275SEric Cheng 		ASSERT((mac_srs->srs_type & SRST_NO_SOFT_RINGS) ||
15968275SEric Cheng 		    (mac_srs->srs_state & SRS_PAUSE));
15978275SEric Cheng 		goto out;
15988275SEric Cheng 	}
15998275SEric Cheng 
16008275SEric Cheng 	if (mac_srs->srs_first == NULL)
16018275SEric Cheng 		goto out;
16028275SEric Cheng 
16038833SVenu.Iyer@Sun.COM 	if (!(mac_srs->srs_state & SRS_LATENCY_OPT) &&
16048833SVenu.Iyer@Sun.COM 	    (srs_rx->sr_poll_pkt_cnt <= srs_rx->sr_lowat)) {
16058833SVenu.Iyer@Sun.COM 		/*
16068833SVenu.Iyer@Sun.COM 		 * In the normal case, the SRS worker thread does no
16078833SVenu.Iyer@Sun.COM 		 * work and we wait for a backlog to build up before
16088833SVenu.Iyer@Sun.COM 		 * we switch into polling mode. In case we are
16098833SVenu.Iyer@Sun.COM 		 * optimizing for throughput, we use the worker thread
16108833SVenu.Iyer@Sun.COM 		 * as well. The goal is to let worker thread process
16118833SVenu.Iyer@Sun.COM 		 * the queue and poll thread to feed packets into
16128833SVenu.Iyer@Sun.COM 		 * the queue. As such, we should signal the poll
16138833SVenu.Iyer@Sun.COM 		 * thread to try and get more packets.
16148833SVenu.Iyer@Sun.COM 		 *
16158833SVenu.Iyer@Sun.COM 		 * We could have pulled this check in the POLL_RING
16168833SVenu.Iyer@Sun.COM 		 * macro itself but keeping it explicit here makes
16178833SVenu.Iyer@Sun.COM 		 * the architecture easier to understand.
16188833SVenu.Iyer@Sun.COM 		 */
16198833SVenu.Iyer@Sun.COM 		MAC_SRS_POLL_RING(mac_srs);
16208833SVenu.Iyer@Sun.COM 	}
16218833SVenu.Iyer@Sun.COM 
16228833SVenu.Iyer@Sun.COM again:
16238275SEric Cheng 	head = mac_srs->srs_first;
16248275SEric Cheng 	mac_srs->srs_first = NULL;
16258275SEric Cheng 	tail = mac_srs->srs_last;
16268275SEric Cheng 	mac_srs->srs_last = NULL;
16278275SEric Cheng 	cnt = mac_srs->srs_count;
16288275SEric Cheng 	mac_srs->srs_count = 0;
16298275SEric Cheng 
16308275SEric Cheng 	ASSERT(head != NULL);
16318275SEric Cheng 	ASSERT(tail != NULL);
16328275SEric Cheng 
16338275SEric Cheng 	if ((tid = mac_srs->srs_tid) != 0)
16348275SEric Cheng 		mac_srs->srs_tid = 0;
16358275SEric Cheng 
16368275SEric Cheng 	mac_srs->srs_state |= (SRS_PROC|proc_type);
16378275SEric Cheng 
16388833SVenu.Iyer@Sun.COM 
16398275SEric Cheng 	/*
16408275SEric Cheng 	 * mcip is NULL for broadcast and multicast flows. The promisc
16418275SEric Cheng 	 * callbacks for broadcast and multicast packets are delivered from
16428275SEric Cheng 	 * mac_rx() and we don't need to worry about that case in this path
16438275SEric Cheng 	 */
1644*11878SVenu.Iyer@Sun.COM 	if (mcip != NULL) {
1645*11878SVenu.Iyer@Sun.COM 		if (mcip->mci_promisc_list != NULL) {
1646*11878SVenu.Iyer@Sun.COM 			mutex_exit(&mac_srs->srs_lock);
1647*11878SVenu.Iyer@Sun.COM 			mac_promisc_client_dispatch(mcip, head);
1648*11878SVenu.Iyer@Sun.COM 			mutex_enter(&mac_srs->srs_lock);
1649*11878SVenu.Iyer@Sun.COM 		}
1650*11878SVenu.Iyer@Sun.COM 		if (MAC_PROTECT_ENABLED(mcip, MPT_IPNOSPOOF)) {
1651*11878SVenu.Iyer@Sun.COM 			mutex_exit(&mac_srs->srs_lock);
1652*11878SVenu.Iyer@Sun.COM 			mac_protect_intercept_dhcp(mcip, head);
1653*11878SVenu.Iyer@Sun.COM 			mutex_enter(&mac_srs->srs_lock);
1654*11878SVenu.Iyer@Sun.COM 		}
16558275SEric Cheng 	}
16568275SEric Cheng 
16578275SEric Cheng 	/*
16588275SEric Cheng 	 * Check if the SRS itself is doing the processing.
16598275SEric Cheng 	 * This direct path does not apply when subflows are present. In this
16608275SEric Cheng 	 * case, packets need to be dispatched to a soft ring according to the
16618275SEric Cheng 	 * flow's bandwidth and other resource constraints.
16628275SEric Cheng 	 */
16638275SEric Cheng 	if (mac_srs->srs_type & SRST_NO_SOFT_RINGS) {
16648275SEric Cheng 		mac_direct_rx_t		proc;
16658275SEric Cheng 		void			*arg1;
16668275SEric Cheng 		mac_resource_handle_t	arg2;
16678275SEric Cheng 
16688275SEric Cheng 		/*
16698275SEric Cheng 		 * This is the case when an Rx ring is directly
16708275SEric Cheng 		 * assigned and we have a fully classified
16718275SEric Cheng 		 * protocol chain. We can deal with it in
16728275SEric Cheng 		 * one shot.
16738275SEric Cheng 		 */
16748275SEric Cheng 		proc = srs_rx->sr_func;
16758275SEric Cheng 		arg1 = srs_rx->sr_arg1;
16768275SEric Cheng 		arg2 = srs_rx->sr_arg2;
16778275SEric Cheng 
16788275SEric Cheng 		mac_srs->srs_state |= SRS_CLIENT_PROC;
16798275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
16808275SEric Cheng 		if (tid != 0) {
16818275SEric Cheng 			(void) untimeout(tid);
16828275SEric Cheng 			tid = 0;
16838275SEric Cheng 		}
16848275SEric Cheng 
16858275SEric Cheng 		proc(arg1, arg2, head, NULL);
16868275SEric Cheng 		/*
16878275SEric Cheng 		 * Decrement the size and count here itself
16888275SEric Cheng 		 * since the packet has been processed.
16898275SEric Cheng 		 */
16908275SEric Cheng 		mutex_enter(&mac_srs->srs_lock);
16918275SEric Cheng 		MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
16928275SEric Cheng 		if (mac_srs->srs_state & SRS_CLIENT_WAIT)
16938275SEric Cheng 			cv_signal(&mac_srs->srs_client_cv);
16948275SEric Cheng 		mac_srs->srs_state &= ~SRS_CLIENT_PROC;
16958275SEric Cheng 	} else {
16968275SEric Cheng 		/* Some kind of softrings based fanout is required */
16978275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
16988275SEric Cheng 		if (tid != 0) {
16998275SEric Cheng 			(void) untimeout(tid);
17008275SEric Cheng 			tid = 0;
17018275SEric Cheng 		}
17028275SEric Cheng 
17038275SEric Cheng 		/*
17048275SEric Cheng 		 * Since the fanout routines can deal with chains,
17058275SEric Cheng 		 * shoot the entire chain up.
17068275SEric Cheng 		 */
17078275SEric Cheng 		if (mac_srs->srs_type & SRST_FANOUT_SRC_IP)
17088275SEric Cheng 			mac_rx_srs_fanout(mac_srs, head);
17098275SEric Cheng 		else
17108275SEric Cheng 			mac_rx_srs_proto_fanout(mac_srs, head);
17118275SEric Cheng 		mutex_enter(&mac_srs->srs_lock);
17128275SEric Cheng 	}
17138275SEric Cheng 
17149820SEric Cheng 	if (!(mac_srs->srs_state & (SRS_BLANK|SRS_PAUSE)) &&
17159820SEric Cheng 	    (mac_srs->srs_first != NULL)) {
17168833SVenu.Iyer@Sun.COM 		/*
17179820SEric Cheng 		 * More packets arrived while we were clearing the
17189820SEric Cheng 		 * SRS. This can be possible because of one of
17199820SEric Cheng 		 * three conditions below:
17209820SEric Cheng 		 * 1) The driver is using multiple worker threads
17219820SEric Cheng 		 *    to send the packets to us.
17229820SEric Cheng 		 * 2) The driver has a race in switching
17239820SEric Cheng 		 *    between interrupt and polling mode or
17249820SEric Cheng 		 * 3) Packets are arriving in this SRS via the
17259820SEric Cheng 		 *    S/W classification as well.
17269820SEric Cheng 		 *
17279820SEric Cheng 		 * We should switch to polling mode and see if we
17289820SEric Cheng 		 * need to send the poll thread down. Also, signal
17299820SEric Cheng 		 * the worker thread to process what's just arrived.
17308833SVenu.Iyer@Sun.COM 		 */
17319820SEric Cheng 		MAC_SRS_POLLING_ON(mac_srs);
17328833SVenu.Iyer@Sun.COM 		if (srs_rx->sr_poll_pkt_cnt <= srs_rx->sr_lowat) {
17338833SVenu.Iyer@Sun.COM 			srs_rx->sr_drain_poll_sig++;
17348833SVenu.Iyer@Sun.COM 			MAC_SRS_POLL_RING(mac_srs);
17358833SVenu.Iyer@Sun.COM 		}
17369820SEric Cheng 
17379820SEric Cheng 		/*
17389820SEric Cheng 		 * If we didn't signal the poll thread, we need
17399820SEric Cheng 		 * to deal with the pending packets ourselves.
17409820SEric Cheng 		 */
17419820SEric Cheng 		if (proc_type == SRS_WORKER) {
17428275SEric Cheng 			srs_rx->sr_drain_again++;
17438275SEric Cheng 			goto again;
17449820SEric Cheng 		} else {
17459820SEric Cheng 			srs_rx->sr_drain_worker_sig++;
17469820SEric Cheng 			cv_signal(&mac_srs->srs_async);
17478275SEric Cheng 		}
17488275SEric Cheng 	}
17498275SEric Cheng 
17508275SEric Cheng out:
17518275SEric Cheng 	if (mac_srs->srs_state & SRS_GET_PKTS) {
17528275SEric Cheng 		/*
17538275SEric Cheng 		 * Poll thread is already running. Leave the
17548275SEric Cheng 		 * SRS_PROC set and hand over the control to
17558275SEric Cheng 		 * poll thread.
17568275SEric Cheng 		 */
17578275SEric Cheng 		mac_srs->srs_state &= ~proc_type;
17588275SEric Cheng 		srs_rx->sr_drain_poll_running++;
17598275SEric Cheng 		return;
17608275SEric Cheng 	}
17618275SEric Cheng 
17628275SEric Cheng 	/*
17638275SEric Cheng 	 * Even if there are no packets queued in SRS, we
17648275SEric Cheng 	 * need to make sure that the shared counter is
17658275SEric Cheng 	 * clear and any associated softrings have cleared
17668275SEric Cheng 	 * all the backlog. Otherwise, leave the interface
17678275SEric Cheng 	 * in polling mode and the poll thread will get
17688275SEric Cheng 	 * signalled once the count goes down to zero.
17698275SEric Cheng 	 *
17708275SEric Cheng 	 * If someone is already draining the queue (SRS_PROC is
17718275SEric Cheng 	 * set) when the srs_poll_pkt_cnt goes down to zero,
17728275SEric Cheng 	 * then it means that drain is already running and we
17738275SEric Cheng 	 * will turn off polling at that time if there is
17748275SEric Cheng 	 * no backlog.
17758275SEric Cheng 	 *
17768275SEric Cheng 	 * As long as there are packets queued either
17778275SEric Cheng 	 * in soft ring set or its soft rings, we will leave
17788275SEric Cheng 	 * the interface in polling mode (even if the drain
17798275SEric Cheng 	 * was done by the interrupt thread). We signal
17808275SEric Cheng 	 * the poll thread as well if we have dipped below
17818275SEric Cheng 	 * low water mark.
17828275SEric Cheng 	 *
17838275SEric Cheng 	 * NOTE: We can't use the MAC_SRS_POLLING_ON macro
17848275SEric Cheng 	 * since that turns polling on only for the worker thread.
17858275SEric Cheng 	 * It's not worth turning polling on for the interrupt
17868275SEric Cheng 	 * thread (since NIC will not issue another interrupt)
17878275SEric Cheng 	 * unless a backlog builds up.
17888275SEric Cheng 	 */
17898275SEric Cheng 	if ((srs_rx->sr_poll_pkt_cnt > 0) &&
17908275SEric Cheng 	    (mac_srs->srs_state & SRS_POLLING_CAPAB)) {
17918275SEric Cheng 		mac_srs->srs_state &= ~(SRS_PROC|proc_type);
17928275SEric Cheng 		srs_rx->sr_drain_keep_polling++;
17938275SEric Cheng 		MAC_SRS_POLLING_ON(mac_srs);
17948275SEric Cheng 		if (srs_rx->sr_poll_pkt_cnt <= srs_rx->sr_lowat)
17958275SEric Cheng 			MAC_SRS_POLL_RING(mac_srs);
17968275SEric Cheng 		return;
17978275SEric Cheng 	}
17988275SEric Cheng 
17998275SEric Cheng 	/* Nothing else to do. Get out of poll mode */
18008275SEric Cheng 	MAC_SRS_POLLING_OFF(mac_srs);
18018275SEric Cheng 	mac_srs->srs_state &= ~(SRS_PROC|proc_type);
18028275SEric Cheng 	srs_rx->sr_drain_finish_intr++;
18038275SEric Cheng }
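
/*
 * Summary of how the drain above leaves the SRS (this only restates the
 * checks at the "out:" label; it is not additional logic):
 *
 *	SRS_GET_PKTS set		poll thread owns the SRS; leave
 *					SRS_PROC set and let it clean up.
 *	backlog && SRS_POLLING_CAPAB	stay in polling mode and signal the
 *					poll thread if below sr_lowat.
 *	otherwise			MAC_SRS_POLLING_OFF, i.e. fall back
 *					to interrupt mode.
 */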
18048275SEric Cheng 
18058275SEric Cheng /*
18068275SEric Cheng  * mac_rx_srs_drain_bw
18078275SEric Cheng  *
18088275SEric Cheng  * The SRS BW drain routine. Gets to run to clear the queue. Any thread
18098275SEric Cheng  * (worker, interrupt, poll) can call this based on processing model.
18108275SEric Cheng  * The first thing we do is disable interrupts if possible and then
18118275SEric Cheng  * drain the queue. We also try to poll the underlying hardware if
18128275SEric Cheng  * there is a dedicated hardware Rx ring assigned to this SRS.
18138275SEric Cheng  *
18148275SEric Cheng  * There is an equivalent drain routine in non-bandwidth-control mode,
18158275SEric Cheng  * mac_rx_srs_drain. There is some code duplication between the two
18168275SEric Cheng  * routines but they are highly performance sensitive and are easier
18178275SEric Cheng  * to read/debug if they stay separate. Any code changes here might
18188275SEric Cheng  * also apply to mac_rx_srs_drain as well.
18198275SEric Cheng  */
18208275SEric Cheng void
18218275SEric Cheng mac_rx_srs_drain_bw(mac_soft_ring_set_t *mac_srs, uint_t proc_type)
18228275SEric Cheng {
18238275SEric Cheng 	mblk_t 			*head;
18248275SEric Cheng 	mblk_t			*tail;
18258275SEric Cheng 	timeout_id_t 		tid;
18268275SEric Cheng 	size_t			sz = 0;
18278275SEric Cheng 	int			cnt = 0;
18288275SEric Cheng 	mac_client_impl_t	*mcip = mac_srs->srs_mcip;
18298275SEric Cheng 	mac_srs_rx_t		*srs_rx = &mac_srs->srs_rx;
183011066Srafael.vanoni@sun.com 	clock_t			now;
18318275SEric Cheng 
18328275SEric Cheng 	ASSERT(MUTEX_HELD(&mac_srs->srs_lock));
18338275SEric Cheng 	ASSERT(mac_srs->srs_type & SRST_BW_CONTROL);
18348275SEric Cheng again:
18358275SEric Cheng 	/* Check if we are doing B/W control */
18368275SEric Cheng 	mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
183711066Srafael.vanoni@sun.com 	now = ddi_get_lbolt();
183811066Srafael.vanoni@sun.com 	if (mac_srs->srs_bw->mac_bw_curr_time != now) {
183911066Srafael.vanoni@sun.com 		mac_srs->srs_bw->mac_bw_curr_time = now;
18408275SEric Cheng 		mac_srs->srs_bw->mac_bw_used = 0;
18418275SEric Cheng 		if (mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED)
18428275SEric Cheng 			mac_srs->srs_bw->mac_bw_state &= ~SRS_BW_ENFORCED;
18438275SEric Cheng 	} else if (mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED) {
18448275SEric Cheng 		mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
18458275SEric Cheng 		goto done;
18468275SEric Cheng 	} else if (mac_srs->srs_bw->mac_bw_used >
18478275SEric Cheng 	    mac_srs->srs_bw->mac_bw_limit) {
18488275SEric Cheng 		mac_srs->srs_bw->mac_bw_state |= SRS_BW_ENFORCED;
18498275SEric Cheng 		mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
18508275SEric Cheng 		goto done;
18518275SEric Cheng 	}
18528275SEric Cheng 	mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
18538275SEric Cheng 
18548275SEric Cheng 	/* If we are blanked i.e. can't do upcalls, then we are done */
18558275SEric Cheng 	if (mac_srs->srs_state & (SRS_BLANK | SRS_PAUSE)) {
18568275SEric Cheng 		ASSERT((mac_srs->srs_type & SRST_NO_SOFT_RINGS) ||
18578275SEric Cheng 		    (mac_srs->srs_state & SRS_PAUSE));
18588275SEric Cheng 		goto done;
18598275SEric Cheng 	}
18608275SEric Cheng 
18618275SEric Cheng 	sz = 0;
18628275SEric Cheng 	cnt = 0;
18638275SEric Cheng 	if ((head = mac_srs_pick_chain(mac_srs, &tail, &sz, &cnt)) == NULL) {
18648275SEric Cheng 		/*
18658275SEric Cheng 		 * We couldn't pick up a single packet.
18668275SEric Cheng 		 */
18678275SEric Cheng 		mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
18688275SEric Cheng 		if ((mac_srs->srs_bw->mac_bw_used == 0) &&
18698275SEric Cheng 		    (mac_srs->srs_size != 0) &&
18708275SEric Cheng 		    !(mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED)) {
18718275SEric Cheng 			/*
18728275SEric Cheng 			 * Seems like configured B/W doesn't
18738275SEric Cheng 			 * even allow processing of 1 packet
18748275SEric Cheng 			 * per tick.
18758275SEric Cheng 			 *
18768275SEric Cheng 			 * XXX: raise the limit to processing
18778275SEric Cheng 			 * at least 1 packet per tick.
18788275SEric Cheng 			 */
18798275SEric Cheng 			mac_srs->srs_bw->mac_bw_limit +=
18808275SEric Cheng 			    mac_srs->srs_bw->mac_bw_limit;
18818275SEric Cheng 			mac_srs->srs_bw->mac_bw_drop_threshold +=
18828275SEric Cheng 			    mac_srs->srs_bw->mac_bw_drop_threshold;
18838275SEric Cheng 			cmn_err(CE_NOTE, "mac_rx_srs_drain: srs(%p) "
18848275SEric Cheng 			    "raised B/W limit to %d since not even a "
18858275SEric Cheng 			    "single packet can be processed per "
18868275SEric Cheng 			    "tick %d\n", (void *)mac_srs,
18878275SEric Cheng 			    (int)mac_srs->srs_bw->mac_bw_limit,
18888275SEric Cheng 			    (int)msgdsize(mac_srs->srs_first));
18898275SEric Cheng 		}
18908275SEric Cheng 		mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
18918275SEric Cheng 		goto done;
18928275SEric Cheng 	}
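
	/*
	 * Worked example of the adjustment above (hypothetical numbers): a
	 * 3000-byte per-tick limit with only 4000-byte packets queued means
	 * mac_srs_pick_chain() can never return anything, so the limit and
	 * drop threshold are doubled (3000 -> 6000 -> ...) on successive
	 * drains until at least one packet fits.
	 */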
18938275SEric Cheng 
18948275SEric Cheng 	ASSERT(head != NULL);
18958275SEric Cheng 	ASSERT(tail != NULL);
18968275SEric Cheng 
18978275SEric Cheng 	/* zero bandwidth: drop all and return to interrupt mode */
18988275SEric Cheng 	mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
18998275SEric Cheng 	if (mac_srs->srs_bw->mac_bw_limit == 0) {
1900*11878SVenu.Iyer@Sun.COM 		srs_rx->sr_stat.mrs_sdrops += cnt;
19018275SEric Cheng 		ASSERT(mac_srs->srs_bw->mac_bw_sz >= sz);
19028275SEric Cheng 		mac_srs->srs_bw->mac_bw_sz -= sz;
19038275SEric Cheng 		mac_srs->srs_bw->mac_bw_drop_bytes += sz;
19048275SEric Cheng 		mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
19058275SEric Cheng 		mac_pkt_drop(NULL, NULL, head, B_FALSE);
19068275SEric Cheng 		goto leave_poll;
19078275SEric Cheng 	} else {
19088275SEric Cheng 		mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
19098275SEric Cheng 	}
19108275SEric Cheng 
19118275SEric Cheng 	if ((tid = mac_srs->srs_tid) != 0)
19128275SEric Cheng 		mac_srs->srs_tid = 0;
19138275SEric Cheng 
19148275SEric Cheng 	mac_srs->srs_state |= (SRS_PROC|proc_type);
19158275SEric Cheng 	MAC_SRS_WORKER_POLLING_ON(mac_srs);
19168275SEric Cheng 
19178275SEric Cheng 	/*
19188275SEric Cheng 	 * mcip is NULL for broadcast and multicast flows. The promisc
19198275SEric Cheng 	 * callbacks for broadcast and multicast packets are delivered from
19208275SEric Cheng 	 * mac_rx() and we don't need to worry about that case in this path
19218275SEric Cheng 	 */
1922*11878SVenu.Iyer@Sun.COM 	if (mcip != NULL) {
1923*11878SVenu.Iyer@Sun.COM 		if (mcip->mci_promisc_list != NULL) {
1924*11878SVenu.Iyer@Sun.COM 			mutex_exit(&mac_srs->srs_lock);
1925*11878SVenu.Iyer@Sun.COM 			mac_promisc_client_dispatch(mcip, head);
1926*11878SVenu.Iyer@Sun.COM 			mutex_enter(&mac_srs->srs_lock);
1927*11878SVenu.Iyer@Sun.COM 		}
1928*11878SVenu.Iyer@Sun.COM 		if (MAC_PROTECT_ENABLED(mcip, MPT_IPNOSPOOF)) {
1929*11878SVenu.Iyer@Sun.COM 			mutex_exit(&mac_srs->srs_lock);
1930*11878SVenu.Iyer@Sun.COM 			mac_protect_intercept_dhcp(mcip, head);
1931*11878SVenu.Iyer@Sun.COM 			mutex_enter(&mac_srs->srs_lock);
1932*11878SVenu.Iyer@Sun.COM 		}
19338275SEric Cheng 	}
19348275SEric Cheng 
19358275SEric Cheng 	/*
19368275SEric Cheng 	 * Check if the SRS itself is doing the processing.
19378275SEric Cheng 	 * This direct path does not apply when subflows are present. In this
19388275SEric Cheng 	 * case, packets need to be dispatched to a soft ring according to the
19398275SEric Cheng 	 * flow's bandwidth and other resource constraints.
19408275SEric Cheng 	 */
19418275SEric Cheng 	if (mac_srs->srs_type & SRST_NO_SOFT_RINGS) {
19428275SEric Cheng 		mac_direct_rx_t		proc;
19438275SEric Cheng 		void			*arg1;
19448275SEric Cheng 		mac_resource_handle_t	arg2;
19458275SEric Cheng 
19468275SEric Cheng 		/*
19478275SEric Cheng 		 * This is the case when an Rx ring is directly
19488275SEric Cheng 		 * assigned and we have a fully classified
19498275SEric Cheng 		 * protocol chain. We can deal with it in
19508275SEric Cheng 		 * one shot.
19518275SEric Cheng 		 */
19528275SEric Cheng 		proc = srs_rx->sr_func;
19538275SEric Cheng 		arg1 = srs_rx->sr_arg1;
19548275SEric Cheng 		arg2 = srs_rx->sr_arg2;
19558275SEric Cheng 
19568275SEric Cheng 		mac_srs->srs_state |= SRS_CLIENT_PROC;
19578275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
19588275SEric Cheng 		if (tid != 0) {
19598275SEric Cheng 			(void) untimeout(tid);
19608275SEric Cheng 			tid = 0;
19618275SEric Cheng 		}
19628275SEric Cheng 
19638275SEric Cheng 		proc(arg1, arg2, head, NULL);
19648275SEric Cheng 		/*
19658275SEric Cheng 		 * Decrement the size and count here itself
19668275SEric Cheng 		 * since the packet has been processed.
19678275SEric Cheng 		 */
19688275SEric Cheng 		mutex_enter(&mac_srs->srs_lock);
19698275SEric Cheng 		MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
19708275SEric Cheng 		MAC_UPDATE_SRS_SIZE_LOCKED(mac_srs, sz);
19718275SEric Cheng 
19728275SEric Cheng 		if (mac_srs->srs_state & SRS_CLIENT_WAIT)
19738275SEric Cheng 			cv_signal(&mac_srs->srs_client_cv);
19748275SEric Cheng 		mac_srs->srs_state &= ~SRS_CLIENT_PROC;
19758275SEric Cheng 	} else {
19768275SEric Cheng 		/* Some kind of softrings based fanout is required */
19778275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
19788275SEric Cheng 		if (tid != 0) {
19798275SEric Cheng 			(void) untimeout(tid);
19808275SEric Cheng 			tid = 0;
19818275SEric Cheng 		}
19828275SEric Cheng 
19838275SEric Cheng 		/*
19848275SEric Cheng 		 * Since the fanout routines can deal with chains,
19858275SEric Cheng 		 * shoot the entire chain up.
19868275SEric Cheng 		 */
19878275SEric Cheng 		if (mac_srs->srs_type & SRST_FANOUT_SRC_IP)
19888275SEric Cheng 			mac_rx_srs_fanout(mac_srs, head);
19898275SEric Cheng 		else
19908275SEric Cheng 			mac_rx_srs_proto_fanout(mac_srs, head);
19918275SEric Cheng 		mutex_enter(&mac_srs->srs_lock);
19928275SEric Cheng 	}
19938275SEric Cheng 
19948275SEric Cheng 	/*
19958275SEric Cheng 	 * Send the poll thread to pick up any packets that have arrived
19968275SEric Cheng 	 * so far. This also serves as the last check in case
19978275SEric Cheng 	 * nothing else is queued in the SRS. The poll thread
19988275SEric Cheng 	 * is signalled only in the case the drain was done
19998275SEric Cheng 	 * by the worker thread and SRS_WORKER is set. The
20008275SEric Cheng 	 * worker thread can run in parallel as long as the
20018275SEric Cheng 	 * SRS_WORKER flag is set. If we have nothing else to
20028275SEric Cheng 	 * process, we can exit while leaving SRS_PROC set
20038275SEric Cheng 	 * which gives the poll thread control to process and
20048275SEric Cheng 	 * cleanup once it returns from the NIC.
20058275SEric Cheng 	 *
20068275SEric Cheng 	 * If we have nothing else to process, we need to
20078275SEric Cheng 	 * ensure that we keep holding the srs_lock till
20088275SEric Cheng 	 * all the checks below are done and control is
20098275SEric Cheng 	 * handed to the poll thread if it was running.
20108275SEric Cheng 	 */
20118275SEric Cheng 	mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
20128275SEric Cheng 	if (!(mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED)) {
20138275SEric Cheng 		if (mac_srs->srs_first != NULL) {
20148275SEric Cheng 			if (proc_type == SRS_WORKER) {
20158275SEric Cheng 				mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
20168275SEric Cheng 				if (srs_rx->sr_poll_pkt_cnt <=
20178275SEric Cheng 				    srs_rx->sr_lowat)
20188275SEric Cheng 					MAC_SRS_POLL_RING(mac_srs);
20198275SEric Cheng 				goto again;
20208275SEric Cheng 			} else {
20218275SEric Cheng 				cv_signal(&mac_srs->srs_async);
20228275SEric Cheng 			}
20238275SEric Cheng 		}
20248275SEric Cheng 	}
20258275SEric Cheng 	mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
20268275SEric Cheng 
20278275SEric Cheng done:
20288275SEric Cheng 
20298275SEric Cheng 	if (mac_srs->srs_state & SRS_GET_PKTS) {
20308275SEric Cheng 		/*
20318275SEric Cheng 		 * Poll thread is already running. Leave the
20328275SEric Cheng 		 * SRS_PROC set and hand over the control to
20338275SEric Cheng 		 * poll thread.
20348275SEric Cheng 		 */
20358275SEric Cheng 		mac_srs->srs_state &= ~proc_type;
20368275SEric Cheng 		return;
20378275SEric Cheng 	}
20388275SEric Cheng 
20398275SEric Cheng 	/*
20408275SEric Cheng 	 * If we can't process packets because we have exceeded
20418275SEric Cheng 	 * B/W limit for this tick, just set the timeout
20428275SEric Cheng 	 * and leave.
20438275SEric Cheng 	 *
20448275SEric Cheng 	 * Even if there are no packets queued in SRS, we
20458275SEric Cheng 	 * need to make sure that the shared counter is
20468275SEric Cheng 	 * clear and any associated softrings have cleared
20478275SEric Cheng 	 * all the backlog. Otherwise, leave the interface
20488275SEric Cheng 	 * in polling mode and the poll thread will get
20498275SEric Cheng 	 * signalled once the count goes down to zero.
20508275SEric Cheng 	 *
20518275SEric Cheng 	 * If someone is already draining the queue (SRS_PROC is
20528275SEric Cheng 	 * set) when the srs_poll_pkt_cnt goes down to zero,
20538275SEric Cheng 	 * then it means that drain is already running and we
20548275SEric Cheng 	 * will turn off polling at that time if there is
20558275SEric Cheng 	 * no backlog. As long as there are packets queued either
20568275SEric Cheng 	 * in the soft ring set or its soft rings, we will leave
20578275SEric Cheng 	 * the interface in polling mode.
20588275SEric Cheng 	 */
20598275SEric Cheng 	mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
20608275SEric Cheng 	if ((mac_srs->srs_state & SRS_POLLING_CAPAB) &&
20618275SEric Cheng 	    ((mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED) ||
20628275SEric Cheng 	    (srs_rx->sr_poll_pkt_cnt > 0))) {
20638275SEric Cheng 		MAC_SRS_POLLING_ON(mac_srs);
20648275SEric Cheng 		mac_srs->srs_state &= ~(SRS_PROC|proc_type);
20658275SEric Cheng 		if ((mac_srs->srs_first != NULL) &&
20668275SEric Cheng 		    (mac_srs->srs_tid == NULL))
20678275SEric Cheng 			mac_srs->srs_tid = timeout(mac_srs_fire,
20688275SEric Cheng 			    mac_srs, 1);
20698275SEric Cheng 		mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
20708275SEric Cheng 		return;
20718275SEric Cheng 	}
20728275SEric Cheng 	mutex_exit(&mac_srs->srs_bw->mac_bw_lock);
20738275SEric Cheng 
20748275SEric Cheng leave_poll:
20758275SEric Cheng 
20768275SEric Cheng 	/* Nothing else to do. Get out of poll mode */
20778275SEric Cheng 	MAC_SRS_POLLING_OFF(mac_srs);
20788275SEric Cheng 	mac_srs->srs_state &= ~(SRS_PROC|proc_type);
20798275SEric Cheng }
20808275SEric Cheng 
20818275SEric Cheng /*
20828275SEric Cheng  * mac_srs_worker
20838275SEric Cheng  *
20848275SEric Cheng  * The SRS worker routine. Drains the queue when no one else is
20858275SEric Cheng  * processing it.
20868275SEric Cheng  */
20878275SEric Cheng void
20888275SEric Cheng mac_srs_worker(mac_soft_ring_set_t *mac_srs)
20898275SEric Cheng {
20908275SEric Cheng 	kmutex_t 		*lock = &mac_srs->srs_lock;
20918275SEric Cheng 	kcondvar_t 		*async = &mac_srs->srs_async;
20928275SEric Cheng 	callb_cpr_t		cprinfo;
20938275SEric Cheng 	boolean_t		bw_ctl_flag;
20948275SEric Cheng 
20958275SEric Cheng 	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "srs_worker");
20968275SEric Cheng 	mutex_enter(lock);
20978275SEric Cheng 
20988275SEric Cheng start:
20998275SEric Cheng 	for (;;) {
21008275SEric Cheng 		bw_ctl_flag = B_FALSE;
21018275SEric Cheng 		if (mac_srs->srs_type & SRST_BW_CONTROL) {
21028275SEric Cheng 			MAC_SRS_BW_LOCK(mac_srs);
21038275SEric Cheng 			MAC_SRS_CHECK_BW_CONTROL(mac_srs);
21048275SEric Cheng 			if (mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED)
21058275SEric Cheng 				bw_ctl_flag = B_TRUE;
21068275SEric Cheng 			MAC_SRS_BW_UNLOCK(mac_srs);
21078275SEric Cheng 		}
21088275SEric Cheng 		/*
21098275SEric Cheng 		 * The SRS_BW_ENFORCED flag may change since we have dropped
21108275SEric Cheng 		 * the mac_bw_lock. However the drain function can handle both
21118275SEric Cheng 		 * a drainable SRS or a bandwidth controlled SRS, and the
21128275SEric Cheng 		 * effect of scheduling a timeout is to wakeup the worker
21138275SEric Cheng 		 * thread which in turn will call the drain function. Since
21148275SEric Cheng 		 * we release the srs_lock atomically only in the cv_wait there
21158275SEric Cheng 		 * isn't a fear of waiting for ever.
21168275SEric Cheng 		 * isn't a fear of waiting forever.
21178275SEric Cheng 		while (((mac_srs->srs_state & SRS_PROC) ||
21188275SEric Cheng 		    (mac_srs->srs_first == NULL) || bw_ctl_flag ||
21198275SEric Cheng 		    (mac_srs->srs_state & SRS_TX_BLOCKED)) &&
21208275SEric Cheng 		    !(mac_srs->srs_state & SRS_PAUSE)) {
21218275SEric Cheng 			/*
21228275SEric Cheng 			 * If we have packets queued and we are here
21238275SEric Cheng 			 * because B/W control is in place, we better
21248275SEric Cheng 			 * schedule the worker wakeup after 1 tick
21258275SEric Cheng 			 * to see if bandwidth control can be relaxed.
21268275SEric Cheng 			 */
21278275SEric Cheng 			if (bw_ctl_flag && mac_srs->srs_tid == NULL) {
21288275SEric Cheng 				/*
21298275SEric Cheng 				 * We need to ensure that a timer is already
21308275SEric Cheng 				 * scheduled or we force-schedule one for
21318275SEric Cheng 				 * later so that we can continue processing
21328275SEric Cheng 				 * after this quantum is over.
21338275SEric Cheng 				 */
21348275SEric Cheng 				mac_srs->srs_tid = timeout(mac_srs_fire,
21358275SEric Cheng 				    mac_srs, 1);
21368275SEric Cheng 			}
21378275SEric Cheng wait:
21388275SEric Cheng 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
21398275SEric Cheng 			cv_wait(async, lock);
21408275SEric Cheng 			CALLB_CPR_SAFE_END(&cprinfo, lock);
21418275SEric Cheng 
21428275SEric Cheng 			if (mac_srs->srs_state & SRS_PAUSE)
21438275SEric Cheng 				goto done;
21448275SEric Cheng 			if (mac_srs->srs_state & SRS_PROC)
21458275SEric Cheng 				goto wait;
21468275SEric Cheng 
21478275SEric Cheng 			if (mac_srs->srs_first != NULL &&
21488275SEric Cheng 			    mac_srs->srs_type & SRST_BW_CONTROL) {
21498275SEric Cheng 				MAC_SRS_BW_LOCK(mac_srs);
21508275SEric Cheng 				if (mac_srs->srs_bw->mac_bw_state &
21518275SEric Cheng 				    SRS_BW_ENFORCED) {
21528275SEric Cheng 					MAC_SRS_CHECK_BW_CONTROL(mac_srs);
21538275SEric Cheng 				}
21548275SEric Cheng 				bw_ctl_flag = mac_srs->srs_bw->mac_bw_state &
21558275SEric Cheng 				    SRS_BW_ENFORCED;
21568275SEric Cheng 				MAC_SRS_BW_UNLOCK(mac_srs);
21578275SEric Cheng 			}
21588275SEric Cheng 		}
21598275SEric Cheng 
21608275SEric Cheng 		if (mac_srs->srs_state & SRS_PAUSE)
21618275SEric Cheng 			goto done;
21628275SEric Cheng 		mac_srs->srs_drain_func(mac_srs, SRS_WORKER);
21638275SEric Cheng 	}
21648275SEric Cheng done:
21658275SEric Cheng 	/*
21668275SEric Cheng 	 * The Rx SRS quiesce logic first cuts off packet supply to the SRS
21678275SEric Cheng 	 * from both hard and soft classifications and waits for such threads
21688275SEric Cheng 	 * to finish before signaling the worker. So at this point the only
21698275SEric Cheng 	 * thread left that could be competing with the worker is the poll
21708275SEric Cheng 	 * thread. In the case of Tx, there shouldn't be any thread holding
21718275SEric Cheng 	 * SRS_PROC at this point.
21728275SEric Cheng 	 */
21738275SEric Cheng 	if (!(mac_srs->srs_state & SRS_PROC)) {
21748275SEric Cheng 		mac_srs->srs_state |= SRS_PROC;
21758275SEric Cheng 	} else {
21768275SEric Cheng 		ASSERT((mac_srs->srs_type & SRST_TX) == 0);
21778275SEric Cheng 		/*
21788275SEric Cheng 		 * Poll thread still owns the SRS and is still running
21798275SEric Cheng 		 */
21808275SEric Cheng 		ASSERT((mac_srs->srs_poll_thr == NULL) ||
21818275SEric Cheng 		    ((mac_srs->srs_state & SRS_POLL_THR_OWNER) ==
21828275SEric Cheng 		    SRS_POLL_THR_OWNER));
21838275SEric Cheng 	}
21848275SEric Cheng 	mac_srs_worker_quiesce(mac_srs);
21858275SEric Cheng 	/*
21868275SEric Cheng 	 * Wait for the SRS_RESTART or SRS_CONDEMNED signal from the initiator
21878275SEric Cheng 	 * of the quiesce operation
21888275SEric Cheng 	 */
21898275SEric Cheng 	while (!(mac_srs->srs_state & (SRS_CONDEMNED | SRS_RESTART)))
21908275SEric Cheng 		cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
21918275SEric Cheng 
21928275SEric Cheng 	if (mac_srs->srs_state & SRS_RESTART) {
21938275SEric Cheng 		ASSERT(!(mac_srs->srs_state & SRS_CONDEMNED));
21948275SEric Cheng 		mac_srs_worker_restart(mac_srs);
21958275SEric Cheng 		mac_srs->srs_state &= ~SRS_PROC;
21968275SEric Cheng 		goto start;
21978275SEric Cheng 	}
21988275SEric Cheng 
21998275SEric Cheng 	if (!(mac_srs->srs_state & SRS_CONDEMNED_DONE))
22008275SEric Cheng 		mac_srs_worker_quiesce(mac_srs);
22018275SEric Cheng 
22028275SEric Cheng 	mac_srs->srs_state &= ~SRS_PROC;
22038275SEric Cheng 	/* The macro drops the srs_lock */
22048275SEric Cheng 	CALLB_CPR_EXIT(&cprinfo);
22058275SEric Cheng 	thread_exit();
22068275SEric Cheng }
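
/*
 * Condensed view of the worker loop above (simplified; the pause/quiesce
 * and restart handshakes are omitted):
 *
 *	for (;;) {
 *		while (SRS_PROC is set || queue is empty || SRS_BW_ENFORCED)
 *			cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
 *			(with a 1-tick mac_srs_fire timeout scheduled when
 *			the bandwidth limit is in force)
 *		mac_srs->srs_drain_func(mac_srs, SRS_WORKER);
 *	}
 */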
22078275SEric Cheng 
22088275SEric Cheng /*
22098275SEric Cheng  * mac_rx_srs_subflow_process
22108275SEric Cheng  *
22118275SEric Cheng  * Receive side routine called from interrupt path when there are
22128275SEric Cheng  * sub flows present on this SRS.
22138275SEric Cheng  */
22148275SEric Cheng /* ARGSUSED */
22158275SEric Cheng void
22168275SEric Cheng mac_rx_srs_subflow_process(void *arg, mac_resource_handle_t srs,
22178275SEric Cheng     mblk_t *mp_chain, boolean_t loopback)
22188275SEric Cheng {
22198275SEric Cheng 	flow_entry_t		*flent = NULL;
22208275SEric Cheng 	flow_entry_t		*prev_flent = NULL;
22218275SEric Cheng 	mblk_t			*mp = NULL;
22228275SEric Cheng 	mblk_t			*tail = NULL;
22238275SEric Cheng 	mac_soft_ring_set_t	*mac_srs = (mac_soft_ring_set_t *)srs;
22248275SEric Cheng 	mac_client_impl_t	*mcip;
22258275SEric Cheng 
22268275SEric Cheng 	mcip = mac_srs->srs_mcip;
22278275SEric Cheng 	ASSERT(mcip != NULL);
22288275SEric Cheng 
22298275SEric Cheng 	/*
22308275SEric Cheng 	 * We need to determine the SRS for every packet
22318275SEric Cheng 	 * by walking the flow table; if we don't get any,
22328275SEric Cheng 	 * then we proceed using the SRS we came with.
22338275SEric Cheng 	 */
22348275SEric Cheng 	mp = tail = mp_chain;
22358275SEric Cheng 	while (mp != NULL) {
22368275SEric Cheng 
22378275SEric Cheng 		/*
22388275SEric Cheng 		 * We will increment the stats for the matching subflow
22398275SEric Cheng 		 * when we get the bytes/pkt count for the classified packets
22408275SEric Cheng 		 * later in mac_rx_srs_process.
22418275SEric Cheng 		 */
22428275SEric Cheng 		(void) mac_flow_lookup(mcip->mci_subflow_tab, mp,
22438275SEric Cheng 		    FLOW_INBOUND, &flent);
22448275SEric Cheng 
22458275SEric Cheng 		if (mp == mp_chain || flent == prev_flent) {
22468275SEric Cheng 			if (prev_flent != NULL)
22478275SEric Cheng 				FLOW_REFRELE(prev_flent);
22488275SEric Cheng 			prev_flent = flent;
22498275SEric Cheng 			flent = NULL;
22508275SEric Cheng 			tail = mp;
22518275SEric Cheng 			mp = mp->b_next;
22528275SEric Cheng 			continue;
22538275SEric Cheng 		}
22548275SEric Cheng 		tail->b_next = NULL;
22558275SEric Cheng 		/*
22568275SEric Cheng 		 * A null indicates, this is for the mac_srs itself.
22578275SEric Cheng 		 * XXX-venu : probably assert for fe_rx_srs_cnt == 0.
22588275SEric Cheng 		 */
22598275SEric Cheng 		if (prev_flent == NULL || prev_flent->fe_rx_srs_cnt == 0) {
22608275SEric Cheng 			mac_rx_srs_process(arg,
22618275SEric Cheng 			    (mac_resource_handle_t)mac_srs, mp_chain,
22628275SEric Cheng 			    loopback);
22638275SEric Cheng 		} else {
22648275SEric Cheng 			(prev_flent->fe_cb_fn)(prev_flent->fe_cb_arg1,
22658275SEric Cheng 			    prev_flent->fe_cb_arg2, mp_chain, loopback);
22668275SEric Cheng 			FLOW_REFRELE(prev_flent);
22678275SEric Cheng 		}
22688275SEric Cheng 		prev_flent = flent;
22698275SEric Cheng 		flent = NULL;
22708275SEric Cheng 		mp_chain = mp;
22718275SEric Cheng 		tail = mp;
22728275SEric Cheng 		mp = mp->b_next;
22738275SEric Cheng 	}
22748275SEric Cheng 	/* Last chain */
22758275SEric Cheng 	ASSERT(mp_chain != NULL);
22768275SEric Cheng 	if (prev_flent == NULL || prev_flent->fe_rx_srs_cnt == 0) {
22778275SEric Cheng 		mac_rx_srs_process(arg,
22788275SEric Cheng 		    (mac_resource_handle_t)mac_srs, mp_chain, loopback);
22798275SEric Cheng 	} else {
22808275SEric Cheng 		(prev_flent->fe_cb_fn)(prev_flent->fe_cb_arg1,
22818275SEric Cheng 		    prev_flent->fe_cb_arg2, mp_chain, loopback);
22828275SEric Cheng 		FLOW_REFRELE(prev_flent);
22838275SEric Cheng 	}
22848275SEric Cheng }
22858275SEric Cheng 
22868275SEric Cheng /*
22878275SEric Cheng  * mac_rx_srs_process
22888275SEric Cheng  *
22898275SEric Cheng  * Receive side routine called from the interrupt path.
22908275SEric Cheng  *
22918275SEric Cheng  * loopback is set to force a context switch on the loopback
22928275SEric Cheng  * path between MAC clients.
22938275SEric Cheng  */
22948275SEric Cheng /* ARGSUSED */
22958275SEric Cheng void
22968275SEric Cheng mac_rx_srs_process(void *arg, mac_resource_handle_t srs, mblk_t *mp_chain,
22978275SEric Cheng     boolean_t loopback)
22988275SEric Cheng {
22998275SEric Cheng 	mac_soft_ring_set_t	*mac_srs = (mac_soft_ring_set_t *)srs;
23008275SEric Cheng 	mblk_t			*mp, *tail, *head;
23018275SEric Cheng 	int			count = 0;
23028275SEric Cheng 	int			count1;
23038275SEric Cheng 	size_t			sz = 0;
23048275SEric Cheng 	size_t			chain_sz, sz1;
23058275SEric Cheng 	mac_bw_ctl_t		*mac_bw;
23068275SEric Cheng 	mac_srs_rx_t		*srs_rx = &mac_srs->srs_rx;
23078275SEric Cheng 
23088275SEric Cheng 	/*
23098275SEric Cheng 	 * Set the tail, count and sz. We set the sz irrespective
23108275SEric Cheng 	 * of whether we are doing B/W control or not for the
23118275SEric Cheng 	 * purpose of updating the stats.
23128275SEric Cheng 	 */
23138275SEric Cheng 	mp = tail = mp_chain;
23148275SEric Cheng 	while (mp != NULL) {
23158275SEric Cheng 		tail = mp;
23168275SEric Cheng 		count++;
23178275SEric Cheng 		sz += msgdsize(mp);
23188275SEric Cheng 		mp = mp->b_next;
23198275SEric Cheng 	}
23208275SEric Cheng 
23218275SEric Cheng 	mutex_enter(&mac_srs->srs_lock);
2322*11878SVenu.Iyer@Sun.COM 
2323*11878SVenu.Iyer@Sun.COM 	if (loopback) {
2324*11878SVenu.Iyer@Sun.COM 		SRS_RX_STAT_UPDATE(mac_srs, lclbytes, sz);
2325*11878SVenu.Iyer@Sun.COM 		SRS_RX_STAT_UPDATE(mac_srs, lclcnt, count);
2326*11878SVenu.Iyer@Sun.COM 
2327*11878SVenu.Iyer@Sun.COM 	} else {
2328*11878SVenu.Iyer@Sun.COM 		SRS_RX_STAT_UPDATE(mac_srs, intrbytes, sz);
2329*11878SVenu.Iyer@Sun.COM 		SRS_RX_STAT_UPDATE(mac_srs, intrcnt, count);
23308275SEric Cheng 	}
23318275SEric Cheng 
23328275SEric Cheng 	/*
23338275SEric Cheng 	 * If the SRS is already being processed; has been blanked;
23348275SEric Cheng 	 * can be processed by worker thread only; or the B/W limit
23358275SEric Cheng 	 * has been reached, then queue the chain and check if
23368275SEric Cheng 	 * worker thread needs to be awakened.
23378275SEric Cheng 	 */
23388275SEric Cheng 	if (mac_srs->srs_type & SRST_BW_CONTROL) {
23398275SEric Cheng 		mac_bw = mac_srs->srs_bw;
23408275SEric Cheng 		ASSERT(mac_bw != NULL);
23418275SEric Cheng 		mutex_enter(&mac_bw->mac_bw_lock);
23428275SEric Cheng 		mac_bw->mac_bw_intr += sz;
23438275SEric Cheng 		if (mac_bw->mac_bw_limit == 0) {
23448275SEric Cheng 			/* zero bandwidth: drop all */
2345*11878SVenu.Iyer@Sun.COM 			srs_rx->sr_stat.mrs_sdrops += count;
23468275SEric Cheng 			mac_bw->mac_bw_drop_bytes += sz;
23478275SEric Cheng 			mutex_exit(&mac_bw->mac_bw_lock);
23488275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
23498275SEric Cheng 			mac_pkt_drop(NULL, NULL, mp_chain, B_FALSE);
23508275SEric Cheng 			return;
23518275SEric Cheng 		} else {
23528275SEric Cheng 			if ((mac_bw->mac_bw_sz + sz) <=
23538275SEric Cheng 			    mac_bw->mac_bw_drop_threshold) {
23548275SEric Cheng 				mutex_exit(&mac_bw->mac_bw_lock);
23558275SEric Cheng 				MAC_RX_SRS_ENQUEUE_CHAIN(mac_srs, mp_chain,
23568275SEric Cheng 				    tail, count, sz);
23578275SEric Cheng 			} else {
23588275SEric Cheng 				mp = mp_chain;
23598275SEric Cheng 				chain_sz = 0;
23608275SEric Cheng 				count1 = 0;
23618275SEric Cheng 				tail = NULL;
23628275SEric Cheng 				head = NULL;
23638275SEric Cheng 				while (mp != NULL) {
23648275SEric Cheng 					sz1 = msgdsize(mp);
23658275SEric Cheng 					if (mac_bw->mac_bw_sz + chain_sz + sz1 >
23668275SEric Cheng 					    mac_bw->mac_bw_drop_threshold)
23678275SEric Cheng 						break;
23688275SEric Cheng 					chain_sz += sz1;
23698275SEric Cheng 					count1++;
23708275SEric Cheng 					tail = mp;
23718275SEric Cheng 					mp = mp->b_next;
23728275SEric Cheng 				}
23738275SEric Cheng 				mutex_exit(&mac_bw->mac_bw_lock);
23748275SEric Cheng 				if (tail != NULL) {
23758275SEric Cheng 					head = tail->b_next;
23768275SEric Cheng 					tail->b_next = NULL;
23778275SEric Cheng 					MAC_RX_SRS_ENQUEUE_CHAIN(mac_srs,
23788275SEric Cheng 					    mp_chain, tail, count1, chain_sz);
23798275SEric Cheng 					sz -= chain_sz;
23808275SEric Cheng 					count -= count1;
23818275SEric Cheng 				} else {
23828275SEric Cheng 					/* Can't pick up any */
23838275SEric Cheng 					head = mp_chain;
23848275SEric Cheng 				}
23858275SEric Cheng 				if (head != NULL) {
23868275SEric Cheng 					/* Drop any packet over the threshold */
2387*11878SVenu.Iyer@Sun.COM 					srs_rx->sr_stat.mrs_sdrops += count;
23888275SEric Cheng 					mutex_enter(&mac_bw->mac_bw_lock);
23898275SEric Cheng 					mac_bw->mac_bw_drop_bytes += sz;
23908275SEric Cheng 					mutex_exit(&mac_bw->mac_bw_lock);
23918275SEric Cheng 					freemsgchain(head);
23928275SEric Cheng 				}
23938275SEric Cheng 			}
23948275SEric Cheng 			MAC_SRS_WORKER_WAKEUP(mac_srs);
23958275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
23968275SEric Cheng 			return;
23978275SEric Cheng 		}
23988275SEric Cheng 	}
23998275SEric Cheng 
24008275SEric Cheng 	/*
24018275SEric Cheng 	 * If the total number of packets queued in the SRS and
24028275SEric Cheng 	 * its associated soft rings exceeds the max allowed,
24038275SEric Cheng 	 * then drop the chain. If we are polling capable, this
24048275SEric Cheng 	 * shouldn't be happening.
24058275SEric Cheng 	 */
24068275SEric Cheng 	if (!(mac_srs->srs_type & SRST_BW_CONTROL) &&
24078275SEric Cheng 	    (srs_rx->sr_poll_pkt_cnt > srs_rx->sr_hiwat)) {
24088275SEric Cheng 		mac_bw = mac_srs->srs_bw;
2409*11878SVenu.Iyer@Sun.COM 		srs_rx->sr_stat.mrs_sdrops += count;
24108275SEric Cheng 		mutex_enter(&mac_bw->mac_bw_lock);
24118275SEric Cheng 		mac_bw->mac_bw_drop_bytes += sz;
24128275SEric Cheng 		mutex_exit(&mac_bw->mac_bw_lock);
24138275SEric Cheng 		freemsgchain(mp_chain);
24148275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
24158275SEric Cheng 		return;
24168275SEric Cheng 	}
24178275SEric Cheng 
24188275SEric Cheng 	MAC_RX_SRS_ENQUEUE_CHAIN(mac_srs, mp_chain, tail, count, sz);
24198275SEric Cheng 
24208275SEric Cheng 	if (!(mac_srs->srs_state & SRS_PROC)) {
24218275SEric Cheng 		/*
24228275SEric Cheng 		 * If we are coming via loopback or if we are not
24238275SEric Cheng 		 * optimizing for latency, we should signal the
24248275SEric Cheng 		 * worker thread.
24258275SEric Cheng 		 */
24268833SVenu.Iyer@Sun.COM 		if (loopback || !(mac_srs->srs_state & SRS_LATENCY_OPT)) {
24278275SEric Cheng 			/*
24288275SEric Cheng 			 * For loopback, we need to let the worker take
24298275SEric Cheng 			 * over as we don't want to continue in the same
24308275SEric Cheng 			 * thread even if we can. This could lead to stack
24318275SEric Cheng 			 * overflows and may also end up using
24328275SEric Cheng 			 * resources (cpu) incorrectly.
24338275SEric Cheng 			 */
24348275SEric Cheng 			cv_signal(&mac_srs->srs_async);
24358275SEric Cheng 		} else {
24368275SEric Cheng 			/*
24378275SEric Cheng 			 * Seems like no one is processing the SRS and
24388275SEric Cheng 			 * there is no backlog. We also inline process
24398275SEric Cheng 			 * our packet if it's a single packet in the non
24408275SEric Cheng 			 * latency optimized case (in the latency optimized
24418275SEric Cheng 			 * case, we inline process chains of any size).
24428275SEric Cheng 			 */
24438275SEric Cheng 			mac_srs->srs_drain_func(mac_srs, SRS_PROC_FAST);
24448275SEric Cheng 		}
24458275SEric Cheng 	}
24468275SEric Cheng 	mutex_exit(&mac_srs->srs_lock);
24478275SEric Cheng }
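/*
 * Illustrative sketch (not part of this file): a NIC driver hands its
 * received chain to the MAC layer through mac_rx(), and with the SRS set
 * up as the receive function that chain arrives in mac_rx_srs_process()
 * above. The driver soft state, helper routine and field names below are
 * hypothetical; only mac_rx() and DDI_INTR_CLAIMED are existing
 * interfaces.
 *
 *	static uint_t
 *	mydrv_intr(caddr_t arg1, caddr_t arg2)
 *	{
 *		mydrv_t	*mdp = (mydrv_t *)arg1;
 *		mblk_t	*chain = mydrv_collect_rx(mdp);
 *
 *		if (chain != NULL)
 *			mac_rx(mdp->md_mac_handle, NULL, chain);
 *		return (DDI_INTR_CLAIMED);
 *	}
 */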
24488275SEric Cheng 
24498275SEric Cheng /* TX SIDE ROUTINES (RUNTIME) */
24508275SEric Cheng 
24518275SEric Cheng /*
24528275SEric Cheng  * mac_tx_srs_no_desc
24538275SEric Cheng  *
24548275SEric Cheng  * This routine is called in the Tx single ring default mode
24558275SEric Cheng  * when the Tx ring runs out of descs.
24568275SEric Cheng  */
24578275SEric Cheng mac_tx_cookie_t
24588275SEric Cheng mac_tx_srs_no_desc(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
24598275SEric Cheng     uint16_t flag, mblk_t **ret_mp)
24608275SEric Cheng {
24618275SEric Cheng 	mac_tx_cookie_t cookie = NULL;
24628275SEric Cheng 	mac_srs_tx_t *srs_tx = &mac_srs->srs_tx;
24638275SEric Cheng 	boolean_t wakeup_worker = B_TRUE;
24648275SEric Cheng 	uint32_t tx_mode = srs_tx->st_mode;
24658275SEric Cheng 	int cnt, sz;
24668275SEric Cheng 	mblk_t *tail;
24678275SEric Cheng 
24688275SEric Cheng 	ASSERT(tx_mode == SRS_TX_DEFAULT || tx_mode == SRS_TX_BW);
24698275SEric Cheng 	if (flag & MAC_DROP_ON_NO_DESC) {
24708275SEric Cheng 		MAC_TX_SRS_DROP_MESSAGE(mac_srs, mp_chain, cookie);
24718275SEric Cheng 	} else {
24728275SEric Cheng 		if (mac_srs->srs_first != NULL)
24738275SEric Cheng 			wakeup_worker = B_FALSE;
24748275SEric Cheng 		MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
24758275SEric Cheng 		if (flag & MAC_TX_NO_ENQUEUE) {
24768275SEric Cheng 			/*
24778275SEric Cheng 			 * If TX_QUEUED is not set, queue the
24788275SEric Cheng 			 * packet and let mac_tx_srs_drain()
24798275SEric Cheng 			 * set the TX_BLOCKED bit for the
24808275SEric Cheng 			 * reasons explained above. Otherwise,
24818275SEric Cheng 			 * return the mblks.
24828275SEric Cheng 			 */
24838275SEric Cheng 			if (wakeup_worker) {
24848275SEric Cheng 				MAC_TX_SRS_ENQUEUE_CHAIN(mac_srs,
24858275SEric Cheng 				    mp_chain, tail, cnt, sz);
24868275SEric Cheng 			} else {
24878275SEric Cheng 				MAC_TX_SET_NO_ENQUEUE(mac_srs,
24888275SEric Cheng 				    mp_chain, ret_mp, cookie);
24898275SEric Cheng 			}
24908275SEric Cheng 		} else {
24918275SEric Cheng 			MAC_TX_SRS_TEST_HIWAT(mac_srs, mp_chain,
24928275SEric Cheng 			    tail, cnt, sz, cookie);
24938275SEric Cheng 		}
24948275SEric Cheng 		if (wakeup_worker)
24958275SEric Cheng 			cv_signal(&mac_srs->srs_async);
24968275SEric Cheng 	}
24978275SEric Cheng 	return (cookie);
24988275SEric Cheng }
24998275SEric Cheng 
25008275SEric Cheng /*
25018275SEric Cheng  * mac_tx_srs_enqueue
25028275SEric Cheng  *
25038275SEric Cheng  * This routine is called when Tx SRS is operating in either serializer
25048275SEric Cheng  * or bandwidth mode. In serializer mode, a packet will get enqueued
25058275SEric Cheng  * when a thread cannot enter SRS exclusively. In bandwidth mode,
25068275SEric Cheng  * packets get queued if the allowed byte-count limit for a tick is
25078275SEric Cheng  * exceeded. The action that gets taken when MAC_DROP_ON_NO_DESC and
25088275SEric Cheng  * MAC_TX_NO_ENQUEUE is set is different from when operating in either
25098275SEric Cheng  * the default mode or fanout mode. Here packets get dropped or
25108275SEric Cheng  * returned back to the caller only after hi-watermark worth of data
25118275SEric Cheng  * is queued.
25128275SEric Cheng  */
25138275SEric Cheng static mac_tx_cookie_t
25148275SEric Cheng mac_tx_srs_enqueue(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
25158275SEric Cheng     uint16_t flag, uintptr_t fanout_hint, mblk_t **ret_mp)
25168275SEric Cheng {
25178275SEric Cheng 	mac_tx_cookie_t cookie = NULL;
25188275SEric Cheng 	int cnt, sz;
25198275SEric Cheng 	mblk_t *tail;
25208275SEric Cheng 	boolean_t wakeup_worker = B_TRUE;
25218275SEric Cheng 
25228833SVenu.Iyer@Sun.COM 	/*
25238833SVenu.Iyer@Sun.COM 	 * Ignore fanout hint if we don't have multiple tx rings.
25248833SVenu.Iyer@Sun.COM 	 */
2525*11878SVenu.Iyer@Sun.COM 	if (!MAC_TX_SOFT_RINGS(mac_srs))
25268833SVenu.Iyer@Sun.COM 		fanout_hint = 0;
25278833SVenu.Iyer@Sun.COM 
25288275SEric Cheng 	if (mac_srs->srs_first != NULL)
25298275SEric Cheng 		wakeup_worker = B_FALSE;
25308275SEric Cheng 	MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
25318275SEric Cheng 	if (flag & MAC_DROP_ON_NO_DESC) {
25328275SEric Cheng 		if (mac_srs->srs_count > mac_srs->srs_tx.st_hiwat) {
25338275SEric Cheng 			MAC_TX_SRS_DROP_MESSAGE(mac_srs, mp_chain, cookie);
25348275SEric Cheng 		} else {
25358275SEric Cheng 			MAC_TX_SRS_ENQUEUE_CHAIN(mac_srs,
25368275SEric Cheng 			    mp_chain, tail, cnt, sz);
25378275SEric Cheng 		}
25388275SEric Cheng 	} else if (flag & MAC_TX_NO_ENQUEUE) {
25398275SEric Cheng 		if ((mac_srs->srs_count > mac_srs->srs_tx.st_hiwat) ||
25408275SEric Cheng 		    (mac_srs->srs_state & SRS_TX_WAKEUP_CLIENT)) {
25418275SEric Cheng 			MAC_TX_SET_NO_ENQUEUE(mac_srs, mp_chain,
25428275SEric Cheng 			    ret_mp, cookie);
25438275SEric Cheng 		} else {
25448275SEric Cheng 			mp_chain->b_prev = (mblk_t *)fanout_hint;
25458275SEric Cheng 			MAC_TX_SRS_ENQUEUE_CHAIN(mac_srs,
25468275SEric Cheng 			    mp_chain, tail, cnt, sz);
25478275SEric Cheng 		}
25488275SEric Cheng 	} else {
25498275SEric Cheng 		/*
25508275SEric Cheng 		 * If you are BW_ENFORCED, just enqueue the
25518275SEric Cheng 		 * packet. srs_worker will drain it at the
25528275SEric Cheng 		 * prescribed rate. Before enqueueing, save
25538275SEric Cheng 		 * the fanout hint.
25548275SEric Cheng 		 */
25558275SEric Cheng 		mp_chain->b_prev = (mblk_t *)fanout_hint;
25568275SEric Cheng 		MAC_TX_SRS_TEST_HIWAT(mac_srs, mp_chain,
25578275SEric Cheng 		    tail, cnt, sz, cookie);
25588275SEric Cheng 	}
25598275SEric Cheng 	if (wakeup_worker)
25608275SEric Cheng 		cv_signal(&mac_srs->srs_async);
25618275SEric Cheng 	return (cookie);
25628275SEric Cheng }
25638275SEric Cheng 
25648275SEric Cheng /*
2565*11878SVenu.Iyer@Sun.COM  * There are seven tx modes:
25668275SEric Cheng  *
25678275SEric Cheng  * 1) Default mode (SRS_TX_DEFAULT)
25688275SEric Cheng  * 2) Serialization mode (SRS_TX_SERIALIZE)
25698275SEric Cheng  * 3) Fanout mode (SRS_TX_FANOUT)
25708275SEric Cheng  * 4) Bandwidth mode (SRS_TX_BW)
25718275SEric Cheng  * 5) Fanout and Bandwidth mode (SRS_TX_BW_FANOUT)
2572*11878SVenu.Iyer@Sun.COM  * 6) aggr Tx mode (SRS_TX_AGGR)
2573*11878SVenu.Iyer@Sun.COM  * 7) aggr Tx bw mode (SRS_TX_BW_AGGR)
25748275SEric Cheng  *
25758275SEric Cheng  * The tx mode in which an SRS operates is decided in mac_tx_srs_setup()
25768275SEric Cheng  * based on the number of Tx rings requested for an SRS and whether
25778275SEric Cheng  * bandwidth control is requested or not.
25788275SEric Cheng  *
2579*11878SVenu.Iyer@Sun.COM  * The default mode (i.e., no fanout/no bandwidth) is used when the
2580*11878SVenu.Iyer@Sun.COM  * underlying NIC does not have Tx rings or just one Tx ring. In this mode,
2581*11878SVenu.Iyer@Sun.COM  * the SRS acts as a pass-thru. Packets will go directly to mac_tx_send().
2582*11878SVenu.Iyer@Sun.COM  * When the underlying Tx ring runs out of Tx descs, it starts queueing up
2583*11878SVenu.Iyer@Sun.COM  * packets in SRS. When flow-control is relieved, the srs_worker drains
2584*11878SVenu.Iyer@Sun.COM  * the queued packets and informs blocked clients to restart sending
2585*11878SVenu.Iyer@Sun.COM  * packets.
25868275SEric Cheng  *
2587*11878SVenu.Iyer@Sun.COM  * In the SRS_TX_SERIALIZE mode, all calls to mac_tx() are serialized. This
2588*11878SVenu.Iyer@Sun.COM  * mode is used when the link has no Tx rings or only one Tx ring.
25898275SEric Cheng  *
25908275SEric Cheng  * In the SRS_TX_FANOUT mode, packets will be fanned out to multiple
25918275SEric Cheng  * Tx rings. Each Tx ring will have a soft ring associated with it.
25928275SEric Cheng  * These soft rings will be hung off the Tx SRS. Any queueing due to a
25938275SEric Cheng  * lack of Tx descs happens in the individual soft ring (and not the
25948275SEric Cheng  * srs) associated with the Tx ring.
25958275SEric Cheng  *
25968275SEric Cheng  * In the TX_BW mode, tx srs will allow packets to go down to Tx ring
25978275SEric Cheng  * only if bw is available. Otherwise the packets will be queued in
25988275SEric Cheng  * SRS. If fanout to multiple Tx rings is configured, the packets will
25998275SEric Cheng  * be fanned out among the soft rings associated with the Tx rings.
26008275SEric Cheng  *
2601*11878SVenu.Iyer@Sun.COM  * In SRS_TX_AGGR mode, mac_tx_aggr_mode() routine is called. This routine
2602*11878SVenu.Iyer@Sun.COM  * invokes an aggr function, aggr_find_tx_ring(), to find a pseudo Tx ring
2603*11878SVenu.Iyer@Sun.COM  * belonging to a port on which the packet has to be sent. Aggr will
2604*11878SVenu.Iyer@Sun.COM  * always have a pseudo Tx ring associated with it even when it is an
2605*11878SVenu.Iyer@Sun.COM  * aggregation over a single NIC that has no Tx rings. Even in such a
2606*11878SVenu.Iyer@Sun.COM  * case, the single pseudo Tx ring will have a soft ring associated with
2607*11878SVenu.Iyer@Sun.COM  * it and the soft ring will hang off the SRS.
2608*11878SVenu.Iyer@Sun.COM  *
2609*11878SVenu.Iyer@Sun.COM  * If a bandwidth is specified for an aggr, SRS_TX_BW_AGGR mode is used.
2610*11878SVenu.Iyer@Sun.COM  * In this mode, the bandwidth is first applied on the outgoing packets
2611*11878SVenu.Iyer@Sun.COM  * and later mac_tx_aggr_mode() is called to send the packet out
2612*11878SVenu.Iyer@Sun.COM  * of one of the pseudo Tx rings.
2613*11878SVenu.Iyer@Sun.COM  *
26148275SEric Cheng  * Three flags are used in srs_state for indicating flow control
26158275SEric Cheng  * conditions: SRS_TX_BLOCKED, SRS_TX_HIWAT and SRS_TX_WAKEUP_CLIENT.
26168275SEric Cheng  * SRS_TX_BLOCKED indicates out of Tx descs. SRS expects a wakeup from the
26178275SEric Cheng  * driver below.
26188275SEric Cheng  * SRS_TX_HIWAT indicates packet count enqueued in Tx SRS exceeded Tx hiwat
26198275SEric Cheng  * and flow-control pressure is applied back to clients. The clients expect
26208275SEric Cheng  * wakeup when flow-control is relieved.
26218275SEric Cheng  * SRS_TX_WAKEUP_CLIENT get set when (flag == MAC_TX_NO_ENQUEUE) and mblk
26228275SEric Cheng  * got returned back to client either due to lack of Tx descs or due to bw
26238275SEric Cheng  * control reasons. The clients expect a wakeup when condition is relieved.
26248275SEric Cheng  *
26258275SEric Cheng  * The fourth argument to mac_tx() is the flag. Normally it will be 0 but
26268275SEric Cheng  * some clients set the following values too: MAC_DROP_ON_NO_DESC,
26278275SEric Cheng  * MAC_TX_NO_ENQUEUE
26288275SEric Cheng  * Mac clients that do not want packets to be enqueued in the mac layer set
26298275SEric Cheng  * MAC_DROP_ON_NO_DESC value. The packets won't be queued in the Tx SRS or
26308275SEric Cheng  * Tx soft rings but instead get dropped when the NIC runs out of desc. The
26318275SEric Cheng  * behaviour of this flag is different when the Tx is running in serializer
26328275SEric Cheng  * or bandwidth mode. Under these (Serializer, bandwidth) modes, the packet
26338275SEric Cheng  * gets dropped when the Tx high watermark is reached.
26348275SEric Cheng  * There are some mac clients like vsw, aggr that want the mblks to be
26358275SEric Cheng  * returned back to clients instead of being queued in Tx SRS (or Tx soft
26368275SEric Cheng  * rings) under flow-control (i.e., out of desc or exceeding bw limits)
26378275SEric Cheng  * conditions. These clients call mac_tx() with MAC_TX_NO_ENQUEUE flag set.
26388275SEric Cheng  * In the default and Tx fanout mode, the un-transmitted mblks will be
26398275SEric Cheng  * returned back to the clients when the driver runs out of Tx descs.
26408275SEric Cheng  * SRS_TX_WAKEUP_CLIENT (or S_RING_WAKEUP_CLIENT) will be set in SRS (or
26418275SEric Cheng  * soft ring) so that the clients can be woken up when Tx desc become
26428275SEric Cheng  * available. When running in serializer or bandwidth mode,
26438275SEric Cheng  * SRS_TX_WAKEUP_CLIENT will be set when tx hi-watermark is reached.
26448275SEric Cheng  */
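/*
 * Illustrative sketch of the client side of the flags described above
 * (not from this file). The client handle 'mch' and connection pointer
 * 'connp' are hypothetical; the call assumes the mac_tx() client entry
 * point with its (handle, chain, hint, flag, ret_mp) argument order.
 *
 *	mblk_t		*ret_mp = NULL;
 *	mac_tx_cookie_t	cookie;
 *
 *	cookie = mac_tx(mch, mp_chain, (uintptr_t)connp,
 *	    MAC_TX_NO_ENQUEUE, &ret_mp);
 *	if (ret_mp != NULL) {
 *		(flow controlled: hold on to ret_mp; the Tx notify
 *		callback, see mac_tx_invoke_callbacks(), signals when
 *		to retry)
 *	}
 *
 * A client that prefers to lose packets rather than have them queued
 * passes MAC_DROP_ON_NO_DESC instead and ignores ret_mp.
 */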
26458275SEric Cheng 
26468275SEric Cheng mac_tx_func_t
26478275SEric Cheng mac_tx_get_func(uint32_t mode)
26488275SEric Cheng {
26498275SEric Cheng 	return (mac_tx_mode_list[mode].mac_tx_func);
26508275SEric Cheng }
26518275SEric Cheng 
26528275SEric Cheng /* ARGSUSED */
26538275SEric Cheng static mac_tx_cookie_t
26548275SEric Cheng mac_tx_single_ring_mode(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
26558275SEric Cheng     uintptr_t fanout_hint, uint16_t flag, mblk_t **ret_mp)
26568275SEric Cheng {
26578275SEric Cheng 	mac_srs_tx_t		*srs_tx = &mac_srs->srs_tx;
26588275SEric Cheng 	mac_tx_stats_t		stats;
26598275SEric Cheng 	mac_tx_cookie_t		cookie = NULL;
26608275SEric Cheng 
26618275SEric Cheng 	ASSERT(srs_tx->st_mode == SRS_TX_DEFAULT);
26628275SEric Cheng 
26638275SEric Cheng 	/* Regular case with a single Tx ring */
26648275SEric Cheng 	/*
26658275SEric Cheng 	 * SRS_TX_BLOCKED is set when underlying NIC runs
26668275SEric Cheng 	 * out of Tx descs and messages start getting
26678275SEric Cheng 	 * queued. It won't get reset until
26688275SEric Cheng 	 * mac_tx_srs_drain() completely drains out the
26698275SEric Cheng 	 * messages.
26708275SEric Cheng 	 */
26718275SEric Cheng 	if ((mac_srs->srs_state & SRS_ENQUEUED) != 0) {
26728275SEric Cheng 		/* Tx descs/resources not available */
26738275SEric Cheng 		mutex_enter(&mac_srs->srs_lock);
26748275SEric Cheng 		if ((mac_srs->srs_state & SRS_ENQUEUED) != 0) {
26758275SEric Cheng 			cookie = mac_tx_srs_no_desc(mac_srs, mp_chain,
26768275SEric Cheng 			    flag, ret_mp);
26778275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
26788275SEric Cheng 			return (cookie);
26798275SEric Cheng 		}
26808275SEric Cheng 		/*
26818275SEric Cheng 		 * While we were computing mblk count, the
26828275SEric Cheng 		 * flow control condition got relieved.
26838275SEric Cheng 		 * Continue with the transmission.
26848275SEric Cheng 		 */
26858275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
26868275SEric Cheng 	}
26878275SEric Cheng 
26888275SEric Cheng 	mp_chain = mac_tx_send(srs_tx->st_arg1, srs_tx->st_arg2,
2689*11878SVenu.Iyer@Sun.COM 	    mp_chain, &stats);
26908275SEric Cheng 
26918275SEric Cheng 	/*
26928275SEric Cheng 	 * Multiple threads could be here sending packets.
26938275SEric Cheng 	 * Under such conditions, it is not possible to
26948275SEric Cheng 	 * atomically set the SRS_TX_BLOCKED bit to indicate
26958275SEric Cheng 	 * out of tx desc condition. To atomically set
26968275SEric Cheng 	 * this, we queue the returned packet and do
26978275SEric Cheng 	 * the setting of SRS_TX_BLOCKED in
26988275SEric Cheng 	 * mac_tx_srs_drain().
26998275SEric Cheng 	 */
27008275SEric Cheng 	if (mp_chain != NULL) {
27018275SEric Cheng 		mutex_enter(&mac_srs->srs_lock);
27028275SEric Cheng 		cookie = mac_tx_srs_no_desc(mac_srs, mp_chain, flag, ret_mp);
27038275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
27048275SEric Cheng 		return (cookie);
27058275SEric Cheng 	}
2706*11878SVenu.Iyer@Sun.COM 	SRS_TX_STATS_UPDATE(mac_srs, &stats);
27078275SEric Cheng 
27088275SEric Cheng 	return (NULL);
27098275SEric Cheng }
27108275SEric Cheng 
27118275SEric Cheng /*
27128275SEric Cheng  * mac_tx_serializer_mode
27138275SEric Cheng  *
27148275SEric Cheng  * This is an experimental mode implemented as per the request of PAE.
27158275SEric Cheng  * In this mode, all callers attempting to send a packet to the NIC
27168275SEric Cheng  * will get serialized. Only one thread at any time will access the
27178275SEric Cheng  * NIC to send the packet out.
27188275SEric Cheng  */
27198275SEric Cheng /* ARGSUSED */
27208275SEric Cheng static mac_tx_cookie_t
27218275SEric Cheng mac_tx_serializer_mode(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
27228275SEric Cheng     uintptr_t fanout_hint, uint16_t flag, mblk_t **ret_mp)
27238275SEric Cheng {
27248275SEric Cheng 	mac_tx_stats_t		stats;
27258275SEric Cheng 	mac_tx_cookie_t		cookie = NULL;
27268275SEric Cheng 	mac_srs_tx_t		*srs_tx = &mac_srs->srs_tx;
27278275SEric Cheng 
27288275SEric Cheng 	/* Single ring, serialize below */
27298275SEric Cheng 	ASSERT(srs_tx->st_mode == SRS_TX_SERIALIZE);
27308275SEric Cheng 	mutex_enter(&mac_srs->srs_lock);
27318275SEric Cheng 	if ((mac_srs->srs_first != NULL) ||
27328275SEric Cheng 	    (mac_srs->srs_state & SRS_PROC)) {
27338275SEric Cheng 		/*
27348275SEric Cheng 		 * In serialization mode, queue all packets until
27358275SEric Cheng 		 * TX_HIWAT is set.
27368275SEric Cheng 		 * If drop bit is set, drop if TX_HIWAT is set.
27378275SEric Cheng 		 * If no_enqueue is set, still enqueue until hiwat
27388275SEric Cheng 		 * is set and return mblks after TX_HIWAT is set.
27398275SEric Cheng 		 */
27408275SEric Cheng 		cookie = mac_tx_srs_enqueue(mac_srs, mp_chain,
27418275SEric Cheng 		    flag, NULL, ret_mp);
27428275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
27438275SEric Cheng 		return (cookie);
27448275SEric Cheng 	}
27458275SEric Cheng 	/*
27468275SEric Cheng 	 * No packets queued, nothing on proc and no flow
27478275SEric Cheng 	 * control condition. Fast-path, ok. Do inline
27488275SEric Cheng 	 * processing.
27498275SEric Cheng 	 */
27508275SEric Cheng 	mac_srs->srs_state |= SRS_PROC;
27518275SEric Cheng 	mutex_exit(&mac_srs->srs_lock);
27528275SEric Cheng 
27538275SEric Cheng 	mp_chain = mac_tx_send(srs_tx->st_arg1, srs_tx->st_arg2,
2754*11878SVenu.Iyer@Sun.COM 	    mp_chain, &stats);
27558275SEric Cheng 
27568275SEric Cheng 	mutex_enter(&mac_srs->srs_lock);
27578275SEric Cheng 	mac_srs->srs_state &= ~SRS_PROC;
27588275SEric Cheng 	if (mp_chain != NULL) {
27598275SEric Cheng 		cookie = mac_tx_srs_enqueue(mac_srs,
27608275SEric Cheng 		    mp_chain, flag, NULL, ret_mp);
27618275SEric Cheng 	}
27628275SEric Cheng 	if (mac_srs->srs_first != NULL) {
27638275SEric Cheng 		/*
27648275SEric Cheng 		 * We processed our packet inline and new
27658275SEric Cheng 		 * packet(s) got queued while we were
27668275SEric Cheng 		 * processing. Wake up the srs worker.
27678275SEric Cheng 		 */
27688275SEric Cheng 		cv_signal(&mac_srs->srs_async);
27698275SEric Cheng 	}
27708275SEric Cheng 	mutex_exit(&mac_srs->srs_lock);
27718275SEric Cheng 
2772*11878SVenu.Iyer@Sun.COM 	if (cookie == NULL)
2773*11878SVenu.Iyer@Sun.COM 		SRS_TX_STATS_UPDATE(mac_srs, &stats);
27748275SEric Cheng 
27758275SEric Cheng 	return (cookie);
27768275SEric Cheng }
27778275SEric Cheng 
27788275SEric Cheng /*
27798275SEric Cheng  * mac_tx_fanout_mode
27808275SEric Cheng  *
27818275SEric Cheng  * In this mode, the SRS will have access to multiple Tx rings to send
27828275SEric Cheng  * the packet out. The fanout hint that is passed as an argument is
27838275SEric Cheng  * used to find an appropriate ring to fanout the traffic. Each Tx
27848275SEric Cheng  * ring, in turn,  will have a soft ring associated with it. If a Tx
27858275SEric Cheng  * ring runs out of Tx descs, the returned packet will be queued in
27868275SEric Cheng  * the soft ring associated with that Tx ring. The srs itself will not
27878275SEric Cheng  * queue any packets.
27888275SEric Cheng  */
27898833SVenu.Iyer@Sun.COM 
27908833SVenu.Iyer@Sun.COM #define	MAC_TX_SOFT_RING_PROCESS(chain) {		       		\
2791*11878SVenu.Iyer@Sun.COM 	index = COMPUTE_INDEX(hash, mac_srs->srs_tx_ring_count),	\
2792*11878SVenu.Iyer@Sun.COM 	softring = mac_srs->srs_tx_soft_rings[index];			\
27938833SVenu.Iyer@Sun.COM 	cookie = mac_tx_soft_ring_process(softring, chain, flag, ret_mp); \
27948833SVenu.Iyer@Sun.COM 	DTRACE_PROBE2(tx__fanout, uint64_t, hash, uint_t, index);	\
27958833SVenu.Iyer@Sun.COM }
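/*
 * For reference, the macro above amounts to picking one soft ring by
 * reducing the hashed hint modulo the soft ring count, so every packet
 * carrying the same hint lands on the same Tx ring. A sketch of the
 * intent (the real HASH_HINT()/COMPUTE_INDEX() definitions live in the
 * MAC implementation headers):
 *
 *	hash = HASH_HINT(fanout_hint);
 *	index = hash % mac_srs->srs_tx_ring_count;
 *	softring = mac_srs->srs_tx_soft_rings[index];
 */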
27968833SVenu.Iyer@Sun.COM 
27978275SEric Cheng static mac_tx_cookie_t
27988275SEric Cheng mac_tx_fanout_mode(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
27998275SEric Cheng     uintptr_t fanout_hint, uint16_t flag, mblk_t **ret_mp)
28008275SEric Cheng {
28018275SEric Cheng 	mac_soft_ring_t		*softring;
28028833SVenu.Iyer@Sun.COM 	uint64_t		hash;
28038833SVenu.Iyer@Sun.COM 	uint_t			index;
28048833SVenu.Iyer@Sun.COM 	mac_tx_cookie_t		cookie = NULL;
28058275SEric Cheng 
2806*11878SVenu.Iyer@Sun.COM 	ASSERT(mac_srs->srs_tx.st_mode == SRS_TX_FANOUT ||
2807*11878SVenu.Iyer@Sun.COM 	    mac_srs->srs_tx.st_mode == SRS_TX_BW_FANOUT);
28088833SVenu.Iyer@Sun.COM 	if (fanout_hint != 0) {
28098833SVenu.Iyer@Sun.COM 		/*
28108833SVenu.Iyer@Sun.COM 		 * The hint is specified by the caller, simply pass the
28118833SVenu.Iyer@Sun.COM 		 * whole chain to the soft ring.
28128833SVenu.Iyer@Sun.COM 		 */
28138833SVenu.Iyer@Sun.COM 		hash = HASH_HINT(fanout_hint);
28148833SVenu.Iyer@Sun.COM 		MAC_TX_SOFT_RING_PROCESS(mp_chain);
28158833SVenu.Iyer@Sun.COM 	} else {
28168833SVenu.Iyer@Sun.COM 		mblk_t *last_mp, *cur_mp, *sub_chain;
28178833SVenu.Iyer@Sun.COM 		uint64_t last_hash = 0;
28188833SVenu.Iyer@Sun.COM 		uint_t media = mac_srs->srs_mcip->mci_mip->mi_info.mi_media;
28198833SVenu.Iyer@Sun.COM 
28208833SVenu.Iyer@Sun.COM 		/*
28218833SVenu.Iyer@Sun.COM 		 * Compute the hash from the contents (headers) of the
28228833SVenu.Iyer@Sun.COM 		 * packets of the mblk chain. Split the chains into
28238833SVenu.Iyer@Sun.COM 		 * subchains of the same conversation.
28248833SVenu.Iyer@Sun.COM 		 *
28258833SVenu.Iyer@Sun.COM 		 * Since more than one ring may be used for
28268833SVenu.Iyer@Sun.COM 		 * sub-chains of the same conversation, and since
28278833SVenu.Iyer@Sun.COM 		 * the caller maintains no per-conversation state
28288833SVenu.Iyer@Sun.COM 		 * (it passed a zero hint), unsent subchains will
28298833SVenu.Iyer@Sun.COM 		 * be dropped.
28308833SVenu.Iyer@Sun.COM 		 */
28318833SVenu.Iyer@Sun.COM 
28328833SVenu.Iyer@Sun.COM 		flag |= MAC_DROP_ON_NO_DESC;
28338833SVenu.Iyer@Sun.COM 		ret_mp = NULL;
28348833SVenu.Iyer@Sun.COM 
28358833SVenu.Iyer@Sun.COM 		ASSERT(ret_mp == NULL);
28368833SVenu.Iyer@Sun.COM 
28378833SVenu.Iyer@Sun.COM 		sub_chain = NULL;
28388833SVenu.Iyer@Sun.COM 		last_mp = NULL;
28398833SVenu.Iyer@Sun.COM 
28408833SVenu.Iyer@Sun.COM 		for (cur_mp = mp_chain; cur_mp != NULL;
28418833SVenu.Iyer@Sun.COM 		    cur_mp = cur_mp->b_next) {
28428833SVenu.Iyer@Sun.COM 			hash = mac_pkt_hash(media, cur_mp, MAC_PKT_HASH_L4,
28438833SVenu.Iyer@Sun.COM 			    B_TRUE);
28448833SVenu.Iyer@Sun.COM 			if (last_hash != 0 && hash != last_hash) {
28458833SVenu.Iyer@Sun.COM 				/*
28468833SVenu.Iyer@Sun.COM 				 * Starting a different subchain, send current
28478833SVenu.Iyer@Sun.COM 				 * chain out.
28488833SVenu.Iyer@Sun.COM 				 */
28498833SVenu.Iyer@Sun.COM 				ASSERT(last_mp != NULL);
28508833SVenu.Iyer@Sun.COM 				last_mp->b_next = NULL;
28518833SVenu.Iyer@Sun.COM 				MAC_TX_SOFT_RING_PROCESS(sub_chain);
28528833SVenu.Iyer@Sun.COM 				sub_chain = NULL;
28538833SVenu.Iyer@Sun.COM 			}
28548833SVenu.Iyer@Sun.COM 
28558833SVenu.Iyer@Sun.COM 			/* add packet to subchain */
28568833SVenu.Iyer@Sun.COM 			if (sub_chain == NULL)
28578833SVenu.Iyer@Sun.COM 				sub_chain = cur_mp;
28588833SVenu.Iyer@Sun.COM 			last_mp = cur_mp;
28598833SVenu.Iyer@Sun.COM 			last_hash = hash;
28608833SVenu.Iyer@Sun.COM 		}
28618833SVenu.Iyer@Sun.COM 
28628833SVenu.Iyer@Sun.COM 		if (sub_chain != NULL) {
28638833SVenu.Iyer@Sun.COM 			/* send last subchain */
28648833SVenu.Iyer@Sun.COM 			ASSERT(last_mp != NULL);
28658833SVenu.Iyer@Sun.COM 			last_mp->b_next = NULL;
28668833SVenu.Iyer@Sun.COM 			MAC_TX_SOFT_RING_PROCESS(sub_chain);
28678833SVenu.Iyer@Sun.COM 		}
28688833SVenu.Iyer@Sun.COM 
28698833SVenu.Iyer@Sun.COM 		cookie = NULL;
28708833SVenu.Iyer@Sun.COM 	}
28718833SVenu.Iyer@Sun.COM 
28728833SVenu.Iyer@Sun.COM 	return (cookie);
28738275SEric Cheng }
28748275SEric Cheng 
28758275SEric Cheng /*
28768275SEric Cheng  * mac_tx_bw_mode
28778275SEric Cheng  *
28788275SEric Cheng  * In the bandwidth mode, Tx srs will allow packets to go down to Tx ring
28798275SEric Cheng  * only if bw is available. Otherwise the packets will be queued in
28808275SEric Cheng  * SRS. If the SRS has multiple Tx rings, then packets will get fanned
28818275SEric Cheng  * out to one of the Tx rings.
28828275SEric Cheng  */
28838275SEric Cheng static mac_tx_cookie_t
28848275SEric Cheng mac_tx_bw_mode(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
28858275SEric Cheng     uintptr_t fanout_hint, uint16_t flag, mblk_t **ret_mp)
28868275SEric Cheng {
28878275SEric Cheng 	int			cnt, sz;
28888275SEric Cheng 	mblk_t			*tail;
28898275SEric Cheng 	mac_tx_cookie_t		cookie = NULL;
28908275SEric Cheng 	mac_srs_tx_t		*srs_tx = &mac_srs->srs_tx;
289111066Srafael.vanoni@sun.com 	clock_t			now;
28928275SEric Cheng 
28938275SEric Cheng 	ASSERT(TX_BANDWIDTH_MODE(mac_srs));
28948275SEric Cheng 	ASSERT(mac_srs->srs_type & SRST_BW_CONTROL);
28958275SEric Cheng 	mutex_enter(&mac_srs->srs_lock);
28968275SEric Cheng 	if (mac_srs->srs_bw->mac_bw_limit == 0) {
28978833SVenu.Iyer@Sun.COM 		/*
28988833SVenu.Iyer@Sun.COM 		 * zero bandwidth, no traffic is sent: drop the packets,
28998833SVenu.Iyer@Sun.COM 		 * or return the whole chain if the caller requests all
29008833SVenu.Iyer@Sun.COM 		 * unsent packets back.
29018833SVenu.Iyer@Sun.COM 		 */
29028833SVenu.Iyer@Sun.COM 		if (flag & MAC_TX_NO_ENQUEUE) {
29038833SVenu.Iyer@Sun.COM 			cookie = (mac_tx_cookie_t)mac_srs;
29048833SVenu.Iyer@Sun.COM 			*ret_mp = mp_chain;
29058833SVenu.Iyer@Sun.COM 		} else {
29068833SVenu.Iyer@Sun.COM 			MAC_TX_SRS_DROP_MESSAGE(mac_srs, mp_chain, cookie);
29078833SVenu.Iyer@Sun.COM 		}
29088275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
29098275SEric Cheng 		return (cookie);
29108275SEric Cheng 	} else if ((mac_srs->srs_first != NULL) ||
29118275SEric Cheng 	    (mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED)) {
29128275SEric Cheng 		cookie = mac_tx_srs_enqueue(mac_srs, mp_chain, flag,
29138275SEric Cheng 		    fanout_hint, ret_mp);
29148275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
29158275SEric Cheng 		return (cookie);
29168275SEric Cheng 	}
29178275SEric Cheng 	MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
291811066Srafael.vanoni@sun.com 	now = ddi_get_lbolt();
291911066Srafael.vanoni@sun.com 	if (mac_srs->srs_bw->mac_bw_curr_time != now) {
292011066Srafael.vanoni@sun.com 		mac_srs->srs_bw->mac_bw_curr_time = now;
29218275SEric Cheng 		mac_srs->srs_bw->mac_bw_used = 0;
29228275SEric Cheng 	} else if (mac_srs->srs_bw->mac_bw_used >
29238275SEric Cheng 	    mac_srs->srs_bw->mac_bw_limit) {
29248275SEric Cheng 		mac_srs->srs_bw->mac_bw_state |= SRS_BW_ENFORCED;
29258275SEric Cheng 		MAC_TX_SRS_ENQUEUE_CHAIN(mac_srs,
29268275SEric Cheng 		    mp_chain, tail, cnt, sz);
29278275SEric Cheng 		/*
29288275SEric Cheng 		 * Wakeup worker thread. Note that worker
29298275SEric Cheng 		 * thread has to be woken up so that it
29308275SEric Cheng 		 * can fire up the timer to be woken up
29318275SEric Cheng 		 * on the next tick. Also once
29328275SEric Cheng 		 * BW_ENFORCED is set, it can only be
29338275SEric Cheng 		 * reset by srs_worker thread. Until then
29348275SEric Cheng 		 * all packets will get queued up in SRS
29358275SEric Cheng 		 * and hence this code path won't be
29368275SEric Cheng 		 * entered until BW_ENFORCED is reset.
29378275SEric Cheng 		 */
29388275SEric Cheng 		cv_signal(&mac_srs->srs_async);
29398275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
29408275SEric Cheng 		return (cookie);
29418275SEric Cheng 	}
29428275SEric Cheng 
29438275SEric Cheng 	mac_srs->srs_bw->mac_bw_used += sz;
29448275SEric Cheng 	mutex_exit(&mac_srs->srs_lock);
29458275SEric Cheng 
29468275SEric Cheng 	if (srs_tx->st_mode == SRS_TX_BW_FANOUT) {
29478275SEric Cheng 		mac_soft_ring_t *softring;
29488275SEric Cheng 		uint_t indx, hash;
29498275SEric Cheng 
29508275SEric Cheng 		hash = HASH_HINT(fanout_hint);
29518275SEric Cheng 		indx = COMPUTE_INDEX(hash,
2952*11878SVenu.Iyer@Sun.COM 		    mac_srs->srs_tx_ring_count);
2953*11878SVenu.Iyer@Sun.COM 		softring = mac_srs->srs_tx_soft_rings[indx];
29548275SEric Cheng 		return (mac_tx_soft_ring_process(softring, mp_chain, flag,
29558275SEric Cheng 		    ret_mp));
2956*11878SVenu.Iyer@Sun.COM 	} else if (srs_tx->st_mode == SRS_TX_BW_AGGR) {
2957*11878SVenu.Iyer@Sun.COM 		return (mac_tx_aggr_mode(mac_srs, mp_chain,
2958*11878SVenu.Iyer@Sun.COM 		    fanout_hint, flag, ret_mp));
29598275SEric Cheng 	} else {
29608275SEric Cheng 		mac_tx_stats_t		stats;
29618275SEric Cheng 
29628275SEric Cheng 		mp_chain = mac_tx_send(srs_tx->st_arg1, srs_tx->st_arg2,
2963*11878SVenu.Iyer@Sun.COM 		    mp_chain, &stats);
29648275SEric Cheng 
29658275SEric Cheng 		if (mp_chain != NULL) {
29668275SEric Cheng 			mutex_enter(&mac_srs->srs_lock);
29678275SEric Cheng 			MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
29688275SEric Cheng 			if (mac_srs->srs_bw->mac_bw_used > sz)
29698275SEric Cheng 				mac_srs->srs_bw->mac_bw_used -= sz;
29708275SEric Cheng 			else
29718275SEric Cheng 				mac_srs->srs_bw->mac_bw_used = 0;
29728275SEric Cheng 			cookie = mac_tx_srs_enqueue(mac_srs, mp_chain, flag,
29738275SEric Cheng 			    fanout_hint, ret_mp);
29748275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
29758275SEric Cheng 			return (cookie);
29768275SEric Cheng 		}
2977*11878SVenu.Iyer@Sun.COM 		SRS_TX_STATS_UPDATE(mac_srs, &stats);
29788275SEric Cheng 
29798275SEric Cheng 		return (NULL);
29808275SEric Cheng 	}
29818275SEric Cheng }
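/*
 * A simplified, self-contained model of the per-tick accounting that
 * mac_tx_bw_mode() above and mac_tx_srs_drain() below perform: the byte
 * count is reset whenever the lbolt tick changes, and once the bytes
 * sent in the current tick exceed the limit everything else is queued
 * until the worker runs on a later tick. The structure and function
 * names below are illustrative only.
 *
 *	typedef struct bw_model {
 *		clock_t		bwm_curr_time;	(last tick observed)
 *		uint64_t	bwm_used;	(bytes sent this tick)
 *		uint64_t	bwm_limit;	(bytes allowed per tick)
 *	} bw_model_t;
 *
 *	static boolean_t
 *	bw_allow(bw_model_t *bwm, size_t sz)
 *	{
 *		clock_t	now = ddi_get_lbolt();
 *
 *		if (bwm->bwm_curr_time != now) {
 *			bwm->bwm_curr_time = now;
 *			bwm->bwm_used = 0;
 *		}
 *		if (bwm->bwm_used > bwm->bwm_limit)
 *			return (B_FALSE);
 *		bwm->bwm_used += sz;
 *		return (B_TRUE);
 *	}
 */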
29828275SEric Cheng 
2983*11878SVenu.Iyer@Sun.COM /*
2984*11878SVenu.Iyer@Sun.COM  * mac_tx_aggr_mode
2985*11878SVenu.Iyer@Sun.COM  *
2986*11878SVenu.Iyer@Sun.COM  * This routine invokes an aggr function, aggr_find_tx_ring(), to find
2987*11878SVenu.Iyer@Sun.COM  * a (pseudo) Tx ring belonging to a port on which the packet has to
2988*11878SVenu.Iyer@Sun.COM  * be sent. aggr_find_tx_ring() first finds the outgoing port based on
2989*11878SVenu.Iyer@Sun.COM  * L2/L3/L4 policy and then uses the fanout_hint passed to it to pick
2990*11878SVenu.Iyer@Sun.COM  * a Tx ring from the selected port.
2991*11878SVenu.Iyer@Sun.COM  *
2992*11878SVenu.Iyer@Sun.COM  * Note that a port can be deleted from the aggregation. In such a case,
2993*11878SVenu.Iyer@Sun.COM  * the aggregation layer first separates the port from the rest of the
2994*11878SVenu.Iyer@Sun.COM  * ports making sure that port (and thus any Tx rings associated with
2995*11878SVenu.Iyer@Sun.COM  * it) won't get selected in the call to aggr_find_tx_ring() function.
2996*11878SVenu.Iyer@Sun.COM  * Later calls are made to mac_group_rem_ring() passing pseudo Tx ring
2997*11878SVenu.Iyer@Sun.COM  * handles one by one which in turn will quiesce the Tx SRS and remove
2998*11878SVenu.Iyer@Sun.COM  * the soft ring associated with the pseudo Tx ring. Unlike Rx side
2999*11878SVenu.Iyer@Sun.COM  * where a cookie is used to protect against mac_rx_ring() calls on
3000*11878SVenu.Iyer@Sun.COM  * rings that have been removed, no such cookie is needed on the Tx
3001*11878SVenu.Iyer@Sun.COM  * side as the pseudo Tx ring won't be available anymore to
3002*11878SVenu.Iyer@Sun.COM  * aggr_find_tx_ring() once the port has been removed.
3003*11878SVenu.Iyer@Sun.COM  */
3004*11878SVenu.Iyer@Sun.COM static mac_tx_cookie_t
3005*11878SVenu.Iyer@Sun.COM mac_tx_aggr_mode(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
3006*11878SVenu.Iyer@Sun.COM     uintptr_t fanout_hint, uint16_t flag, mblk_t **ret_mp)
3007*11878SVenu.Iyer@Sun.COM {
3008*11878SVenu.Iyer@Sun.COM 	mac_srs_tx_t		*srs_tx = &mac_srs->srs_tx;
3009*11878SVenu.Iyer@Sun.COM 	mac_tx_ring_fn_t	find_tx_ring_fn;
3010*11878SVenu.Iyer@Sun.COM 	mac_ring_handle_t	ring = NULL;
3011*11878SVenu.Iyer@Sun.COM 	void			*arg;
3012*11878SVenu.Iyer@Sun.COM 	mac_soft_ring_t		*sringp;
3013*11878SVenu.Iyer@Sun.COM 
3014*11878SVenu.Iyer@Sun.COM 	find_tx_ring_fn = srs_tx->st_capab_aggr.mca_find_tx_ring_fn;
3015*11878SVenu.Iyer@Sun.COM 	arg = srs_tx->st_capab_aggr.mca_arg;
3016*11878SVenu.Iyer@Sun.COM 	if (find_tx_ring_fn(arg, mp_chain, fanout_hint, &ring) == NULL)
3017*11878SVenu.Iyer@Sun.COM 		return (NULL);
3018*11878SVenu.Iyer@Sun.COM 	sringp = srs_tx->st_soft_rings[((mac_ring_t *)ring)->mr_index];
3019*11878SVenu.Iyer@Sun.COM 	return (mac_tx_soft_ring_process(sringp, mp_chain, flag, ret_mp));
3020*11878SVenu.Iyer@Sun.COM }
3021*11878SVenu.Iyer@Sun.COM 
3022*11878SVenu.Iyer@Sun.COM void
3023*11878SVenu.Iyer@Sun.COM mac_tx_invoke_callbacks(mac_client_impl_t *mcip, mac_tx_cookie_t cookie)
3024*11878SVenu.Iyer@Sun.COM {
3025*11878SVenu.Iyer@Sun.COM 	mac_cb_t *mcb;
3026*11878SVenu.Iyer@Sun.COM 	mac_tx_notify_cb_t *mtnfp;
3027*11878SVenu.Iyer@Sun.COM 
3028*11878SVenu.Iyer@Sun.COM 	/* Wakeup callback registered clients */
3029*11878SVenu.Iyer@Sun.COM 	MAC_CALLBACK_WALKER_INC(&mcip->mci_tx_notify_cb_info);
3030*11878SVenu.Iyer@Sun.COM 	for (mcb = mcip->mci_tx_notify_cb_list; mcb != NULL;
3031*11878SVenu.Iyer@Sun.COM 	    mcb = mcb->mcb_nextp) {
3032*11878SVenu.Iyer@Sun.COM 		mtnfp = (mac_tx_notify_cb_t *)mcb->mcb_objp;
3033*11878SVenu.Iyer@Sun.COM 		mtnfp->mtnf_fn(mtnfp->mtnf_arg, cookie);
3034*11878SVenu.Iyer@Sun.COM 	}
3035*11878SVenu.Iyer@Sun.COM 	MAC_CALLBACK_WALKER_DCR(&mcip->mci_tx_notify_cb_info,
3036*11878SVenu.Iyer@Sun.COM 	    &mcip->mci_tx_notify_cb_list);
3037*11878SVenu.Iyer@Sun.COM }
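/*
 * The callbacks walked above receive the argument the client registered
 * along with the cookie that identifies the flow-controlled SRS (or soft
 * ring). An illustrative client-side callback, with hypothetical client
 * state and field names, might look like this:
 *
 *	static void
 *	myclient_tx_notify(void *arg, mac_tx_cookie_t cookie)
 *	{
 *		myclient_t	*mcp = arg;
 *
 *		mutex_enter(&mcp->mc_lock);
 *		if (mcp->mc_blocked_cookie == cookie) {
 *			mcp->mc_blocked_cookie = NULL;
 *			cv_broadcast(&mcp->mc_tx_cv);
 *		}
 *		mutex_exit(&mcp->mc_lock);
 *	}
 */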
3038*11878SVenu.Iyer@Sun.COM 
30398275SEric Cheng /* ARGSUSED */
30408275SEric Cheng void
30418275SEric Cheng mac_tx_srs_drain(mac_soft_ring_set_t *mac_srs, uint_t proc_type)
30428275SEric Cheng {
30438275SEric Cheng 	mblk_t			*head, *tail;
30448275SEric Cheng 	size_t			sz;
30458275SEric Cheng 	uint32_t		tx_mode;
30468275SEric Cheng 	uint_t			saved_pkt_count;
30478275SEric Cheng 	mac_tx_stats_t		stats;
30488275SEric Cheng 	mac_srs_tx_t		*srs_tx = &mac_srs->srs_tx;
304911066Srafael.vanoni@sun.com 	clock_t			now;
30508275SEric Cheng 
30518275SEric Cheng 	saved_pkt_count = 0;
30528275SEric Cheng 	ASSERT(mutex_owned(&mac_srs->srs_lock));
30538275SEric Cheng 	ASSERT(!(mac_srs->srs_state & SRS_PROC));
30548275SEric Cheng 
30558275SEric Cheng 	mac_srs->srs_state |= SRS_PROC;
30568275SEric Cheng 
30578275SEric Cheng 	tx_mode = srs_tx->st_mode;
30588275SEric Cheng 	if (tx_mode == SRS_TX_DEFAULT || tx_mode == SRS_TX_SERIALIZE) {
30598275SEric Cheng 		if (mac_srs->srs_first != NULL) {
30608275SEric Cheng 			head = mac_srs->srs_first;
30618275SEric Cheng 			tail = mac_srs->srs_last;
30628275SEric Cheng 			saved_pkt_count = mac_srs->srs_count;
30638275SEric Cheng 			mac_srs->srs_first = NULL;
30648275SEric Cheng 			mac_srs->srs_last = NULL;
30658275SEric Cheng 			mac_srs->srs_count = 0;
30668275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
30678275SEric Cheng 
30688275SEric Cheng 			head = mac_tx_send(srs_tx->st_arg1, srs_tx->st_arg2,
30698275SEric Cheng 			    head, &stats);
30708275SEric Cheng 
30718275SEric Cheng 			mutex_enter(&mac_srs->srs_lock);
30728275SEric Cheng 			if (head != NULL) {
30738275SEric Cheng 				/* Device out of tx desc, set block */
30748275SEric Cheng 				if (head->b_next == NULL)
30758275SEric Cheng 					VERIFY(head == tail);
30768275SEric Cheng 				tail->b_next = mac_srs->srs_first;
30778275SEric Cheng 				mac_srs->srs_first = head;
30788275SEric Cheng 				mac_srs->srs_count +=
3079*11878SVenu.Iyer@Sun.COM 				    (saved_pkt_count - stats.mts_opackets);
30808275SEric Cheng 				if (mac_srs->srs_last == NULL)
30818275SEric Cheng 					mac_srs->srs_last = tail;
30828275SEric Cheng 				MAC_TX_SRS_BLOCK(mac_srs, head);
30838275SEric Cheng 			} else {
30848275SEric Cheng 				srs_tx->st_woken_up = B_FALSE;
3085*11878SVenu.Iyer@Sun.COM 				SRS_TX_STATS_UPDATE(mac_srs, &stats);
30868275SEric Cheng 			}
30878275SEric Cheng 		}
30888275SEric Cheng 	} else if (tx_mode == SRS_TX_BW) {
30898275SEric Cheng 		/*
30908275SEric Cheng 		 * We are here because the timer fired and we have some data
30918275SEric Cheng 		 * to transmit. Also mac_tx_srs_worker should have reset
30928275SEric Cheng 		 * the SRS_BW_ENFORCED flag.
30938275SEric Cheng 		 */
30948275SEric Cheng 		ASSERT(!(mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED));
30958275SEric Cheng 		head = tail = mac_srs->srs_first;
30968275SEric Cheng 		while (mac_srs->srs_first != NULL) {
30978275SEric Cheng 			tail = mac_srs->srs_first;
30988275SEric Cheng 			tail->b_prev = NULL;
30998275SEric Cheng 			mac_srs->srs_first = tail->b_next;
31008275SEric Cheng 			if (mac_srs->srs_first == NULL)
31018275SEric Cheng 				mac_srs->srs_last = NULL;
31028275SEric Cheng 			mac_srs->srs_count--;
31038275SEric Cheng 			sz = msgdsize(tail);
31048275SEric Cheng 			mac_srs->srs_size -= sz;
31058275SEric Cheng 			saved_pkt_count++;
31068275SEric Cheng 			MAC_TX_UPDATE_BW_INFO(mac_srs, sz);
31078275SEric Cheng 
31088275SEric Cheng 			if (mac_srs->srs_bw->mac_bw_used <
31098275SEric Cheng 			    mac_srs->srs_bw->mac_bw_limit)
31108275SEric Cheng 				continue;
31118275SEric Cheng 
311211066Srafael.vanoni@sun.com 			now = ddi_get_lbolt();
311311066Srafael.vanoni@sun.com 			if (mac_srs->srs_bw->mac_bw_curr_time != now) {
311411066Srafael.vanoni@sun.com 				mac_srs->srs_bw->mac_bw_curr_time = now;
31158275SEric Cheng 				mac_srs->srs_bw->mac_bw_used = sz;
31168275SEric Cheng 				continue;
31178275SEric Cheng 			}
31188275SEric Cheng 			mac_srs->srs_bw->mac_bw_state |= SRS_BW_ENFORCED;
31198275SEric Cheng 			break;
31208275SEric Cheng 		}
31218275SEric Cheng 
31228275SEric Cheng 		ASSERT((head == NULL && tail == NULL) ||
31238275SEric Cheng 		    (head != NULL && tail != NULL));
31248275SEric Cheng 		if (tail != NULL) {
31258275SEric Cheng 			tail->b_next = NULL;
31268275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
31278275SEric Cheng 
31288275SEric Cheng 			head = mac_tx_send(srs_tx->st_arg1, srs_tx->st_arg2,
31298275SEric Cheng 			    head, &stats);
31308275SEric Cheng 
31318275SEric Cheng 			mutex_enter(&mac_srs->srs_lock);
31328275SEric Cheng 			if (head != NULL) {
31338275SEric Cheng 				uint_t size_sent;
31348275SEric Cheng 
31358275SEric Cheng 				/* Device out of tx desc, set block */
31368275SEric Cheng 				if (head->b_next == NULL)
31378275SEric Cheng 					VERIFY(head == tail);
31388275SEric Cheng 				tail->b_next = mac_srs->srs_first;
31398275SEric Cheng 				mac_srs->srs_first = head;
31408275SEric Cheng 				mac_srs->srs_count +=
3141*11878SVenu.Iyer@Sun.COM 				    (saved_pkt_count - stats.mts_opackets);
31428275SEric Cheng 				if (mac_srs->srs_last == NULL)
31438275SEric Cheng 					mac_srs->srs_last = tail;
3144*11878SVenu.Iyer@Sun.COM 				size_sent = sz - stats.mts_obytes;
31458275SEric Cheng 				mac_srs->srs_size += size_sent;
31468275SEric Cheng 				mac_srs->srs_bw->mac_bw_sz += size_sent;
31478275SEric Cheng 				if (mac_srs->srs_bw->mac_bw_used > size_sent) {
31488275SEric Cheng 					mac_srs->srs_bw->mac_bw_used -=
31498275SEric Cheng 					    size_sent;
31508275SEric Cheng 				} else {
31518275SEric Cheng 					mac_srs->srs_bw->mac_bw_used = 0;
31528275SEric Cheng 				}
31538275SEric Cheng 				MAC_TX_SRS_BLOCK(mac_srs, head);
31548275SEric Cheng 			} else {
31558275SEric Cheng 				srs_tx->st_woken_up = B_FALSE;
3156*11878SVenu.Iyer@Sun.COM 				SRS_TX_STATS_UPDATE(mac_srs, &stats);
31578275SEric Cheng 			}
31588275SEric Cheng 		}
3159*11878SVenu.Iyer@Sun.COM 	} else if (tx_mode == SRS_TX_BW_FANOUT || tx_mode == SRS_TX_BW_AGGR) {
31608275SEric Cheng 		mblk_t *prev;
31618275SEric Cheng 		uint64_t hint;
31628275SEric Cheng 
31638275SEric Cheng 		/*
31648275SEric Cheng 		 * We are here because the timer fired and we
31658275SEric Cheng 		 * have some quota to transmit.
31668275SEric Cheng 		 */
31678275SEric Cheng 		prev = NULL;
31688275SEric Cheng 		head = tail = mac_srs->srs_first;
31698275SEric Cheng 		while (mac_srs->srs_first != NULL) {
31708275SEric Cheng 			tail = mac_srs->srs_first;
31718275SEric Cheng 			mac_srs->srs_first = tail->b_next;
31728275SEric Cheng 			if (mac_srs->srs_first == NULL)
31738275SEric Cheng 				mac_srs->srs_last = NULL;
31748275SEric Cheng 			mac_srs->srs_count--;
31758275SEric Cheng 			sz = msgdsize(tail);
31768275SEric Cheng 			mac_srs->srs_size -= sz;
31778275SEric Cheng 			mac_srs->srs_bw->mac_bw_used += sz;
31788275SEric Cheng 			if (prev == NULL)
31798275SEric Cheng 				hint = (ulong_t)tail->b_prev;
31808275SEric Cheng 			if (hint != (ulong_t)tail->b_prev) {
31818275SEric Cheng 				prev->b_next = NULL;
31828275SEric Cheng 				mutex_exit(&mac_srs->srs_lock);
31838275SEric Cheng 				TX_SRS_TO_SOFT_RING(mac_srs, head, hint);
31848275SEric Cheng 				head = tail;
31858275SEric Cheng 				hint = (ulong_t)tail->b_prev;
31868275SEric Cheng 				mutex_enter(&mac_srs->srs_lock);
31878275SEric Cheng 			}
31888275SEric Cheng 
31898275SEric Cheng 			prev = tail;
31908275SEric Cheng 			tail->b_prev = NULL;
31918275SEric Cheng 			if (mac_srs->srs_bw->mac_bw_used <
31928275SEric Cheng 			    mac_srs->srs_bw->mac_bw_limit)
31938275SEric Cheng 				continue;
31948275SEric Cheng 
319511066Srafael.vanoni@sun.com 			now = ddi_get_lbolt();
319611066Srafael.vanoni@sun.com 			if (mac_srs->srs_bw->mac_bw_curr_time != now) {
319711066Srafael.vanoni@sun.com 				mac_srs->srs_bw->mac_bw_curr_time = now;
31988275SEric Cheng 				mac_srs->srs_bw->mac_bw_used = 0;
31998275SEric Cheng 				continue;
32008275SEric Cheng 			}
32018275SEric Cheng 			mac_srs->srs_bw->mac_bw_state |= SRS_BW_ENFORCED;
32028275SEric Cheng 			break;
32038275SEric Cheng 		}
32048275SEric Cheng 		ASSERT((head == NULL && tail == NULL) ||
32058275SEric Cheng 		    (head != NULL && tail != NULL));
32068275SEric Cheng 		if (tail != NULL) {
32078275SEric Cheng 			tail->b_next = NULL;
32088275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
32098275SEric Cheng 			TX_SRS_TO_SOFT_RING(mac_srs, head, hint);
32108275SEric Cheng 			mutex_enter(&mac_srs->srs_lock);
32118275SEric Cheng 		}
32128275SEric Cheng 	}
32138275SEric Cheng 	/*
32148275SEric Cheng 	 * SRS_TX_FANOUT case not considered here because packets
32158275SEric Cheng 	 * won't be queued in the SRS for this case. Packets will
32168275SEric Cheng 	 * be sent directly to soft rings underneath and if there
32178275SEric Cheng 	 * is any queueing at all, it would be in Tx side soft
32188275SEric Cheng 	 * rings.
32198275SEric Cheng 	 */
32208275SEric Cheng 
32218275SEric Cheng 	/*
32228275SEric Cheng 	 * When srs_count becomes 0, reset SRS_TX_HIWAT and
32238275SEric Cheng 	 * SRS_TX_WAKEUP_CLIENT and wakeup registered clients.
32248275SEric Cheng 	 */
32258275SEric Cheng 	if (mac_srs->srs_count == 0 && (mac_srs->srs_state &
32268275SEric Cheng 	    (SRS_TX_HIWAT | SRS_TX_WAKEUP_CLIENT | SRS_ENQUEUED))) {
32278275SEric Cheng 		mac_client_impl_t *mcip = mac_srs->srs_mcip;
32288275SEric Cheng 		boolean_t wakeup_required = B_FALSE;
32298275SEric Cheng 
32308275SEric Cheng 		if (mac_srs->srs_state &
32318275SEric Cheng 		    (SRS_TX_HIWAT|SRS_TX_WAKEUP_CLIENT)) {
32328275SEric Cheng 			wakeup_required = B_TRUE;
32338275SEric Cheng 		}
32348275SEric Cheng 		mac_srs->srs_state &= ~(SRS_TX_HIWAT |
32358275SEric Cheng 		    SRS_TX_WAKEUP_CLIENT | SRS_ENQUEUED);
32368275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
32378275SEric Cheng 		if (wakeup_required) {
3238*11878SVenu.Iyer@Sun.COM 			mac_tx_invoke_callbacks(mcip, (mac_tx_cookie_t)mac_srs);
32398275SEric Cheng 			/*
32408275SEric Cheng 			 * If the client is not the primary MAC client, then we
32418275SEric Cheng 			 * need to send the notification to the client's upper
32428275SEric Cheng 			 * MAC, i.e. mci_upper_mip.
32438275SEric Cheng 			 */
32448275SEric Cheng 			mac_tx_notify(mcip->mci_upper_mip != NULL ?
32458275SEric Cheng 			    mcip->mci_upper_mip : mcip->mci_mip);
32468275SEric Cheng 		}
32478275SEric Cheng 		mutex_enter(&mac_srs->srs_lock);
32488275SEric Cheng 	}
32498275SEric Cheng 	mac_srs->srs_state &= ~SRS_PROC;
32508275SEric Cheng }
32518275SEric Cheng 
32528275SEric Cheng /*
32538275SEric Cheng  * Given a packet, get the flow_entry that identifies the flow
32548275SEric Cheng  * to which that packet belongs. The flow_entry will contain
32558275SEric Cheng  * the transmit function to be used to send the packet. If the
32568275SEric Cheng  * function returns NULL, the packet should be sent using the
32578275SEric Cheng  * underlying NIC.
32588275SEric Cheng  */
32598275SEric Cheng static flow_entry_t *
32608275SEric Cheng mac_tx_classify(mac_impl_t *mip, mblk_t *mp)
32618275SEric Cheng {
32628275SEric Cheng 	flow_entry_t		*flent = NULL;
32638275SEric Cheng 	mac_client_impl_t	*mcip;
32648275SEric Cheng 	int	err;
32658275SEric Cheng 
32668275SEric Cheng 	/*
32678275SEric Cheng 	 * Do classification on the packet.
32688275SEric Cheng 	 */
32698275SEric Cheng 	err = mac_flow_lookup(mip->mi_flow_tab, mp, FLOW_OUTBOUND, &flent);
32708275SEric Cheng 	if (err != 0)
32718275SEric Cheng 		return (NULL);
32728275SEric Cheng 
32738275SEric Cheng 	/*
32748275SEric Cheng 	 * This flent might just be an additional one on the MAC client,
32758275SEric Cheng 	 * i.e. for classification purposes (different fdesc), however
32768275SEric Cheng 	 * the resources, SRS et. al., are in the mci_flent, so if
32778275SEric Cheng 	 * this isn't the mci_flent, we need to get it.
32788275SEric Cheng 	 */
32798275SEric Cheng 	if ((mcip = flent->fe_mcip) != NULL && mcip->mci_flent != flent) {
32808275SEric Cheng 		FLOW_REFRELE(flent);
32818275SEric Cheng 		flent = mcip->mci_flent;
32828275SEric Cheng 		FLOW_TRY_REFHOLD(flent, err);
32838275SEric Cheng 		if (err != 0)
32848275SEric Cheng 			return (NULL);
32858275SEric Cheng 	}
32868275SEric Cheng 
32878275SEric Cheng 	return (flent);
32888275SEric Cheng }
32898275SEric Cheng 
32908275SEric Cheng /*
32918275SEric Cheng  * This macro is only meant to be used by mac_tx_send().
32928275SEric Cheng  */
32938275SEric Cheng #define	CHECK_VID_AND_ADD_TAG(mp) {			\
32948275SEric Cheng 	if (vid_check) {				\
32958275SEric Cheng 		int err = 0;				\
32968275SEric Cheng 							\
32978275SEric Cheng 		MAC_VID_CHECK(src_mcip, (mp), err);	\
32988275SEric Cheng 		if (err != 0) {				\
32998275SEric Cheng 			freemsg((mp));			\
33008275SEric Cheng 			(mp) = next;			\
33018275SEric Cheng 			oerrors++;			\
33028275SEric Cheng 			continue;			\
33038275SEric Cheng 		}					\
33048275SEric Cheng 	}						\
33058275SEric Cheng 	if (add_tag) {					\
33068275SEric Cheng 		(mp) = mac_add_vlan_tag((mp), 0, vid);	\
33078275SEric Cheng 		if ((mp) == NULL) {			\
33088275SEric Cheng 			(mp) = next;			\
33098275SEric Cheng 			oerrors++;			\
33108275SEric Cheng 			continue;			\
33118275SEric Cheng 		}					\
33128275SEric Cheng 	}						\
33138275SEric Cheng }
33148275SEric Cheng 
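/*
 * Note that CHECK_VID_AND_ADD_TAG() is not a general-purpose macro: it
 * assumes that vid_check, add_tag, vid, src_mcip, next and oerrors are in
 * scope, and that it is expanded inside the per-packet loop of
 * mac_tx_send(), since it uses continue to move on to the next packet.
 * A rough, illustrative sketch of the loop shape it expects:
 *
 *	mp = mp_chain;
 *	while (mp != NULL) {
 *		next = mp->b_next;
 *		mp->b_next = NULL;
 *		CHECK_VID_AND_ADD_TAG(mp);
 *		... transmit or loop back mp ...
 *		mp = next;
 *	}
 */
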
33158275SEric Cheng mblk_t *
33168275SEric Cheng mac_tx_send(mac_client_handle_t mch, mac_ring_handle_t ring, mblk_t *mp_chain,
33178275SEric Cheng     mac_tx_stats_t *stats)
33188275SEric Cheng {
33198275SEric Cheng 	mac_client_impl_t *src_mcip = (mac_client_impl_t *)mch;
33208275SEric Cheng 	mac_impl_t *mip = src_mcip->mci_mip;
33218275SEric Cheng 	uint_t obytes = 0, opackets = 0, oerrors = 0;
33228275SEric Cheng 	mblk_t *mp = NULL, *next;
33238275SEric Cheng 	boolean_t vid_check, add_tag;
33248275SEric Cheng 	uint16_t vid = 0;
33258275SEric Cheng 
33268275SEric Cheng 	if (mip->mi_nclients > 1) {
33278275SEric Cheng 		vid_check = MAC_VID_CHECK_NEEDED(src_mcip);
33288275SEric Cheng 		add_tag = MAC_TAG_NEEDED(src_mcip);
33298275SEric Cheng 		if (add_tag)
33308275SEric Cheng 			vid = mac_client_vid(mch);
33318275SEric Cheng 	} else {
33328275SEric Cheng 		ASSERT(mip->mi_nclients == 1);
33338275SEric Cheng 		vid_check = add_tag = B_FALSE;
33348275SEric Cheng 	}
33358275SEric Cheng 
33368275SEric Cheng 	/*
3337*11878SVenu.Iyer@Sun.COM 	 * Fastpath: if there's only one client, we simply send
3338*11878SVenu.Iyer@Sun.COM 	 * the packet down to the underlying NIC.
33398275SEric Cheng 	 */
3340*11878SVenu.Iyer@Sun.COM 	if (mip->mi_nactiveclients == 1) {
33418275SEric Cheng 		DTRACE_PROBE2(fastpath,
33428275SEric Cheng 		    mac_client_impl_t *, src_mcip, mblk_t *, mp_chain);
33438275SEric Cheng 
33448275SEric Cheng 		mp = mp_chain;
33458275SEric Cheng 		while (mp != NULL) {
33468275SEric Cheng 			next = mp->b_next;
33478275SEric Cheng 			mp->b_next = NULL;
33488275SEric Cheng 			opackets++;
33498275SEric Cheng 			obytes += (mp->b_cont == NULL ? MBLKL(mp) :
33508275SEric Cheng 			    msgdsize(mp));
33518275SEric Cheng 
33528275SEric Cheng 			CHECK_VID_AND_ADD_TAG(mp);
3353*11878SVenu.Iyer@Sun.COM 			MAC_TX(mip, ring, mp, src_mcip);
33548275SEric Cheng 
33558275SEric Cheng 			/*
33568275SEric Cheng 			 * If the driver is out of descriptors and does a
33578275SEric Cheng 			 * partial send it will return a chain of unsent
33588275SEric Cheng 			 * mblks. Adjust the accounting stats.
33598275SEric Cheng 			 */
33608275SEric Cheng 			if (mp != NULL) {
33618275SEric Cheng 				opackets--;
33628275SEric Cheng 				obytes -= msgdsize(mp);
33638275SEric Cheng 				mp->b_next = next;
33648275SEric Cheng 				break;
33658275SEric Cheng 			}
33668275SEric Cheng 			mp = next;
33678275SEric Cheng 		}
33688275SEric Cheng 		goto done;
33698275SEric Cheng 	}
33708275SEric Cheng 
33718275SEric Cheng 	/*
33728275SEric Cheng 	 * No fastpath, we either have more than one MAC client
33738275SEric Cheng 	 * defined on top of the same MAC, or one or more MAC
33748275SEric Cheng 	 * client promiscuous callbacks.
33758275SEric Cheng 	 */
33768275SEric Cheng 	DTRACE_PROBE3(slowpath, mac_client_impl_t *,
33778275SEric Cheng 	    src_mcip, int, mip->mi_nclients, mblk_t *, mp_chain);
33788275SEric Cheng 
33798275SEric Cheng 	mp = mp_chain;
33808275SEric Cheng 	while (mp != NULL) {
33818275SEric Cheng 		flow_entry_t *dst_flow_ent;
33828275SEric Cheng 		void *flow_cookie;
33838275SEric Cheng 		size_t	pkt_size;
33848275SEric Cheng 		mblk_t *mp1;
33858275SEric Cheng 
33868275SEric Cheng 		next = mp->b_next;
33878275SEric Cheng 		mp->b_next = NULL;
33888275SEric Cheng 		opackets++;
33898275SEric Cheng 		pkt_size = (mp->b_cont == NULL ? MBLKL(mp) : msgdsize(mp));
33908275SEric Cheng 		obytes += pkt_size;
33918275SEric Cheng 		CHECK_VID_AND_ADD_TAG(mp);
33928275SEric Cheng 
33938275SEric Cheng 		/*
33948275SEric Cheng 		 * Find the destination.
33958275SEric Cheng 		 */
33968275SEric Cheng 		dst_flow_ent = mac_tx_classify(mip, mp);
33978275SEric Cheng 
33988275SEric Cheng 		if (dst_flow_ent != NULL) {
33998275SEric Cheng 			size_t	hdrsize;
34008275SEric Cheng 			int	err = 0;
34018275SEric Cheng 
34028275SEric Cheng 			if (mip->mi_info.mi_nativemedia == DL_ETHER) {
34038275SEric Cheng 				struct ether_vlan_header *evhp =
34048275SEric Cheng 				    (struct ether_vlan_header *)mp->b_rptr;
34058275SEric Cheng 
34068275SEric Cheng 				if (ntohs(evhp->ether_tpid) == ETHERTYPE_VLAN)
34078275SEric Cheng 					hdrsize = sizeof (*evhp);
34088275SEric Cheng 				else
34098275SEric Cheng 					hdrsize = sizeof (struct ether_header);
34108275SEric Cheng 			} else {
34118275SEric Cheng 				mac_header_info_t	mhi;
34128275SEric Cheng 
34138275SEric Cheng 				err = mac_header_info((mac_handle_t)mip,
34148275SEric Cheng 				    mp, &mhi);
34158275SEric Cheng 				if (err == 0)
34168275SEric Cheng 					hdrsize = mhi.mhi_hdrsize;
34178275SEric Cheng 			}
34188275SEric Cheng 
34198275SEric Cheng 			/*
34208275SEric Cheng 			 * Got a matching flow. It's either another
34218275SEric Cheng 			 * MAC client, or a broadcast/multicast flow.
34228275SEric Cheng 			 * Make sure the packet size is within the
34238275SEric Cheng 			 * allowed size. If not, drop the packet and
34248275SEric Cheng 			 * move on to the next packet.
34258275SEric Cheng 			 */
34268275SEric Cheng 			if (err != 0 ||
34278275SEric Cheng 			    (pkt_size - hdrsize) > mip->mi_sdu_max) {
34288275SEric Cheng 				oerrors++;
34298275SEric Cheng 				DTRACE_PROBE2(loopback__drop, size_t, pkt_size,
34308275SEric Cheng 				    mblk_t *, mp);
34318275SEric Cheng 				freemsg(mp);
34328275SEric Cheng 				mp = next;
34338275SEric Cheng 				FLOW_REFRELE(dst_flow_ent);
34348275SEric Cheng 				continue;
34358275SEric Cheng 			}
34368275SEric Cheng 			flow_cookie = mac_flow_get_client_cookie(dst_flow_ent);
34378275SEric Cheng 			if (flow_cookie != NULL) {
34388275SEric Cheng 				/*
34398275SEric Cheng 				 * The vnic_bcast_send function expects
34408275SEric Cheng 				 * to receive the sender MAC client
34418275SEric Cheng 				 * as value for arg2.
34428275SEric Cheng 				 */
34438275SEric Cheng 				mac_bcast_send(flow_cookie, src_mcip, mp,
34448275SEric Cheng 				    B_TRUE);
34458275SEric Cheng 			} else {
34468275SEric Cheng 				/*
3447*11878SVenu.Iyer@Sun.COM 				 * Loop back the packet to a local MAC
3448*11878SVenu.Iyer@Sun.COM 				 * client. We force a context switch
3449*11878SVenu.Iyer@Sun.COM 				 * if both source and destination MAC
3450*11878SVenu.Iyer@Sun.COM 				 * clients are used by IP, i.e.
3451*11878SVenu.Iyer@Sun.COM 				 * bypass is set.
34528275SEric Cheng 				 */
34538275SEric Cheng 				boolean_t do_switch;
34548275SEric Cheng 				mac_client_impl_t *dst_mcip =
34558275SEric Cheng 				    dst_flow_ent->fe_mcip;
34568275SEric Cheng 
3457*11878SVenu.Iyer@Sun.COM 				/*
3458*11878SVenu.Iyer@Sun.COM 				 * Check if there are promiscuous mode
3459*11878SVenu.Iyer@Sun.COM 				 * callbacks defined. This check is
3460*11878SVenu.Iyer@Sun.COM 				 * done here in the 'else' case and
3461*11878SVenu.Iyer@Sun.COM 				 * not in other cases because this
3462*11878SVenu.Iyer@Sun.COM 				 * path is for local loopback
3463*11878SVenu.Iyer@Sun.COM 				 * communication which does not go
3464*11878SVenu.Iyer@Sun.COM 				 * through MAC_TX(). For paths that go
3465*11878SVenu.Iyer@Sun.COM 				 * through MAC_TX(), the promisc_list
3466*11878SVenu.Iyer@Sun.COM 				 * check is done inside the MAC_TX()
3467*11878SVenu.Iyer@Sun.COM 				 * macro.
3468*11878SVenu.Iyer@Sun.COM 				 */
3469*11878SVenu.Iyer@Sun.COM 				if (mip->mi_promisc_list != NULL)
3470*11878SVenu.Iyer@Sun.COM 					mac_promisc_dispatch(mip, mp, src_mcip);
3471*11878SVenu.Iyer@Sun.COM 
34728275SEric Cheng 				do_switch = ((src_mcip->mci_state_flags &
34738275SEric Cheng 				    dst_mcip->mci_state_flags &
34748275SEric Cheng 				    MCIS_CLIENT_POLL_CAPABLE) != 0);
34758275SEric Cheng 
34768275SEric Cheng 				if ((mp1 = mac_fix_cksum(mp)) != NULL) {
34778275SEric Cheng 					(dst_flow_ent->fe_cb_fn)(
34788275SEric Cheng 					    dst_flow_ent->fe_cb_arg1,
34798275SEric Cheng 					    dst_flow_ent->fe_cb_arg2,
34808275SEric Cheng 					    mp1, do_switch);
34818275SEric Cheng 				}
34828275SEric Cheng 			}
34838275SEric Cheng 			FLOW_REFRELE(dst_flow_ent);
34848275SEric Cheng 		} else {
34858275SEric Cheng 			/*
34868275SEric Cheng 			 * Unknown destination, send via the underlying
34878275SEric Cheng 			 * NIC.
34888275SEric Cheng 			 */
3489*11878SVenu.Iyer@Sun.COM 			MAC_TX(mip, ring, mp, src_mcip);
34908275SEric Cheng 			if (mp != NULL) {
34918275SEric Cheng 				/*
34928275SEric Cheng 				 * Adjust for the last packet that
34938275SEric Cheng 				 * could not be transmitted
34948275SEric Cheng 				 */
34958275SEric Cheng 				opackets--;
34968275SEric Cheng 				obytes -= pkt_size;
34978275SEric Cheng 				mp->b_next = next;
34988275SEric Cheng 				break;
34998275SEric Cheng 			}
35008275SEric Cheng 		}
35018275SEric Cheng 		mp = next;
35028275SEric Cheng 	}
35038275SEric Cheng 
35048275SEric Cheng done:
3505*11878SVenu.Iyer@Sun.COM 	stats->mts_obytes = obytes;
3506*11878SVenu.Iyer@Sun.COM 	stats->mts_opackets = opackets;
3507*11878SVenu.Iyer@Sun.COM 	stats->mts_oerrors = oerrors;
35088275SEric Cheng 	return (mp);
35098275SEric Cheng }
35108275SEric Cheng 
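/*
 * Callers of mac_tx_send() must be prepared for a partial send: a non-NULL
 * return value is the head of the unsent remainder of the chain (with
 * b_next relinked), and the returned stats only cover what was actually
 * handed to the driver or looped back. An illustrative caller sketch (the
 * requeue step is hypothetical here; the real callers below requeue via
 * mac_tx_sring_enqueue() or the SRS enqueue macros):
 *
 *	mac_tx_stats_t	stats;
 *	mblk_t		*unsent;
 *
 *	unsent = mac_tx_send(mch, ring, mp_chain, &stats);
 *	if (unsent != NULL) {
 *		... enqueue unsent and back off until the ring unblocks ...
 *	}
 *	SRS_TX_STATS_UPDATE(mac_srs, &stats);
 */
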
35118275SEric Cheng /*
35128275SEric Cheng  * mac_tx_srs_ring_present
35138275SEric Cheng  *
35148275SEric Cheng  * Returns whether the specified ring is part of the specified SRS.
35158275SEric Cheng  */
35168275SEric Cheng boolean_t
35178275SEric Cheng mac_tx_srs_ring_present(mac_soft_ring_set_t *srs, mac_ring_t *tx_ring)
35188275SEric Cheng {
35198275SEric Cheng 	int i;
35208275SEric Cheng 	mac_soft_ring_t *soft_ring;
35218275SEric Cheng 
35228275SEric Cheng 	if (srs->srs_tx.st_arg2 == tx_ring)
35238275SEric Cheng 		return (B_TRUE);
35248275SEric Cheng 
3525*11878SVenu.Iyer@Sun.COM 	for (i = 0; i < srs->srs_tx_ring_count; i++) {
3526*11878SVenu.Iyer@Sun.COM 		soft_ring =  srs->srs_tx_soft_rings[i];
35278275SEric Cheng 		if (soft_ring->s_ring_tx_arg2 == tx_ring)
35288275SEric Cheng 			return (B_TRUE);
35298275SEric Cheng 	}
35308275SEric Cheng 
35318275SEric Cheng 	return (B_FALSE);
35328275SEric Cheng }
35338275SEric Cheng 
35348275SEric Cheng /*
3535*11878SVenu.Iyer@Sun.COM  * mac_tx_srs_get_soft_ring
3536*11878SVenu.Iyer@Sun.COM  *
3537*11878SVenu.Iyer@Sun.COM  * Returns the TX soft ring associated with the given ring, if present.
3538*11878SVenu.Iyer@Sun.COM  */
3539*11878SVenu.Iyer@Sun.COM mac_soft_ring_t *
3540*11878SVenu.Iyer@Sun.COM mac_tx_srs_get_soft_ring(mac_soft_ring_set_t *srs, mac_ring_t *tx_ring)
3541*11878SVenu.Iyer@Sun.COM {
3542*11878SVenu.Iyer@Sun.COM 	int		i;
3543*11878SVenu.Iyer@Sun.COM 	mac_soft_ring_t	*soft_ring;
3544*11878SVenu.Iyer@Sun.COM 
3545*11878SVenu.Iyer@Sun.COM 	if (srs->srs_tx.st_arg2 == tx_ring)
3546*11878SVenu.Iyer@Sun.COM 		return (NULL);
3547*11878SVenu.Iyer@Sun.COM 
3548*11878SVenu.Iyer@Sun.COM 	for (i = 0; i < srs->srs_tx_ring_count; i++) {
3549*11878SVenu.Iyer@Sun.COM 		soft_ring =  srs->srs_tx_soft_rings[i];
3550*11878SVenu.Iyer@Sun.COM 		if (soft_ring->s_ring_tx_arg2 == tx_ring)
3551*11878SVenu.Iyer@Sun.COM 			return (soft_ring);
3552*11878SVenu.Iyer@Sun.COM 	}
3553*11878SVenu.Iyer@Sun.COM 
3554*11878SVenu.Iyer@Sun.COM 	return (NULL);
3555*11878SVenu.Iyer@Sun.COM }
3556*11878SVenu.Iyer@Sun.COM 
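/*
 * Note that mac_tx_srs_get_soft_ring() returns NULL both when tx_ring is
 * the ring directly attached to the SRS (st_arg2) and when tx_ring is not
 * used by the SRS at all; a caller that needs to tell these two cases
 * apart can check srs->srs_tx.st_arg2 first. Illustrative use:
 *
 *	mac_soft_ring_t	*sringp;
 *
 *	if ((sringp = mac_tx_srs_get_soft_ring(srs, tx_ring)) != NULL) {
 *		... operate on the soft ring bound to tx_ring ...
 *	}
 */
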
3557*11878SVenu.Iyer@Sun.COM /*
35588275SEric Cheng  * mac_tx_srs_wakeup
35598275SEric Cheng  *
35608275SEric Cheng  * Called when Tx descriptors become available. Wake up the appropriate
35618275SEric Cheng  * worker thread after resetting the SRS_TX_BLOCKED/S_RING_BLOCK bit in the
35628275SEric Cheng  * state field.
35638275SEric Cheng  */
35648275SEric Cheng void
35658275SEric Cheng mac_tx_srs_wakeup(mac_soft_ring_set_t *mac_srs, mac_ring_handle_t ring)
35668275SEric Cheng {
35678275SEric Cheng 	int i;
35688275SEric Cheng 	mac_soft_ring_t *sringp;
35698275SEric Cheng 	mac_srs_tx_t *srs_tx = &mac_srs->srs_tx;
35708275SEric Cheng 
35718275SEric Cheng 	mutex_enter(&mac_srs->srs_lock);
3572*11878SVenu.Iyer@Sun.COM 	/*
3573*11878SVenu.Iyer@Sun.COM 	 * srs_tx_ring_count == 0 is the single ring mode case. In
3574*11878SVenu.Iyer@Sun.COM 	 * this mode, there will not be Tx soft rings associated
3575*11878SVenu.Iyer@Sun.COM 	 * with the SRS.
3576*11878SVenu.Iyer@Sun.COM 	 */
3577*11878SVenu.Iyer@Sun.COM 	if (!MAC_TX_SOFT_RINGS(mac_srs)) {
35788275SEric Cheng 		if (srs_tx->st_arg2 == ring &&
35798275SEric Cheng 		    mac_srs->srs_state & SRS_TX_BLOCKED) {
35808275SEric Cheng 			mac_srs->srs_state &= ~SRS_TX_BLOCKED;
3581*11878SVenu.Iyer@Sun.COM 			srs_tx->st_stat.mts_unblockcnt++;
35828275SEric Cheng 			cv_signal(&mac_srs->srs_async);
35838275SEric Cheng 		}
35848275SEric Cheng 		/*
35858275SEric Cheng 		 * A wakeup can come before tx_srs_drain() could
35868275SEric Cheng 		 * grab srs lock and set SRS_TX_BLOCKED. So
35878275SEric Cheng 		 * always set woken_up flag when we come here.
35888275SEric Cheng 		 */
35898275SEric Cheng 		srs_tx->st_woken_up = B_TRUE;
35908275SEric Cheng 		mutex_exit(&mac_srs->srs_lock);
35918275SEric Cheng 		return;
35928275SEric Cheng 	}
35938275SEric Cheng 
3594*11878SVenu.Iyer@Sun.COM 	/*
3595*11878SVenu.Iyer@Sun.COM 	 * If we are here, it is for the SRS_TX_FANOUT, SRS_TX_BW_FANOUT,
3596*11878SVenu.Iyer@Sun.COM 	 * SRS_TX_AGGR or SRS_TX_BW_AGGR case.
3597*11878SVenu.Iyer@Sun.COM 	 */
3598*11878SVenu.Iyer@Sun.COM 	for (i = 0; i < mac_srs->srs_tx_ring_count; i++) {
3599*11878SVenu.Iyer@Sun.COM 		sringp = mac_srs->srs_tx_soft_rings[i];
36008275SEric Cheng 		mutex_enter(&sringp->s_ring_lock);
36018275SEric Cheng 		if (sringp->s_ring_tx_arg2 == ring) {
36028275SEric Cheng 			if (sringp->s_ring_state & S_RING_BLOCK) {
36038275SEric Cheng 				sringp->s_ring_state &= ~S_RING_BLOCK;
3604*11878SVenu.Iyer@Sun.COM 				sringp->s_st_stat.mts_unblockcnt++;
36058275SEric Cheng 				cv_signal(&sringp->s_ring_async);
36068275SEric Cheng 			}
36078275SEric Cheng 			sringp->s_ring_tx_woken_up = B_TRUE;
36088275SEric Cheng 		}
36098275SEric Cheng 		mutex_exit(&sringp->s_ring_lock);
36108275SEric Cheng 	}
36118275SEric Cheng 	mutex_exit(&mac_srs->srs_lock);
36128275SEric Cheng }
36138275SEric Cheng 
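/*
 * Illustrative only: when the framework learns that a Tx ring has free
 * descriptors again, it can simply call mac_tx_srs_wakeup() for every SRS
 * that might be using that ring; the routine itself matches the ring
 * against st_arg2 / s_ring_tx_arg2 and only wakes up the matching worker.
 * The list walk below is a hypothetical sketch, not the actual
 * notification path:
 *
 *	for (i = 0; i < nsrs; i++)
 *		mac_tx_srs_wakeup(srs_list[i], ring);
 */
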
36148275SEric Cheng /*
36158275SEric Cheng  * Once the driver is done draining, send a MAC_NOTE_TX notification to unleash
36168275SEric Cheng  * the blocked clients again.
36178275SEric Cheng  */
36188275SEric Cheng void
36198275SEric Cheng mac_tx_notify(mac_impl_t *mip)
36208275SEric Cheng {
36218275SEric Cheng 	i_mac_notify(mip, MAC_NOTE_TX);
36228275SEric Cheng }
36238275SEric Cheng 
36248275SEric Cheng /*
36258275SEric Cheng  * RX SOFTRING RELATED FUNCTIONS
36268275SEric Cheng  *
36278275SEric Cheng  * These functions really belong in mac_soft_ring.c and are only here
36288275SEric Cheng  * for a short period.
36298275SEric Cheng  */
36308275SEric Cheng 
36318275SEric Cheng #define	SOFT_RING_ENQUEUE_CHAIN(ringp, mp, tail, cnt, sz) {	       	\
36328275SEric Cheng 	/*								\
36338275SEric Cheng 	 * Enqueue our mblk chain.					\
36348275SEric Cheng 	 */								\
36358275SEric Cheng 	ASSERT(MUTEX_HELD(&(ringp)->s_ring_lock));			\
36368275SEric Cheng 									\
36378275SEric Cheng 	if ((ringp)->s_ring_last != NULL)				\
36388275SEric Cheng 		(ringp)->s_ring_last->b_next = (mp);			\
36398275SEric Cheng 	else								\
36408275SEric Cheng 		(ringp)->s_ring_first = (mp);				\
36418275SEric Cheng 	(ringp)->s_ring_last = (tail);					\
36428275SEric Cheng 	(ringp)->s_ring_count += (cnt);					\
36438275SEric Cheng 	ASSERT((ringp)->s_ring_count > 0);				\
36448275SEric Cheng 	if ((ringp)->s_ring_type & ST_RING_BW_CTL) {			\
36458275SEric Cheng 		(ringp)->s_ring_size += sz;				\
36468275SEric Cheng 	}								\
36478275SEric Cheng }
36488275SEric Cheng 
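/*
 * SOFT_RING_ENQUEUE_CHAIN() expects the caller to hold s_ring_lock and to
 * have already computed tail, cnt and sz for the chain, typically via
 * MAC_COUNT_CHAIN(). An illustrative pattern, assuming mac_srs, ringp,
 * mp_chain, tail, cnt and sz are in scope (see mac_tx_sring_enqueue()
 * below for a real user):
 *
 *	mutex_enter(&ringp->s_ring_lock);
 *	MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
 *	SOFT_RING_ENQUEUE_CHAIN(ringp, mp_chain, tail, cnt, sz);
 *	mutex_exit(&ringp->s_ring_lock);
 */
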
36498275SEric Cheng /*
36508275SEric Cheng  * Default entry point to deliver a packet chain to a MAC client.
36518275SEric Cheng  * If the MAC client has flows, do the classification with these
36528275SEric Cheng  * flows as well.
36538275SEric Cheng  */
36548275SEric Cheng /* ARGSUSED */
36558275SEric Cheng void
36568275SEric Cheng mac_rx_deliver(void *arg1, mac_resource_handle_t mrh, mblk_t *mp_chain,
36578275SEric Cheng     mac_header_info_t *arg3)
36588275SEric Cheng {
36598275SEric Cheng 	mac_client_impl_t *mcip = arg1;
36608275SEric Cheng 
36618275SEric Cheng 	if (mcip->mci_nvids == 1 &&
36629109SVenu.Iyer@Sun.COM 	    !(mcip->mci_state_flags & MCIS_STRIP_DISABLE)) {
36638275SEric Cheng 		/*
36648275SEric Cheng 		 * If the client has exactly one VID associated with it
36658275SEric Cheng 		 * and stripping of the VLAN header is not disabled,
36668275SEric Cheng 		 * remove the VLAN tag from the packet before
36678275SEric Cheng 		 * passing it on to the client's receive callback.
36688275SEric Cheng 		 * Note that this needs to be done after we dispatch
36698275SEric Cheng 		 * the packet to the promiscuous listeners of the
36708275SEric Cheng 		 * client, since they expect to see the whole
36718275SEric Cheng 		 * frame including the VLAN headers.
36728275SEric Cheng 		 */
36738275SEric Cheng 		mp_chain = mac_strip_vlan_tag_chain(mp_chain);
36748275SEric Cheng 	}
36758275SEric Cheng 
36768275SEric Cheng 	mcip->mci_rx_fn(mcip->mci_rx_arg, mrh, mp_chain, B_FALSE);
36778275SEric Cheng }
36788275SEric Cheng 
36798275SEric Cheng /*
36808275SEric Cheng  * mac_rx_soft_ring_process
36818275SEric Cheng  *
36828275SEric Cheng  * Process a chain for a given soft ring. If the number of packets queued
36838275SEric Cheng  * in the SRS and its associated soft rings (including this one) is
36848275SEric Cheng  * very small (tracked by srs_poll_pkt_cnt), then allow the entering
36858275SEric Cheng  * thread (interrupt or poll thread) to do inline processing. This
36868275SEric Cheng  * helps keep the latency down under low load.
36878275SEric Cheng  *
36888275SEric Cheng  * The proc and arg for each mblk is already stored in the mblk in
36898275SEric Cheng  * appropriate places.
36908275SEric Cheng  */
36918275SEric Cheng /* ARGSUSED */
36928275SEric Cheng void
36938275SEric Cheng mac_rx_soft_ring_process(mac_client_impl_t *mcip, mac_soft_ring_t *ringp,
36948275SEric Cheng     mblk_t *mp_chain, mblk_t *tail, int cnt, size_t sz)
36958275SEric Cheng {
36968275SEric Cheng 	mac_direct_rx_t		proc;
36978275SEric Cheng 	void			*arg1;
36988275SEric Cheng 	mac_resource_handle_t	arg2;
36998275SEric Cheng 	mac_soft_ring_set_t	*mac_srs = ringp->s_ring_set;
37008275SEric Cheng 
37018275SEric Cheng 	ASSERT(ringp != NULL);
37028275SEric Cheng 	ASSERT(mp_chain != NULL);
37038275SEric Cheng 	ASSERT(tail != NULL);
37048275SEric Cheng 	ASSERT(MUTEX_NOT_HELD(&ringp->s_ring_lock));
37058275SEric Cheng 
37068275SEric Cheng 	mutex_enter(&ringp->s_ring_lock);
37078275SEric Cheng 	ringp->s_ring_total_inpkt += cnt;
3708*11878SVenu.Iyer@Sun.COM 	ringp->s_ring_total_rbytes += sz;
37098833SVenu.Iyer@Sun.COM 	if ((mac_srs->srs_rx.sr_poll_pkt_cnt <= 1) &&
37108833SVenu.Iyer@Sun.COM 	    !(ringp->s_ring_type & ST_RING_WORKER_ONLY)) {
37118275SEric Cheng 		/* If on processor or blanking on, then enqueue and return */
37128275SEric Cheng 		if (ringp->s_ring_state & S_RING_BLANK ||
37138275SEric Cheng 		    ringp->s_ring_state & S_RING_PROC) {
37148275SEric Cheng 			SOFT_RING_ENQUEUE_CHAIN(ringp, mp_chain, tail, cnt, sz);
37158275SEric Cheng 			mutex_exit(&ringp->s_ring_lock);
37168275SEric Cheng 			return;
37178275SEric Cheng 		}
37188275SEric Cheng 		proc = ringp->s_ring_rx_func;
37198275SEric Cheng 		arg1 = ringp->s_ring_rx_arg1;
37208275SEric Cheng 		arg2 = ringp->s_ring_rx_arg2;
37218275SEric Cheng 		/*
37228275SEric Cheng 		 * See if anything is already queued. If we are the
37238275SEric Cheng 		 * first packet, do inline processing else queue the
37248275SEric Cheng 		 * packet and do the drain.
37258275SEric Cheng 		 */
37268275SEric Cheng 		if (ringp->s_ring_first == NULL) {
37278275SEric Cheng 			/*
37288275SEric Cheng 			 * Fast-path, ok to process and nothing queued.
37298275SEric Cheng 			 */
37308275SEric Cheng 			ringp->s_ring_run = curthread;
37318275SEric Cheng 			ringp->s_ring_state |= (S_RING_PROC);
37328275SEric Cheng 
37338275SEric Cheng 			mutex_exit(&ringp->s_ring_lock);
37348275SEric Cheng 
37358275SEric Cheng 			/*
37368275SEric Cheng 			 * We have a chain of just one packet, so
37378275SEric Cheng 			 * go through this fast path.
37388275SEric Cheng 			 */
37398275SEric Cheng 			ASSERT(mp_chain->b_next == NULL);
37408275SEric Cheng 
37418275SEric Cheng 			(*proc)(arg1, arg2, mp_chain, NULL);
37428275SEric Cheng 
37438275SEric Cheng 			ASSERT(MUTEX_NOT_HELD(&ringp->s_ring_lock));
37448275SEric Cheng 			/*
37458275SEric Cheng 			 * If we have a soft ring set which is doing
37468275SEric Cheng 			 * bandwidth control, we need to decrement
37478275SEric Cheng 			 * srs_size and count so that the SRS can have an
37488275SEric Cheng 			 * accurate idea of how much data is really
37498275SEric Cheng 			 * queued between the SRS and its soft rings. We
37508275SEric Cheng 			 * decrement the counters only when the packet
37518275SEric Cheng 			 * gets processed by both SRS and the soft ring.
37528275SEric Cheng 			 */
37538275SEric Cheng 			mutex_enter(&mac_srs->srs_lock);
37548275SEric Cheng 			MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
37558275SEric Cheng 			MAC_UPDATE_SRS_SIZE_LOCKED(mac_srs, sz);
37568275SEric Cheng 			mutex_exit(&mac_srs->srs_lock);
37578275SEric Cheng 
37588275SEric Cheng 			mutex_enter(&ringp->s_ring_lock);
37598275SEric Cheng 			ringp->s_ring_run = NULL;
37608275SEric Cheng 			ringp->s_ring_state &= ~S_RING_PROC;
37618275SEric Cheng 			if (ringp->s_ring_state & S_RING_CLIENT_WAIT)
37628275SEric Cheng 				cv_signal(&ringp->s_ring_client_cv);
37638275SEric Cheng 
37648275SEric Cheng 			if ((ringp->s_ring_first == NULL) ||
37658275SEric Cheng 			    (ringp->s_ring_state & S_RING_BLANK)) {
37668275SEric Cheng 				/*
37678275SEric Cheng 				 * We processed inline our packet and
37688275SEric Cheng 				 * nothing new has arrived or our
37698275SEric Cheng 				 * receiver doesn't want to receive
37708275SEric Cheng 				 * any packets. We are done.
37718275SEric Cheng 				 */
37728275SEric Cheng 				mutex_exit(&ringp->s_ring_lock);
37738275SEric Cheng 				return;
37748275SEric Cheng 			}
37758275SEric Cheng 		} else {
37768275SEric Cheng 			SOFT_RING_ENQUEUE_CHAIN(ringp,
37778275SEric Cheng 			    mp_chain, tail, cnt, sz);
37788275SEric Cheng 		}
37798275SEric Cheng 
37808275SEric Cheng 		/*
37818275SEric Cheng 		 * We are here because either we couldn't do inline
37828275SEric Cheng 		 * processing (because something was already
37838275SEric Cheng 		 * queued), or we had a chain of more than one
37848275SEric Cheng 		 * packet, or something else arrived after we were
37858275SEric Cheng 		 * done with inline processing.
37868275SEric Cheng 		 */
37878275SEric Cheng 		ASSERT(MUTEX_HELD(&ringp->s_ring_lock));
37888275SEric Cheng 		ASSERT(ringp->s_ring_first != NULL);
37898275SEric Cheng 
37908275SEric Cheng 		ringp->s_ring_drain_func(ringp);
37918275SEric Cheng 		mutex_exit(&ringp->s_ring_lock);
37928275SEric Cheng 		return;
37938275SEric Cheng 	} else {
37948275SEric Cheng 		/* ST_RING_WORKER_ONLY case */
37958275SEric Cheng 		SOFT_RING_ENQUEUE_CHAIN(ringp, mp_chain, tail, cnt, sz);
37968275SEric Cheng 		mac_soft_ring_worker_wakeup(ringp);
37978275SEric Cheng 		mutex_exit(&ringp->s_ring_lock);
37988275SEric Cheng 	}
37998275SEric Cheng }
38008275SEric Cheng 
38018275SEric Cheng /*
38028275SEric Cheng  * TX SOFTRING RELATED FUNCTIONS
38038275SEric Cheng  *
38048275SEric Cheng  * These functions really belong in mac_soft_ring.c and are only here
38058275SEric Cheng  * for a short period.
38068275SEric Cheng  */
38078275SEric Cheng 
38088275SEric Cheng #define	TX_SOFT_RING_ENQUEUE_CHAIN(ringp, mp, tail, cnt, sz) {	       	\
38098275SEric Cheng 	ASSERT(MUTEX_HELD(&ringp->s_ring_lock));			\
38108275SEric Cheng 	ringp->s_ring_state |= S_RING_ENQUEUED;				\
38118275SEric Cheng 	SOFT_RING_ENQUEUE_CHAIN(ringp, mp_chain, tail, cnt, sz);	\
38128275SEric Cheng }
38138275SEric Cheng 
38148275SEric Cheng /*
38158275SEric Cheng  * mac_tx_sring_queued
38168275SEric Cheng  *
38178275SEric Cheng  * When we are out of transmit descriptors and we already have a
38188275SEric Cheng  * queue that exceeds hiwat (or the client called us with
38198275SEric Cheng  * MAC_TX_NO_ENQUEUE or MAC_DROP_ON_NO_DESC flag), return the
38208275SEric Cheng  * soft ring pointer as the opaque cookie so that the client can
38218275SEric Cheng  * enable flow control.
38228275SEric Cheng  */
38238275SEric Cheng static mac_tx_cookie_t
38248275SEric Cheng mac_tx_sring_enqueue(mac_soft_ring_t *ringp, mblk_t *mp_chain, uint16_t flag,
38258275SEric Cheng     mblk_t **ret_mp)
38268275SEric Cheng {
38278275SEric Cheng 	int cnt;
38288275SEric Cheng 	size_t sz;
38298275SEric Cheng 	mblk_t *tail;
38308275SEric Cheng 	mac_soft_ring_set_t *mac_srs = ringp->s_ring_set;
38318275SEric Cheng 	mac_tx_cookie_t cookie = NULL;
38328275SEric Cheng 	boolean_t wakeup_worker = B_TRUE;
38338275SEric Cheng 
38348275SEric Cheng 	ASSERT(MUTEX_HELD(&ringp->s_ring_lock));
38358275SEric Cheng 	MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
38368275SEric Cheng 	if (flag & MAC_DROP_ON_NO_DESC) {
38378275SEric Cheng 		mac_pkt_drop(NULL, NULL, mp_chain, B_FALSE);
38388275SEric Cheng 		/* increment freed stats */
38398275SEric Cheng 		ringp->s_ring_drops += cnt;
38408275SEric Cheng 		cookie = (mac_tx_cookie_t)ringp;
38418275SEric Cheng 	} else {
38428275SEric Cheng 		if (ringp->s_ring_first != NULL)
38438275SEric Cheng 			wakeup_worker = B_FALSE;
38448275SEric Cheng 
38458275SEric Cheng 		if (flag & MAC_TX_NO_ENQUEUE) {
38468275SEric Cheng 			/*
38478275SEric Cheng 			 * If QUEUED is not set, queue the packet
38488275SEric Cheng 			 * and let mac_tx_soft_ring_drain() set
38498275SEric Cheng 			 * the TX_BLOCKED bit for the reasons
38508275SEric Cheng 			 * explained above. Otherwise, return the
38518275SEric Cheng 			 * mblks.
38528275SEric Cheng 			 */
38538275SEric Cheng 			if (wakeup_worker) {
38548275SEric Cheng 				TX_SOFT_RING_ENQUEUE_CHAIN(ringp,
38558275SEric Cheng 				    mp_chain, tail, cnt, sz);
38568275SEric Cheng 			} else {
38578275SEric Cheng 				ringp->s_ring_state |= S_RING_WAKEUP_CLIENT;
38588275SEric Cheng 				cookie = (mac_tx_cookie_t)ringp;
38598275SEric Cheng 				*ret_mp = mp_chain;
38608275SEric Cheng 			}
38618275SEric Cheng 		} else {
38628275SEric Cheng 			boolean_t enqueue = B_TRUE;
38638275SEric Cheng 
38648275SEric Cheng 			if (ringp->s_ring_count > ringp->s_ring_tx_hiwat) {
38658275SEric Cheng 				/*
38668275SEric Cheng 				 * flow-controlled. Store ringp in cookie
38678275SEric Cheng 				 * so that it can be returned as
38688275SEric Cheng 				 * mac_tx_cookie_t to client
38698275SEric Cheng 				 */
38708275SEric Cheng 				ringp->s_ring_state |= S_RING_TX_HIWAT;
38718275SEric Cheng 				cookie = (mac_tx_cookie_t)ringp;
38728275SEric Cheng 				ringp->s_ring_hiwat_cnt++;
38738275SEric Cheng 				if (ringp->s_ring_count >
38748275SEric Cheng 				    ringp->s_ring_tx_max_q_cnt) {
38758275SEric Cheng 					/* increment freed stats */
38768275SEric Cheng 					ringp->s_ring_drops += cnt;
38778275SEric Cheng 					/*
38788275SEric Cheng 					 * b_prev may be set to the fanout hint
38798275SEric Cheng 					 * hence can't use freemsg directly
38808275SEric Cheng 					 */
38818275SEric Cheng 					mac_pkt_drop(NULL, NULL,
38828275SEric Cheng 					    mp_chain, B_FALSE);
38838275SEric Cheng 					DTRACE_PROBE1(tx_queued_hiwat,
38848275SEric Cheng 					    mac_soft_ring_t *, ringp);
38858275SEric Cheng 					enqueue = B_FALSE;
38868275SEric Cheng 				}
38878275SEric Cheng 			}
38888275SEric Cheng 			if (enqueue) {
38898275SEric Cheng 				TX_SOFT_RING_ENQUEUE_CHAIN(ringp, mp_chain,
38908275SEric Cheng 				    tail, cnt, sz);
38918275SEric Cheng 			}
38928275SEric Cheng 		}
38938275SEric Cheng 		if (wakeup_worker)
38948275SEric Cheng 			cv_signal(&ringp->s_ring_async);
38958275SEric Cheng 	}
38968275SEric Cheng 	return (cookie);
38978275SEric Cheng }
38988275SEric Cheng 
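/*
 * Illustrative sketch of the cookie contract above: a caller (in practice
 * the fanout/aggr Tx functions) that gets back a non-NULL cookie from
 * mac_tx_soft_ring_process() below treats the corresponding soft ring as
 * flow controlled and stops sending on that path until the Tx
 * notification/callback machinery (see mac_tx_invoke_callbacks() and
 * mac_tx_notify() usage in this file) wakes it up again:
 *
 *	mac_tx_cookie_t	cookie;
 *	mblk_t		*ret_mp = NULL;
 *
 *	cookie = mac_tx_soft_ring_process(ringp, mp_chain, flag, &ret_mp);
 *	if (cookie != NULL) {
 *		... stop transmitting; if MAC_TX_NO_ENQUEUE was passed,
 *		... ret_mp holds the packets that were handed back
 *	}
 */
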
38998275SEric Cheng 
39008275SEric Cheng /*
39018275SEric Cheng  * mac_tx_soft_ring_process
39028275SEric Cheng  *
39038275SEric Cheng  * This routine is called when fanning out outgoing traffic among
39048275SEric Cheng  * multiple Tx rings.
39058275SEric Cheng  * Note that a soft ring is associated with a h/w Tx ring.
39068275SEric Cheng  */
39078275SEric Cheng mac_tx_cookie_t
39088275SEric Cheng mac_tx_soft_ring_process(mac_soft_ring_t *ringp, mblk_t *mp_chain,
39098275SEric Cheng     uint16_t flag, mblk_t **ret_mp)
39108275SEric Cheng {
39118275SEric Cheng 	mac_soft_ring_set_t *mac_srs = ringp->s_ring_set;
39128275SEric Cheng 	int	cnt;
39138275SEric Cheng 	size_t	sz;
39148275SEric Cheng 	mblk_t	*tail;
39158275SEric Cheng 	mac_tx_cookie_t cookie = NULL;
39168275SEric Cheng 
39178275SEric Cheng 	ASSERT(ringp != NULL);
39188275SEric Cheng 	ASSERT(mp_chain != NULL);
39198275SEric Cheng 	ASSERT(MUTEX_NOT_HELD(&ringp->s_ring_lock));
39208275SEric Cheng 	/*
3921*11878SVenu.Iyer@Sun.COM 	 * The following modes can come here: SRS_TX_BW_FANOUT,
3922*11878SVenu.Iyer@Sun.COM 	 * SRS_TX_FANOUT, SRS_TX_AGGR, SRS_TX_BW_AGGR.
39238275SEric Cheng 	 */
3924*11878SVenu.Iyer@Sun.COM 	ASSERT(MAC_TX_SOFT_RINGS(mac_srs));
39258275SEric Cheng 	ASSERT(mac_srs->srs_tx.st_mode == SRS_TX_FANOUT ||
3926*11878SVenu.Iyer@Sun.COM 	    mac_srs->srs_tx.st_mode == SRS_TX_BW_FANOUT ||
3927*11878SVenu.Iyer@Sun.COM 	    mac_srs->srs_tx.st_mode == SRS_TX_AGGR ||
3928*11878SVenu.Iyer@Sun.COM 	    mac_srs->srs_tx.st_mode == SRS_TX_BW_AGGR);
39298275SEric Cheng 
39308275SEric Cheng 	if (ringp->s_ring_type & ST_RING_WORKER_ONLY) {
39318275SEric Cheng 		/* Serialization mode */
39328275SEric Cheng 
39338275SEric Cheng 		mutex_enter(&ringp->s_ring_lock);
39348275SEric Cheng 		if (ringp->s_ring_count > ringp->s_ring_tx_hiwat) {
39358275SEric Cheng 			cookie = mac_tx_sring_enqueue(ringp, mp_chain,
39368275SEric Cheng 			    flag, ret_mp);
39378275SEric Cheng 			mutex_exit(&ringp->s_ring_lock);
39388275SEric Cheng 			return (cookie);
39398275SEric Cheng 		}
39408275SEric Cheng 		MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
39418275SEric Cheng 		TX_SOFT_RING_ENQUEUE_CHAIN(ringp, mp_chain, tail, cnt, sz);
39428275SEric Cheng 		if (ringp->s_ring_state & (S_RING_BLOCK | S_RING_PROC)) {
39438275SEric Cheng 			/*
39448275SEric Cheng 			 * If ring is blocked due to lack of Tx
39458275SEric Cheng 			 * descs, just return. Worker thread
39468275SEric Cheng 			 * will get scheduled when Tx descs
39478275SEric Cheng 			 * become available.
39488275SEric Cheng 			 */
39498275SEric Cheng 			mutex_exit(&ringp->s_ring_lock);
39508275SEric Cheng 			return (cookie);
39518275SEric Cheng 		}
39528275SEric Cheng 		mac_soft_ring_worker_wakeup(ringp);
39538275SEric Cheng 		mutex_exit(&ringp->s_ring_lock);
39548275SEric Cheng 		return (cookie);
39558275SEric Cheng 	} else {
39568275SEric Cheng 		/* Default fanout mode */
39578275SEric Cheng 		/*
39588275SEric Cheng 		 * S_RING_BLOCK is set when the underlying NIC runs
39598275SEric Cheng 		 * out of Tx descs and messages start getting
39608275SEric Cheng 		 * queued. It won't get reset until
39618275SEric Cheng 		 * tx_srs_drain() completely drains out the
39628275SEric Cheng 		 * messages.
39638275SEric Cheng 		 */
39648275SEric Cheng 		mac_tx_stats_t		stats;
39658275SEric Cheng 
39668275SEric Cheng 		if (ringp->s_ring_state & S_RING_ENQUEUED) {
39678275SEric Cheng 			/* Tx descs/resources not available */
39688275SEric Cheng 			mutex_enter(&ringp->s_ring_lock);
39698275SEric Cheng 			if (ringp->s_ring_state & S_RING_ENQUEUED) {
39708275SEric Cheng 				cookie = mac_tx_sring_enqueue(ringp, mp_chain,
39718275SEric Cheng 				    flag, ret_mp);
39728275SEric Cheng 				mutex_exit(&ringp->s_ring_lock);
39738275SEric Cheng 				return (cookie);
39748275SEric Cheng 			}
39758275SEric Cheng 			/*
39768275SEric Cheng 			 * While we were computing mblk count, the
39778275SEric Cheng 			 * flow control condition got relieved.
39788275SEric Cheng 			 * Continue with the transmission.
39798275SEric Cheng 			 */
39808275SEric Cheng 			mutex_exit(&ringp->s_ring_lock);
39818275SEric Cheng 		}
39828275SEric Cheng 
39838275SEric Cheng 		mp_chain = mac_tx_send(ringp->s_ring_tx_arg1,
3984*11878SVenu.Iyer@Sun.COM 		    ringp->s_ring_tx_arg2, mp_chain, &stats);
39858275SEric Cheng 
39868275SEric Cheng 		/*
39878275SEric Cheng 		 * Multiple threads could be here sending packets.
39888275SEric Cheng 		 * Under such conditions, it is not possible to
39898275SEric Cheng 		 * atomically set the S_RING_BLOCK bit to indicate
39908275SEric Cheng 		 * an out of Tx desc condition. To atomically set
39918275SEric Cheng 		 * this, we queue the returned packet and do
39928275SEric Cheng 		 * the setting of S_RING_BLOCK in
39938275SEric Cheng 		 * mac_tx_soft_ring_drain().
39948275SEric Cheng 		 */
39958275SEric Cheng 		if (mp_chain != NULL) {
39968275SEric Cheng 			mutex_enter(&ringp->s_ring_lock);
39978275SEric Cheng 			cookie =
39988275SEric Cheng 			    mac_tx_sring_enqueue(ringp, mp_chain, flag, ret_mp);
39998275SEric Cheng 			mutex_exit(&ringp->s_ring_lock);
40008275SEric Cheng 			return (cookie);
40018275SEric Cheng 		}
4002*11878SVenu.Iyer@Sun.COM 		SRS_TX_STATS_UPDATE(mac_srs, &stats);
4003*11878SVenu.Iyer@Sun.COM 		SOFTRING_TX_STATS_UPDATE(ringp, &stats);
4004*11878SVenu.Iyer@Sun.COM 
40058275SEric Cheng 		return (NULL);
40068275SEric Cheng 	}
40078275SEric Cheng }
4008