xref: /onnv-gate/usr/src/uts/common/io/dmfe/dmfe_main.c (revision 6990:d24af98bb8ea)
15181Sgd78059 /*
25181Sgd78059  * CDDL HEADER START
35181Sgd78059  *
45181Sgd78059  * The contents of this file are subject to the terms of the
55181Sgd78059  * Common Development and Distribution License (the "License").
65181Sgd78059  * You may not use this file except in compliance with the License.
75181Sgd78059  *
85181Sgd78059  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
95181Sgd78059  * or http://www.opensolaris.org/os/licensing.
105181Sgd78059  * See the License for the specific language governing permissions
115181Sgd78059  * and limitations under the License.
125181Sgd78059  *
135181Sgd78059  * When distributing Covered Code, include this CDDL HEADER in each
145181Sgd78059  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
155181Sgd78059  * If applicable, add the following below this CDDL HEADER, with the
165181Sgd78059  * fields enclosed by brackets "[]" replaced with your own identifying
175181Sgd78059  * information: Portions Copyright [yyyy] [name of copyright owner]
185181Sgd78059  *
195181Sgd78059  * CDDL HEADER END
205181Sgd78059  */
215181Sgd78059 /*
225895Syz147064  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
235181Sgd78059  * Use is subject to license terms.
245181Sgd78059  */
255181Sgd78059 
265181Sgd78059 #pragma ident	"%Z%%M%	%I%	%E% SMI"
275181Sgd78059 
285181Sgd78059 #include <sys/types.h>
295181Sgd78059 #include <sys/sunddi.h>
305181Sgd78059 #include "dmfe_impl.h"
315181Sgd78059 
325181Sgd78059 /*
335181Sgd78059  * This is the string displayed by modinfo, etc.
345181Sgd78059  */
355181Sgd78059 static char dmfe_ident[] = "Davicom DM9102 Ethernet";
365181Sgd78059 
375181Sgd78059 
385181Sgd78059 /*
395181Sgd78059  * NOTES:
405181Sgd78059  *
415181Sgd78059  * #defines:
425181Sgd78059  *
435181Sgd78059  *	DMFE_PCI_RNUMBER is the register-set number to use for the operating
445181Sgd78059  *	registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
455181Sgd78059  *	regset 1 will be the operating registers in I/O space, and regset 2
465181Sgd78059  *	will be the operating registers in MEMORY space (preferred).  If an
475181Sgd78059  *	expansion ROM is fitted, it may appear as a further register set.
485181Sgd78059  *
495181Sgd78059  *	DMFE_SLOP defines the amount by which the chip may read beyond
505181Sgd78059  *	the end of a buffer or descriptor, apparently 6-8 dwords :(
515181Sgd78059  *	We have to make sure this doesn't cause it to access unallocated
525181Sgd78059  *	or unmapped memory.
535181Sgd78059  *
545181Sgd78059  *	DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
555181Sgd78059  *	rounded up to a multiple of 4.  Here we choose a power of two for
565181Sgd78059  *	speed & simplicity at the cost of a bit more memory.
575181Sgd78059  *
585181Sgd78059  *	However, the buffer length field in the TX/RX descriptors is only
595181Sgd78059  *	eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
605181Sgd78059  *	per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
615181Sgd78059  *	(2000) bytes each.
625181Sgd78059  *
635181Sgd78059  *	DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
645181Sgd78059  *	the data buffers.  The descriptors are always set up in CONSISTENT
655181Sgd78059  *	mode.
665181Sgd78059  *
675181Sgd78059  *	DMFE_HEADROOM defines how much space we'll leave in allocated
685181Sgd78059  *	mblks before the first valid data byte.  This should be chosen
695181Sgd78059  *	to be 2 modulo 4, so that once the ethernet header (14 bytes)
705181Sgd78059  *	has been stripped off, the packet data will be 4-byte aligned.
715181Sgd78059  *	The remaining space can be used by upstream modules to prepend
725181Sgd78059  *	any headers required.
735181Sgd78059  *
745181Sgd78059  * Patchable globals:
755181Sgd78059  *
765181Sgd78059  *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
775181Sgd78059  *		Setting READ_MULTIPLE in this register seems to cause
785181Sgd78059  *		the chip to generate a READ LINE command with a parity
795181Sgd78059  *		error!  Don't do it!
805181Sgd78059  *
815181Sgd78059  *	dmfe_setup_desc1: the value to be put into descriptor word 1
825181Sgd78059  *		when sending a SETUP packet.
835181Sgd78059  *
845181Sgd78059  *		Setting TX_LAST_DESC in desc1 in a setup packet seems
855181Sgd78059  *		to make the chip spontaneously reset internally - it
865181Sgd78059  *		attempts to give back the setup packet descriptor by
875181Sgd78059  *		writing to PCI address 00000000 - which may or may not
885181Sgd78059  *		get a MASTER ABORT - after which most of its registers
895181Sgd78059  *		seem to have either default values or garbage!
905181Sgd78059  *
915181Sgd78059  *		TX_FIRST_DESC doesn't seem to have the same effect but
925181Sgd78059  *		it isn't needed on a setup packet so we'll leave it out
935181Sgd78059  *	too, just in case it has some other weird side-effect.
945181Sgd78059  *
955181Sgd78059  *		The default hardware packet filtering mode is now
965181Sgd78059  *		HASH_AND_PERFECT (imperfect filtering of multicast
975181Sgd78059  *		packets and perfect filtering of unicast packets).
985181Sgd78059  *		If this is found not to work reliably, setting the
995181Sgd78059  *		TX_FILTER_TYPE1 bit will cause a switchover to using
1005181Sgd78059  *		HASH_ONLY mode (imperfect filtering of *all* packets).
1015181Sgd78059  *		Software will then perform the additional filtering
1025181Sgd78059  *		as required.
1035181Sgd78059  */
1045181Sgd78059 
1055181Sgd78059 #define	DMFE_PCI_RNUMBER	2
1065181Sgd78059 #define	DMFE_SLOP		(8*sizeof (uint32_t))
1075181Sgd78059 #define	DMFE_BUF_SIZE		2048
1085181Sgd78059 #define	DMFE_BUF_SIZE_1		2000
1095181Sgd78059 #define	DMFE_DMA_MODE		DDI_DMA_STREAMING
1105181Sgd78059 #define	DMFE_HEADROOM		34
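
/*
 * Worked example (annotation only, not part of the original driver):
 * with the usual values ETHERMAX == 1514 and ETHERFCSL == 4, the
 * minimum buffer size described in the NOTES block above works out to
 * 1514 + 4 + 32 == 1550 bytes, i.e. 1552 once rounded up to a multiple
 * of 4, so DMFE_BUF_SIZE (2048) comfortably satisfies it.  Likewise
 * DMFE_HEADROOM (34) is 2 modulo 4, so after the 14-byte ethernet
 * header is stripped the payload starts at offset 34 + 14 == 48,
 * which is 4-byte aligned.  A compile-time check along these lines
 * could enforce both rules:
 */
#if ((DMFE_HEADROOM % 4) != 2) || ((DMFE_BUF_SIZE % 4) != 0)
#error	"DMFE_HEADROOM/DMFE_BUF_SIZE alignment assumptions violated"
#endif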
1115181Sgd78059 
1125181Sgd78059 static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
1135181Sgd78059 static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
1145181Sgd78059 					TX_FILTER_TYPE0;
1155181Sgd78059 
1165181Sgd78059 /*
1175181Sgd78059  * Some tunable parameters ...
1185181Sgd78059  *	Number of RX/TX ring entries (128/128)
1195181Sgd78059  *	Minimum number of TX ring slots to keep free (1)
1205181Sgd78059  *	Low-water mark at which to try to reclaim TX ring slots (1)
1215181Sgd78059  *	How often to take a TX-done interrupt (twice per ring cycle)
1225181Sgd78059  *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
1235181Sgd78059  */
1245181Sgd78059 
1255181Sgd78059 #define	DMFE_TX_DESC		128	/* Should be a multiple of 4 <= 256 */
1265181Sgd78059 #define	DMFE_RX_DESC		128	/* Should be a multiple of 4 <= 256 */
1275181Sgd78059 
1285181Sgd78059 static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
1295181Sgd78059 static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
1305181Sgd78059 static uint32_t dmfe_tx_min_free = 1;
1315181Sgd78059 static uint32_t dmfe_tx_reclaim_level = 1;
1325181Sgd78059 static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
1335181Sgd78059 static boolean_t dmfe_reclaim_on_done = B_FALSE;
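
/*
 * Illustration (annotation only): dmfe_tx_int_factor is applied in
 * dmfe_send_msg() below as a power-of-two-minus-one mask:
 *
 *	if ((index & dmfe_tx_int_factor) == 0)
 *		desc1 |= TX_INT_ON_COMP;
 *
 * With the default value of (128 / 2) - 1 == 63, the test succeeds
 * only for ring indexes 0 and 64, so a TX-done interrupt is requested
 * twice per trip around the 128-entry ring, as the comment above says.
 */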
1345181Sgd78059 
1355181Sgd78059 /*
1365181Sgd78059  * Time-related parameters:
1375181Sgd78059  *
1385181Sgd78059  *	We use a cyclic to provide a periodic callback; this is then used
1395181Sgd78059  * 	to check for TX-stall and poll the link status register.
1405181Sgd78059  *
1415181Sgd78059  *	DMFE_TICK is the interval between cyclic callbacks, in microseconds.
1425181Sgd78059  *
1435181Sgd78059  *	TX_STALL_TIME_100 is the timeout in microseconds between passing
1445181Sgd78059  *	a packet to the chip for transmission and seeing that it's gone,
1455181Sgd78059  *	when running at 100Mb/s.  If we haven't reclaimed at least one
1465181Sgd78059  *	descriptor in this time we assume the transmitter has stalled
1475181Sgd78059  *	and reset the chip.
1485181Sgd78059  *
1495181Sgd78059  *	TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
1505181Sgd78059  *
1515181Sgd78059  *	LINK_POLL_TIME is the interval between checks on the link state
1525181Sgd78059  *	when nothing appears to have happened (this is in addition to the
1535181Sgd78059  *	case where we think we've detected a link change, and serves as a
1545181Sgd78059  *	backup in case the quick link check doesn't work properly).
1555181Sgd78059  *
1565181Sgd78059  * Patchable globals:
1575181Sgd78059  *
1585181Sgd78059  *	dmfe_tick_us:		DMFE_TICK
1595181Sgd78059  *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
1605181Sgd78059  *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
1615181Sgd78059  *	dmfe_link_poll_us:	LINK_POLL_TIME
1625181Sgd78059  *
1635181Sgd78059  * These are then used in _init() to calculate:
1645181Sgd78059  *
1655181Sgd78059  *	stall_100_tix[]: number of consecutive cyclic callbacks without a
1665181Sgd78059  *			 reclaim before the TX process is considered stalled,
1675181Sgd78059  *			 when running at 100Mb/s.  The elements are indexed
1685181Sgd78059  *			 by transmit-engine-state.
1695181Sgd78059  *	stall_10_tix[]:	 number of consecutive cyclic callbacks without a
1705181Sgd78059  *			 reclaim before the TX process is considered stalled,
1715181Sgd78059  *			 when running at 10Mb/s.  The elements are indexed
1725181Sgd78059  *			 by transmit-engine-state.
1735181Sgd78059  *	factotum_tix:	 number of consecutive cyclic callbacks before waking
1745181Sgd78059  *			 up the factotum even though there doesn't appear to
1755181Sgd78059  *			 be anything for it to do
1765181Sgd78059  */
1775181Sgd78059 
1785181Sgd78059 #define	DMFE_TICK		25000		/* microseconds		*/
1795181Sgd78059 #define	TX_STALL_TIME_100	50000		/* microseconds		*/
1805181Sgd78059 #define	TX_STALL_TIME_10	200000		/* microseconds		*/
1815181Sgd78059 #define	LINK_POLL_TIME		5000000		/* microseconds		*/
1825181Sgd78059 
1835181Sgd78059 static uint32_t dmfe_tick_us = DMFE_TICK;
1845181Sgd78059 static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
1855181Sgd78059 static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
1865181Sgd78059 static uint32_t dmfe_link_poll_us = LINK_POLL_TIME;
1875181Sgd78059 
1885181Sgd78059 /*
1895181Sgd78059  * Calculated from above in _init()
1905181Sgd78059  */
1915181Sgd78059 
1925181Sgd78059 static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
1935181Sgd78059 static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
1945181Sgd78059 static uint32_t factotum_tix;
1955181Sgd78059 static uint32_t factotum_fast_tix;
1965181Sgd78059 static uint32_t factotum_start_tix;
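
/*
 * Sketch of the conversion (an assumption -- the real calculation is
 * done in _init(), which is outside this section): each microsecond
 * timeout is presumably turned into a count of cyclic ticks by
 * dividing by dmfe_tick_us and rounding up, along the lines of
 *
 *	ticks = (timeout_us + dmfe_tick_us - 1) / dmfe_tick_us;
 *
 * which, with the defaults above, would make a 100Mb/s TX stall
 * 50000/25000 == 2 ticks and a routine link poll 5000000/25000 == 200
 * ticks.
 */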
1975181Sgd78059 
1985181Sgd78059 /*
1995181Sgd78059  * Property names
2005181Sgd78059  */
2015181Sgd78059 static char localmac_propname[] = "local-mac-address";
2025181Sgd78059 static char opmode_propname[] = "opmode-reg-value";
2035181Sgd78059 static char debug_propname[] = "dmfe-debug-flags";
2045181Sgd78059 
2055181Sgd78059 static int		dmfe_m_start(void *);
2065181Sgd78059 static void		dmfe_m_stop(void *);
2075181Sgd78059 static int		dmfe_m_promisc(void *, boolean_t);
2085181Sgd78059 static int		dmfe_m_multicst(void *, boolean_t, const uint8_t *);
2095181Sgd78059 static int		dmfe_m_unicst(void *, const uint8_t *);
2105181Sgd78059 static void		dmfe_m_ioctl(void *, queue_t *, mblk_t *);
2115181Sgd78059 static boolean_t	dmfe_m_getcapab(void *, mac_capab_t, void *);
2125181Sgd78059 static mblk_t		*dmfe_m_tx(void *, mblk_t *);
2135181Sgd78059 static int 		dmfe_m_stat(void *, uint_t, uint64_t *);
2145181Sgd78059 
2155181Sgd78059 static mac_callbacks_t dmfe_m_callbacks = {
2165181Sgd78059 	(MC_IOCTL | MC_GETCAPAB),
2175181Sgd78059 	dmfe_m_stat,
2185181Sgd78059 	dmfe_m_start,
2195181Sgd78059 	dmfe_m_stop,
2205181Sgd78059 	dmfe_m_promisc,
2215181Sgd78059 	dmfe_m_multicst,
2225181Sgd78059 	dmfe_m_unicst,
2235181Sgd78059 	dmfe_m_tx,
2245181Sgd78059 	NULL,
2255181Sgd78059 	dmfe_m_ioctl,
2265181Sgd78059 	dmfe_m_getcapab,
2275181Sgd78059 };
2285181Sgd78059 
2295181Sgd78059 
2305181Sgd78059 /*
2315181Sgd78059  * Describes the chip's DMA engine
2325181Sgd78059  */
2335181Sgd78059 static ddi_dma_attr_t dma_attr = {
2345181Sgd78059 	DMA_ATTR_V0,		/* dma_attr version */
2355181Sgd78059 	0,			/* dma_attr_addr_lo */
2365181Sgd78059 	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
2375181Sgd78059 	0x0FFFFFF,		/* dma_attr_count_max */
2385181Sgd78059 	0x20,			/* dma_attr_align */
2395181Sgd78059 	0x7F,			/* dma_attr_burstsizes */
2405181Sgd78059 	1,			/* dma_attr_minxfer */
2415181Sgd78059 	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
2425181Sgd78059 	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
2435181Sgd78059 	1,			/* dma_attr_sgllen */
2445181Sgd78059 	1,			/* dma_attr_granular */
2455181Sgd78059 	0			/* dma_attr_flags */
2465181Sgd78059 };
2475181Sgd78059 
2485181Sgd78059 /*
2495181Sgd78059  * DMA access attributes for registers and descriptors
2505181Sgd78059  */
2515181Sgd78059 static ddi_device_acc_attr_t dmfe_reg_accattr = {
2525181Sgd78059 	DDI_DEVICE_ATTR_V0,
2535181Sgd78059 	DDI_STRUCTURE_LE_ACC,
2545181Sgd78059 	DDI_STRICTORDER_ACC
2555181Sgd78059 };
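
/*
 * Hedged sketch (the actual mapping is done in the attach path, which
 * is not part of this section): DMFE_PCI_RNUMBER and dmfe_reg_accattr
 * would typically be combined in a call along the lines of
 *
 *	ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER, &dmfep->io_reg,
 *	    0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
 *
 * yielding the <io_reg>/<io_handle> pair that the dmfe_chip_get32()
 * and dmfe_chip_put32() accessors below rely on.
 */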
2565181Sgd78059 
2575181Sgd78059 /*
2585181Sgd78059  * DMA access attributes for data: NOT to be byte swapped.
2595181Sgd78059  */
2605181Sgd78059 static ddi_device_acc_attr_t dmfe_data_accattr = {
2615181Sgd78059 	DDI_DEVICE_ATTR_V0,
2625181Sgd78059 	DDI_NEVERSWAP_ACC,
2635181Sgd78059 	DDI_STRICTORDER_ACC
2645181Sgd78059 };
2655181Sgd78059 
2665181Sgd78059 static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
2675181Sgd78059 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2685181Sgd78059 };
2695181Sgd78059 
2705181Sgd78059 
2715181Sgd78059 /*
2725181Sgd78059  * ========== Lowest-level chip register & ring access routines ==========
2735181Sgd78059  */
2745181Sgd78059 
2755181Sgd78059 /*
2765181Sgd78059  * I/O register get/put routines
2775181Sgd78059  */
2785181Sgd78059 uint32_t
2795181Sgd78059 dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
2805181Sgd78059 {
281*6990Sgd78059 	uint32_t *addr;
282*6990Sgd78059 
283*6990Sgd78059 	addr = (void *)(dmfep->io_reg + offset);
284*6990Sgd78059 	return (ddi_get32(dmfep->io_handle, addr));
2855181Sgd78059 }
2865181Sgd78059 
2875181Sgd78059 void
2885181Sgd78059 dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
2895181Sgd78059 {
290*6990Sgd78059 	uint32_t *addr;
291*6990Sgd78059 
292*6990Sgd78059 	addr = (void *)(dmfep->io_reg + offset);
293*6990Sgd78059 	ddi_put32(dmfep->io_handle, addr, value);
2945181Sgd78059 }
2955181Sgd78059 
2965181Sgd78059 /*
2975181Sgd78059  * TX/RX ring get/put routines
2985181Sgd78059  */
2995181Sgd78059 static uint32_t
3005181Sgd78059 dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
3015181Sgd78059 {
3025181Sgd78059 	uint32_t *addr;
3035181Sgd78059 
304*6990Sgd78059 	addr = (void *)dma_p->mem_va;
3055181Sgd78059 	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
3065181Sgd78059 }
3075181Sgd78059 
3085181Sgd78059 static void
3095181Sgd78059 dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
3105181Sgd78059 {
3115181Sgd78059 	uint32_t *addr;
3125181Sgd78059 
313*6990Sgd78059 	addr = (void *)dma_p->mem_va;
3145181Sgd78059 	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
3155181Sgd78059 }
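
/*
 * Note (annotation only): <addr> in the two routines above is a
 * uint32_t pointer, so the <index*DESC_SIZE + offset> arithmetic is
 * done in 32-bit words; DESC_SIZE is thus the descriptor size in
 * words and <offset> names one of the words within a descriptor
 * (DESC0, DESC1, BUFFER1, RD_NEXT/TD_NEXT), as in
 *
 *	desc0 = dmfe_ring_get32(&dmfep->rx_desc, index, DESC0);
 */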
3165181Sgd78059 
3175181Sgd78059 /*
3185181Sgd78059  * Setup buffer get/put routines
3195181Sgd78059  */
3205181Sgd78059 static uint32_t
3215181Sgd78059 dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
3225181Sgd78059 {
3235181Sgd78059 	uint32_t *addr;
3245181Sgd78059 
325*6990Sgd78059 	addr = (void *)dma_p->setup_va;
3265181Sgd78059 	return (ddi_get32(dma_p->acc_hdl, addr + index));
3275181Sgd78059 }
3285181Sgd78059 
3295181Sgd78059 static void
3305181Sgd78059 dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
3315181Sgd78059 {
3325181Sgd78059 	uint32_t *addr;
3335181Sgd78059 
334*6990Sgd78059 	addr = (void *)dma_p->setup_va;
3355181Sgd78059 	ddi_put32(dma_p->acc_hdl, addr + index, value);
3365181Sgd78059 }
3375181Sgd78059 
3385181Sgd78059 
3395181Sgd78059 /*
3405181Sgd78059  * ========== Low-level chip & ring buffer manipulation ==========
3415181Sgd78059  */
3425181Sgd78059 
3435181Sgd78059 #define	DMFE_DBG	DMFE_DBG_REGS	/* debug flag for this code	*/
3445181Sgd78059 
3455181Sgd78059 /*
3465181Sgd78059  * dmfe_set_opmode() -- function to set operating mode
3475181Sgd78059  */
3485181Sgd78059 static void
3495181Sgd78059 dmfe_set_opmode(dmfe_t *dmfep)
3505181Sgd78059 {
3515181Sgd78059 	DMFE_DEBUG(("dmfe_set_opmode: opmode 0x%x", dmfep->opmode));
3525181Sgd78059 
3535181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
3545181Sgd78059 
3555181Sgd78059 	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
3565181Sgd78059 	drv_usecwait(10);
3575181Sgd78059 }
3585181Sgd78059 
3595181Sgd78059 /*
3605181Sgd78059  * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
3615181Sgd78059  */
3625181Sgd78059 static void
3635181Sgd78059 dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
3645181Sgd78059 {
3655181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
3665181Sgd78059 
3675181Sgd78059 	/*
3685181Sgd78059 	 * Stop the chip:
3695181Sgd78059 	 *	disable all interrupts
3705181Sgd78059 	 *	stop TX/RX processes
3715181Sgd78059 	 *	clear the status bits for TX/RX stopped
3725181Sgd78059 	 * If required, reset the chip
3735181Sgd78059 	 * Record the new state
3745181Sgd78059 	 */
3755181Sgd78059 	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
3765181Sgd78059 	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
3775181Sgd78059 	dmfe_set_opmode(dmfep);
3785181Sgd78059 	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
3795181Sgd78059 
3805181Sgd78059 	switch (newstate) {
3815181Sgd78059 	default:
3825181Sgd78059 		ASSERT(!"can't get here");
3835181Sgd78059 		return;
3845181Sgd78059 
3855181Sgd78059 	case CHIP_STOPPED:
3865181Sgd78059 	case CHIP_ERROR:
3875181Sgd78059 		break;
3885181Sgd78059 
3895181Sgd78059 	case CHIP_RESET:
3905181Sgd78059 		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
3915181Sgd78059 		drv_usecwait(10);
3925181Sgd78059 		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
3935181Sgd78059 		drv_usecwait(10);
3945181Sgd78059 		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
3955181Sgd78059 		break;
3965181Sgd78059 	}
3975181Sgd78059 
3985181Sgd78059 	dmfep->chip_state = newstate;
3995181Sgd78059 }
4005181Sgd78059 
4015181Sgd78059 /*
4025181Sgd78059  * Initialize transmit and receive descriptor rings, and
4035181Sgd78059  * set the chip to point to the first entry in each ring
4045181Sgd78059  */
4055181Sgd78059 static void
4065181Sgd78059 dmfe_init_rings(dmfe_t *dmfep)
4075181Sgd78059 {
4085181Sgd78059 	dma_area_t *descp;
4095181Sgd78059 	uint32_t pstart;
4105181Sgd78059 	uint32_t pnext;
4115181Sgd78059 	uint32_t pbuff;
4125181Sgd78059 	uint32_t desc1;
4135181Sgd78059 	int i;
4145181Sgd78059 
4155181Sgd78059 	/*
4165181Sgd78059 	 * You need all the locks in order to rewrite the descriptor rings
4175181Sgd78059 	 */
4185181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
4195181Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
4205181Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
4215181Sgd78059 
4225181Sgd78059 	/*
4235181Sgd78059 	 * Program the RX ring entries
4245181Sgd78059 	 */
4255181Sgd78059 	descp = &dmfep->rx_desc;
4265181Sgd78059 	pstart = descp->mem_dvma;
4275181Sgd78059 	pnext = pstart + sizeof (struct rx_desc_type);
4285181Sgd78059 	pbuff = dmfep->rx_buff.mem_dvma;
4295181Sgd78059 	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;
4305181Sgd78059 
4315181Sgd78059 	for (i = 0; i < dmfep->rx.n_desc; ++i) {
4325181Sgd78059 		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
4335181Sgd78059 		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
4345181Sgd78059 		dmfe_ring_put32(descp, i, DESC1, desc1);
4355181Sgd78059 		dmfe_ring_put32(descp, i, DESC0, RX_OWN);
4365181Sgd78059 
4375181Sgd78059 		pnext += sizeof (struct rx_desc_type);
4385181Sgd78059 		pbuff += DMFE_BUF_SIZE;
4395181Sgd78059 	}
4405181Sgd78059 
4415181Sgd78059 	/*
4425181Sgd78059 	 * Fix up last entry & sync
4435181Sgd78059 	 */
4445181Sgd78059 	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
4455181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
4465181Sgd78059 	dmfep->rx.next_free = 0;
4475181Sgd78059 
4485181Sgd78059 	/*
4495181Sgd78059 	 * Set the base address of the RX descriptor list in CSR3
4505181Sgd78059 	 */
4515181Sgd78059 	DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
4525181Sgd78059 	    descp->mem_va, descp->mem_dvma));
4535181Sgd78059 	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);
4545181Sgd78059 
4555181Sgd78059 	/*
4565181Sgd78059 	 * Program the TX ring entries
4575181Sgd78059 	 */
4585181Sgd78059 	descp = &dmfep->tx_desc;
4595181Sgd78059 	pstart = descp->mem_dvma;
4605181Sgd78059 	pnext = pstart + sizeof (struct tx_desc_type);
4615181Sgd78059 	pbuff = dmfep->tx_buff.mem_dvma;
4625181Sgd78059 	desc1 = TX_CHAINING;
4635181Sgd78059 
4645181Sgd78059 	for (i = 0; i < dmfep->tx.n_desc; ++i) {
4655181Sgd78059 		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
4665181Sgd78059 		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
4675181Sgd78059 		dmfe_ring_put32(descp, i, DESC1, desc1);
4685181Sgd78059 		dmfe_ring_put32(descp, i, DESC0, 0);
4695181Sgd78059 
4705181Sgd78059 		pnext += sizeof (struct tx_desc_type);
4715181Sgd78059 		pbuff += DMFE_BUF_SIZE;
4725181Sgd78059 	}
4735181Sgd78059 
4745181Sgd78059 	/*
4755181Sgd78059 	 * Fix up last entry & sync
4765181Sgd78059 	 */
4775181Sgd78059 	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
4785181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
4795181Sgd78059 	dmfep->tx.n_free = dmfep->tx.n_desc;
4805181Sgd78059 	dmfep->tx.next_free = dmfep->tx.next_busy = 0;
4815181Sgd78059 
4825181Sgd78059 	/*
4835181Sgd78059 	 * Set the base address of the TX descriptor list in CSR4
4845181Sgd78059 	 */
4855181Sgd78059 	DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
4865181Sgd78059 	    descp->mem_va, descp->mem_dvma));
4875181Sgd78059 	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
4885181Sgd78059 }
4895181Sgd78059 
4905181Sgd78059 /*
4915181Sgd78059  * dmfe_start_chip() -- start the chip transmitting and/or receiving
4925181Sgd78059  */
4935181Sgd78059 static void
4945181Sgd78059 dmfe_start_chip(dmfe_t *dmfep, int mode)
4955181Sgd78059 {
4965181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
4975181Sgd78059 
4985181Sgd78059 	dmfep->opmode |= mode;
4995181Sgd78059 	dmfe_set_opmode(dmfep);
5005181Sgd78059 
5015181Sgd78059 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
5025181Sgd78059 	/*
5035181Sgd78059 	 * Enable VLAN length mode (allows packets to be 4 bytes longer).
5045181Sgd78059 	 */
5055181Sgd78059 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);
5065181Sgd78059 
5075181Sgd78059 	/*
5085181Sgd78059 	 * Clear any pending process-stopped interrupts
5095181Sgd78059 	 */
5105181Sgd78059 	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
5115181Sgd78059 	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
5125181Sgd78059 	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
5135181Sgd78059 }
5145181Sgd78059 
5155181Sgd78059 /*
5165181Sgd78059  * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
5175181Sgd78059  *
5185181Sgd78059  * Normal interrupts:
5195181Sgd78059  *	We always enable:
5205181Sgd78059  *		RX_PKTDONE_INT		(packet received)
5215181Sgd78059  *		TX_PKTDONE_INT		(TX complete)
5225181Sgd78059  *	We never enable:
5235181Sgd78059  *		TX_ALLDONE_INT		(next TX buffer not ready)
5245181Sgd78059  *
5255181Sgd78059  * Abnormal interrupts:
5265181Sgd78059  *	We always enable:
5275181Sgd78059  *		RX_STOPPED_INT
5285181Sgd78059  *		TX_STOPPED_INT
5295181Sgd78059  *		SYSTEM_ERR_INT
5305181Sgd78059  *		RX_UNAVAIL_INT
5315181Sgd78059  *	We never enable:
5325181Sgd78059  *		RX_EARLY_INT
5335181Sgd78059  *		RX_WATCHDOG_INT
5345181Sgd78059  *		TX_JABBER_INT
5355181Sgd78059  *		TX_EARLY_INT
5365181Sgd78059  *		TX_UNDERFLOW_INT
5375181Sgd78059  *		GP_TIMER_INT		(not valid in -9 chips)
5385181Sgd78059  *		LINK_STATUS_INT		(not valid in -9 chips)
5395181Sgd78059  */
5405181Sgd78059 static void
5415181Sgd78059 dmfe_enable_interrupts(dmfe_t *dmfep)
5425181Sgd78059 {
5435181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
5445181Sgd78059 
5455181Sgd78059 	/*
5465181Sgd78059 	 * Put 'the standard set of interrupts' in the interrupt mask register
5475181Sgd78059 	 */
5485181Sgd78059 	dmfep->imask =	RX_PKTDONE_INT | TX_PKTDONE_INT |
5495181Sgd78059 	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;
5505181Sgd78059 
5515181Sgd78059 	dmfe_chip_put32(dmfep, INT_MASK_REG,
5525181Sgd78059 	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
5535181Sgd78059 	dmfep->chip_state = CHIP_RUNNING;
5545181Sgd78059 
5555181Sgd78059 	DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask));
5565181Sgd78059 }
5575181Sgd78059 
5585181Sgd78059 #undef	DMFE_DBG
5595181Sgd78059 
5605181Sgd78059 
5615181Sgd78059 /*
5625181Sgd78059  * ========== RX side routines ==========
5635181Sgd78059  */
5645181Sgd78059 
5655181Sgd78059 #define	DMFE_DBG	DMFE_DBG_RECV	/* debug flag for this code	*/
5665181Sgd78059 
5675181Sgd78059 /*
5685181Sgd78059  * Function to update receive statistics on various errors
5695181Sgd78059  */
5705181Sgd78059 static void
5715181Sgd78059 dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
5725181Sgd78059 {
5735181Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
5745181Sgd78059 
5755181Sgd78059 	/*
5765181Sgd78059 	 * The error summary bit and the error bits that it summarises
5775181Sgd78059 	 * are only valid if this is the last fragment.  Therefore, a
5785181Sgd78059 	 * fragment only contributes to the error statistics if both
5795181Sgd78059 	 * the last-fragment and error summary bits are set.
5805181Sgd78059 	 */
5815181Sgd78059 	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
5825181Sgd78059 		dmfep->rx_stats_ierrors += 1;
5835181Sgd78059 
5845181Sgd78059 		/*
5855181Sgd78059 		 * There are some other error bits in the descriptor for
5865181Sgd78059 		 * which there don't seem to be appropriate MAC statistics,
5875181Sgd78059 		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
5885181Sgd78059 		 * latter may not be possible if it is supposed to indicate
5895181Sgd78059 		 * that one buffer has been filled with a partial packet
5905181Sgd78059 		 * and the next buffer required for the rest of the packet
5915181Sgd78059 		 * was not available, as all our buffers are more than large
5925181Sgd78059 		 * enough for a whole packet without fragmenting.
5935181Sgd78059 		 */
5945181Sgd78059 
5955181Sgd78059 		if (desc0 & RX_OVERFLOW) {
5965181Sgd78059 			dmfep->rx_stats_overflow += 1;
5975181Sgd78059 
5985181Sgd78059 		} else if (desc0 & RX_RUNT_FRAME)
5995181Sgd78059 			dmfep->rx_stats_short += 1;
6005181Sgd78059 
6015181Sgd78059 		if (desc0 & RX_CRC)
6025181Sgd78059 			dmfep->rx_stats_fcs += 1;
6035181Sgd78059 
6045181Sgd78059 		if (desc0 & RX_FRAME2LONG)
6055181Sgd78059 			dmfep->rx_stats_toolong += 1;
6065181Sgd78059 	}
6075181Sgd78059 
6085181Sgd78059 	/*
6095181Sgd78059 	 * A receive watchdog timeout is counted as a MAC-level receive
6105181Sgd78059 	 * error.  Strangely, it doesn't set the packet error summary bit,
6115181Sgd78059 	 * according to the chip data sheet :-?
6125181Sgd78059 	 */
6135181Sgd78059 	if (desc0 & RX_RCV_WD_TO)
6145181Sgd78059 		dmfep->rx_stats_macrcv_errors += 1;
6155181Sgd78059 
6165181Sgd78059 	if (desc0 & RX_DRIBBLING)
6175181Sgd78059 		dmfep->rx_stats_align += 1;
6185181Sgd78059 
6195181Sgd78059 	if (desc0 & RX_MII_ERR)
6205181Sgd78059 		dmfep->rx_stats_macrcv_errors += 1;
6215181Sgd78059 }
6225181Sgd78059 
6235181Sgd78059 /*
6245181Sgd78059  * Receive incoming packet(s) and pass them up ...
6255181Sgd78059  */
6265181Sgd78059 static mblk_t *
6275181Sgd78059 dmfe_getp(dmfe_t *dmfep)
6285181Sgd78059 {
6295181Sgd78059 	dma_area_t *descp;
6305181Sgd78059 	mblk_t **tail;
6315181Sgd78059 	mblk_t *head;
6325181Sgd78059 	mblk_t *mp;
6335181Sgd78059 	char *rxb;
6345181Sgd78059 	uchar_t *dp;
6355181Sgd78059 	uint32_t desc0;
6365181Sgd78059 	uint32_t misses;
6375181Sgd78059 	int packet_length;
6385181Sgd78059 	int index;
6395181Sgd78059 
6405181Sgd78059 	mutex_enter(dmfep->rxlock);
6415181Sgd78059 
6425181Sgd78059 	/*
6435181Sgd78059 	 * Update the missed frame statistic from the on-chip counter.
6445181Sgd78059 	 */
6455181Sgd78059 	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
6465181Sgd78059 	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);
6475181Sgd78059 
6485181Sgd78059 	/*
6495181Sgd78059 	 * sync (all) receive descriptors before inspecting them
6505181Sgd78059 	 */
6515181Sgd78059 	descp = &dmfep->rx_desc;
6525181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
6535181Sgd78059 
6545181Sgd78059 	/*
6555181Sgd78059 	 * We should own at least one RX entry, since we've had a
6565181Sgd78059 	 * receive interrupt, but let's not be dogmatic about it.
6575181Sgd78059 	 */
6585181Sgd78059 	index = dmfep->rx.next_free;
6595181Sgd78059 	desc0 = dmfe_ring_get32(descp, index, DESC0);
6605181Sgd78059 	if (desc0 & RX_OWN)
6615181Sgd78059 		DMFE_DEBUG(("dmfe_getp: no work, desc0 0x%x", desc0));
6625181Sgd78059 
6635181Sgd78059 	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
6645181Sgd78059 		/*
6655181Sgd78059 		 * Maintain statistics for every descriptor returned
6665181Sgd78059 		 * to us by the chip ...
6675181Sgd78059 		 */
6685181Sgd78059 		DMFE_DEBUG(("dmfe_getp: desc0 0x%x", desc0));
6695181Sgd78059 		dmfe_update_rx_stats(dmfep, desc0);
6705181Sgd78059 
6715181Sgd78059 		/*
6725181Sgd78059 		 * Check that the entry has both "packet start" and
6735181Sgd78059 		 * "packet end" flags.  We really shouldn't get packet
6745181Sgd78059 		 * fragments, 'cos all the RX buffers are bigger than
6755181Sgd78059 		 * the largest valid packet.  So we'll just drop any
6765181Sgd78059 		 * fragments we find & skip on to the next entry.
6775181Sgd78059 		 */
6785181Sgd78059 		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
6795181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping fragment"));
6805181Sgd78059 			goto skip;
6815181Sgd78059 		}
6825181Sgd78059 
6835181Sgd78059 		/*
6845181Sgd78059 		 * A whole packet in one buffer.  We have to check error
6855181Sgd78059 		 * status and packet length before forwarding it upstream.
6865181Sgd78059 		 */
6875181Sgd78059 		if (desc0 & RX_ERR_SUMMARY) {
6885181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping errored packet"));
6895181Sgd78059 			goto skip;
6905181Sgd78059 		}
6915181Sgd78059 
6925181Sgd78059 		packet_length = (desc0 >> 16) & 0x3fff;
6935181Sgd78059 		if (packet_length > DMFE_MAX_PKT_SIZE) {
6945181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
6955181Sgd78059 			    "length %d", packet_length));
6965181Sgd78059 			goto skip;
6975181Sgd78059 		} else if (packet_length < ETHERMIN) {
6985181Sgd78059 			/*
6995181Sgd78059 			 * Note that VLAN packet would be even larger,
7005181Sgd78059 			 * but we don't worry about dropping runt VLAN
7015181Sgd78059 			 * frames.
7025181Sgd78059 			 *
7035181Sgd78059 			 * This check is probably redundant, as well,
7045181Sgd78059 			 * since the hardware should drop RUNT frames.
7055181Sgd78059 			 */
7065181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
7075181Sgd78059 			    "length %d", packet_length));
7085181Sgd78059 			goto skip;
7095181Sgd78059 		}
7105181Sgd78059 
7115181Sgd78059 		/*
7125181Sgd78059 		 * Sync the data, so we can examine it; then check that
7135181Sgd78059 		 * the packet is really intended for us (remember that
7145181Sgd78059 		 * if we're using Imperfect Filtering, then the chip will
7155181Sgd78059 		 * receive unicast packets sent to stations whose addresses
7165181Sgd78059 		 * just happen to hash to the same value as our own; we
7175181Sgd78059 		 * discard these here so they don't get sent upstream ...)
7185181Sgd78059 		 */
7195181Sgd78059 		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
7205181Sgd78059 		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
7215181Sgd78059 		    DDI_DMA_SYNC_FORKERNEL);
7225181Sgd78059 		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];
7235181Sgd78059 
7245181Sgd78059 
7255181Sgd78059 		/*
7265181Sgd78059 		 * We do not bother to check that the packet is really for
7275181Sgd78059 		 * us, we let the MAC framework make that check instead.
7285181Sgd78059 		 * This is especially important if we ever want to support
7295181Sgd78059 		 * multiple MAC addresses.
7305181Sgd78059 		 */
7315181Sgd78059 
7325181Sgd78059 		/*
7335181Sgd78059 		 * Packet looks good; get a buffer to copy it into.  We
7345181Sgd78059 		 * allow some space at the front of the allocated buffer
7355181Sgd78059 		 * (HEADROOM) in case any upstream modules want to prepend
7365181Sgd78059 		 * some sort of header.  The value has been carefully chosen
7375181Sgd78059 		 * so that it also has the side-effect of making the packet
7385181Sgd78059 		 * *contents* 4-byte aligned, as required by NCA!
7395181Sgd78059 		 */
7405181Sgd78059 		mp = allocb(DMFE_HEADROOM + packet_length, 0);
7415181Sgd78059 		if (mp == NULL) {
7425181Sgd78059 			DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet"));
7435181Sgd78059 			dmfep->rx_stats_norcvbuf += 1;
7445181Sgd78059 			goto skip;
7455181Sgd78059 		}
7465181Sgd78059 
7475181Sgd78059 		/*
7485181Sgd78059 		 * Account for statistics of good packets.
7495181Sgd78059 		 */
7505181Sgd78059 		dmfep->rx_stats_ipackets += 1;
7515181Sgd78059 		dmfep->rx_stats_rbytes += packet_length;
7525181Sgd78059 		if (desc0 & RX_MULTI_FRAME) {
7535181Sgd78059 			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
7545181Sgd78059 				dmfep->rx_stats_multi += 1;
7555181Sgd78059 			} else {
7565181Sgd78059 				dmfep->rx_stats_bcast += 1;
7575181Sgd78059 			}
7585181Sgd78059 		}
7595181Sgd78059 
7605181Sgd78059 		/*
7615181Sgd78059 		 * Copy the packet into the STREAMS buffer
7625181Sgd78059 		 */
7635181Sgd78059 		dp = mp->b_rptr += DMFE_HEADROOM;
7645181Sgd78059 		mp->b_cont = mp->b_next = NULL;
7655181Sgd78059 
7665181Sgd78059 		/*
7675181Sgd78059 		 * Don't worry about stripping the vlan tag, the MAC
7685181Sgd78059 		 * layer will take care of that for us.
7695181Sgd78059 		 */
7705181Sgd78059 		bcopy(rxb, dp, packet_length);
7715181Sgd78059 
7725181Sgd78059 		/*
7735181Sgd78059 		 * Fix up the packet length, and link it to the chain
7745181Sgd78059 		 */
7755181Sgd78059 		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
7765181Sgd78059 		*tail = mp;
7775181Sgd78059 		tail = &mp->b_next;
7785181Sgd78059 
7795181Sgd78059 	skip:
7805181Sgd78059 		/*
7815181Sgd78059 		 * Return ownership of ring entry & advance to next
7825181Sgd78059 		 */
7835181Sgd78059 		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
7845181Sgd78059 		index = NEXT(index, dmfep->rx.n_desc);
7855181Sgd78059 		desc0 = dmfe_ring_get32(descp, index, DESC0);
7865181Sgd78059 	}
7875181Sgd78059 
7885181Sgd78059 	/*
7895181Sgd78059 	 * Remember where to start looking next time ...
7905181Sgd78059 	 */
7915181Sgd78059 	dmfep->rx.next_free = index;
7925181Sgd78059 
7935181Sgd78059 	/*
7945181Sgd78059 	 * sync the receive descriptors that we've given back
7955181Sgd78059 	 * (actually, we sync all of them for simplicity), and
7965181Sgd78059 	 * wake the chip in case it had suspended receive
7975181Sgd78059 	 */
7985181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
7995181Sgd78059 	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);
8005181Sgd78059 
8015181Sgd78059 	mutex_exit(dmfep->rxlock);
8025181Sgd78059 	return (head);
8035181Sgd78059 }
8045181Sgd78059 
8055181Sgd78059 #undef	DMFE_DBG
8065181Sgd78059 
8075181Sgd78059 
8085181Sgd78059 /*
8095181Sgd78059  * ========== Primary TX side routines ==========
8105181Sgd78059  */
8115181Sgd78059 
8125181Sgd78059 #define	DMFE_DBG	DMFE_DBG_SEND	/* debug flag for this code	*/
8135181Sgd78059 
8145181Sgd78059 /*
8155181Sgd78059  *	TX ring management:
8165181Sgd78059  *
8175181Sgd78059  *	There are <tx.n_desc> entries in the ring, of which those from
8185181Sgd78059  *	<tx.next_free> round to but not including <tx.next_busy> must
8195181Sgd78059  *	be owned by the CPU.  The number of such entries should equal
8205181Sgd78059  *	<tx.n_free>; but there may also be some more entries which the
8215181Sgd78059  *	chip has given back but which we haven't yet accounted for.
8225181Sgd78059  *	The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
8235181Sgd78059  *	as it discovers such entries.
8245181Sgd78059  *
8255181Sgd78059  *	Initially, or when the ring is entirely free:
8265181Sgd78059  *		C = Owned by CPU
8275181Sgd78059  *		D = Owned by Davicom (DMFE) chip
8285181Sgd78059  *
8295181Sgd78059  *	tx.next_free					tx.n_desc = 16
8305181Sgd78059  *	  |
8315181Sgd78059  *	  v
8325181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8335181Sgd78059  *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
8345181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8355181Sgd78059  *	  ^
8365181Sgd78059  *	  |
8375181Sgd78059  *	tx.next_busy					tx.n_free = 16
8385181Sgd78059  *
8395181Sgd78059  *	On entry to reclaim() during normal use:
8405181Sgd78059  *
8415181Sgd78059  *					tx.next_free	tx.n_desc = 16
8425181Sgd78059  *					      |
8435181Sgd78059  *					      v
8445181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8455181Sgd78059  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
8465181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8475181Sgd78059  *		  ^
8485181Sgd78059  *		  |
8495181Sgd78059  *		tx.next_busy				tx.n_free = 9
8505181Sgd78059  *
8515181Sgd78059  *	On exit from reclaim():
8525181Sgd78059  *
8535181Sgd78059  *					tx.next_free	tx.n_desc = 16
8545181Sgd78059  *					      |
8555181Sgd78059  *					      v
8565181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8575181Sgd78059  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
8585181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8595181Sgd78059  *				  ^
8605181Sgd78059  *				  |
8615181Sgd78059  *			     tx.next_busy		tx.n_free = 13
8625181Sgd78059  *
8635181Sgd78059  *	The ring is considered "full" when only one entry is owned by
8645181Sgd78059  *	the CPU; thus <tx.n_free> should always be >= 1.
8655181Sgd78059  *
8665181Sgd78059  *			tx.next_free			tx.n_desc = 16
8675181Sgd78059  *			      |
8685181Sgd78059  *			      v
8695181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8705181Sgd78059  *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
8715181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8725181Sgd78059  *				  ^
8735181Sgd78059  *				  |
8745181Sgd78059  *			     tx.next_busy		tx.n_free = 1
8755181Sgd78059  */
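
/*
 * Worked example (annotation only): in the "normal use" picture above,
 * with n_desc == 16, next_busy == 2 and next_free == 9, the entries
 * counted in tx.n_free are 9..15 and 0..1 (entries 2..5 have already
 * been given back by the chip but not yet reclaimed), i.e.
 *
 *	(next_busy + n_desc - next_free) % n_desc == (2 + 16 - 9) % 16 == 9
 *
 * matching tx.n_free == 9; after reclaim() next_busy advances to 6 and
 * the same arithmetic gives 13, matching the "on exit" picture.
 */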
8765181Sgd78059 
8775181Sgd78059 /*
8785181Sgd78059  * Function to update transmit statistics on various errors
8795181Sgd78059  */
8805181Sgd78059 static void
8815181Sgd78059 dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
8825181Sgd78059 {
8835181Sgd78059 	uint32_t collisions;
8845181Sgd78059 	uint32_t errbits;
8855181Sgd78059 	uint32_t errsum;
8865181Sgd78059 
8875181Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
8885181Sgd78059 
8895181Sgd78059 	collisions = ((desc0 >> 3) & 0x0f);
8905181Sgd78059 	errsum = desc0 & TX_ERR_SUMMARY;
8915181Sgd78059 	errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
8925181Sgd78059 	    TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
8935181Sgd78059 	if ((errsum == 0) != (errbits == 0)) {
8945181Sgd78059 		dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
8955181Sgd78059 		desc0 |= TX_ERR_SUMMARY;
8965181Sgd78059 	}
8975181Sgd78059 
8985181Sgd78059 	if (desc0 & TX_ERR_SUMMARY) {
8995181Sgd78059 		dmfep->tx_stats_oerrors += 1;
9005181Sgd78059 
9015181Sgd78059 		/*
9025181Sgd78059 		 * If we ever see a transmit jabber timeout, we count it
9035181Sgd78059 		 * as a MAC-level transmit error; but we probably won't
9045181Sgd78059 		 * see it as it causes an Abnormal interrupt and we reset
9055181Sgd78059 		 * the chip in order to recover
9065181Sgd78059 		 */
9075181Sgd78059 		if (desc0 & TX_JABBER_TO) {
9085181Sgd78059 			dmfep->tx_stats_macxmt_errors += 1;
9095181Sgd78059 			dmfep->tx_stats_jabber += 1;
9105181Sgd78059 		}
9115181Sgd78059 
9125181Sgd78059 		if (desc0 & TX_UNDERFLOW)
9135181Sgd78059 			dmfep->tx_stats_underflow += 1;
9145181Sgd78059 		else if (desc0 & TX_LATE_COLL)
9155181Sgd78059 			dmfep->tx_stats_xmtlatecoll += 1;
9165181Sgd78059 
9175181Sgd78059 		if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
9185181Sgd78059 			dmfep->tx_stats_nocarrier += 1;
9195181Sgd78059 
9205181Sgd78059 		if (desc0 & TX_EXCESS_COLL) {
9215181Sgd78059 			dmfep->tx_stats_excoll += 1;
9225181Sgd78059 			collisions = 16;
9235181Sgd78059 		}
9245181Sgd78059 	} else {
9255181Sgd78059 		int	bit = index % NBBY;
9265181Sgd78059 		int	byt = index / NBBY;
9275181Sgd78059 
9285181Sgd78059 		if (dmfep->tx_mcast[byt] & (1 << bit)) {
9295181Sgd78059 			dmfep->tx_mcast[byt] &= ~(1 << bit);
9305181Sgd78059 			dmfep->tx_stats_multi += 1;
9315181Sgd78059 
9325181Sgd78059 		} else if (dmfep->tx_bcast[byt] & (1 << bit)) {
9335181Sgd78059 			dmfep->tx_bcast[byt] &= ~(1 << bit);
9345181Sgd78059 			dmfep->tx_stats_bcast += 1;
9355181Sgd78059 		}
9365181Sgd78059 
9375181Sgd78059 		dmfep->tx_stats_opackets += 1;
9385181Sgd78059 		dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
9395181Sgd78059 	}
9405181Sgd78059 
9415181Sgd78059 	if (collisions == 1)
9425181Sgd78059 		dmfep->tx_stats_first_coll += 1;
9435181Sgd78059 	else if (collisions != 0)
9445181Sgd78059 		dmfep->tx_stats_multi_coll += 1;
9455181Sgd78059 	dmfep->tx_stats_collisions += collisions;
9465181Sgd78059 
9475181Sgd78059 	if (desc0 & TX_DEFERRED)
9485181Sgd78059 		dmfep->tx_stats_defer += 1;
9495181Sgd78059 }
9505181Sgd78059 
9515181Sgd78059 /*
9525181Sgd78059  * Reclaim all the ring entries that the chip has returned to us ...
9535181Sgd78059  *
9545181Sgd78059  * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
9555181Sgd78059  * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
9565181Sgd78059  */
9575181Sgd78059 static boolean_t
9585181Sgd78059 dmfe_reclaim_tx_desc(dmfe_t *dmfep)
9595181Sgd78059 {
9605181Sgd78059 	dma_area_t *descp;
9615181Sgd78059 	uint32_t desc0;
9625181Sgd78059 	uint32_t desc1;
9635181Sgd78059 	int i;
9645181Sgd78059 
9655181Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
9665181Sgd78059 
9675181Sgd78059 	/*
9685181Sgd78059 	 * sync transmit descriptor ring before looking at it
9695181Sgd78059 	 */
9705181Sgd78059 	descp = &dmfep->tx_desc;
9715181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
9725181Sgd78059 
9735181Sgd78059 	/*
9745181Sgd78059 	 * Early exit if there are no descriptors to reclaim, either
9755181Sgd78059 	 * because they're all reclaimed already, or because the next
9765181Sgd78059 	 * one is still owned by the chip ...
9775181Sgd78059 	 */
9785181Sgd78059 	i = dmfep->tx.next_busy;
9795181Sgd78059 	if (i == dmfep->tx.next_free)
9805181Sgd78059 		return (B_FALSE);
9815181Sgd78059 	desc0 = dmfe_ring_get32(descp, i, DESC0);
9825181Sgd78059 	if (desc0 & TX_OWN)
9835181Sgd78059 		return (B_FALSE);
9845181Sgd78059 
9855181Sgd78059 	/*
9865181Sgd78059 	 * Reclaim as many descriptors as possible ...
9875181Sgd78059 	 */
9885181Sgd78059 	for (;;) {
9895181Sgd78059 		desc1 = dmfe_ring_get32(descp, i, DESC1);
9905181Sgd78059 		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);
9915181Sgd78059 
9925181Sgd78059 		if (desc1 & TX_SETUP_PACKET) {
9935181Sgd78059 			/*
9945181Sgd78059 			 * Setup packet - restore buffer address
9955181Sgd78059 			 */
9965181Sgd78059 			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
9975181Sgd78059 			    descp->setup_dvma);
9985181Sgd78059 			dmfe_ring_put32(descp, i, BUFFER1,
9995181Sgd78059 			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
10005181Sgd78059 		} else {
10015181Sgd78059 			/*
10025181Sgd78059 			 * Regular packet - just update stats
10035181Sgd78059 			 */
10045181Sgd78059 			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
10055181Sgd78059 			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
10065181Sgd78059 			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
10075181Sgd78059 		}
10085181Sgd78059 
10095181Sgd78059 #if	DMFEDEBUG
10105181Sgd78059 		/*
10115181Sgd78059 		 * We can use one of the SPARE bits in the TX descriptor
10125181Sgd78059 		 * to track when a ring buffer slot is reclaimed.  Then
10135181Sgd78059 		 * we can deduce the last operation on a slot from the
10145181Sgd78059 		 * top half of DESC0:
10155181Sgd78059 		 *
10165181Sgd78059 		 *	0x8000 xxxx	given to DMFE chip (TX_OWN)
10175181Sgd78059 		 *	0x7fff xxxx	returned but not yet reclaimed
10185181Sgd78059 		 *	0x3fff xxxx	reclaimed
10195181Sgd78059 		 */
10205181Sgd78059 #define	TX_PEND_RECLAIM		(1UL<<30)
10215181Sgd78059 		dmfe_ring_put32(descp, i, DESC0, desc0 & ~TX_PEND_RECLAIM);
10225181Sgd78059 #endif	/* DMFEDEBUG */
10235181Sgd78059 
10245181Sgd78059 		/*
10255181Sgd78059 		 * Update count & index; we're all done if the ring is
10265181Sgd78059 		 * now fully reclaimed, or the next entry if still owned
10275181Sgd78059 		 * by the chip ...
10285181Sgd78059 		 */
10295181Sgd78059 		dmfep->tx.n_free += 1;
10305181Sgd78059 		i = NEXT(i, dmfep->tx.n_desc);
10315181Sgd78059 		if (i == dmfep->tx.next_free)
10325181Sgd78059 			break;
10335181Sgd78059 		desc0 = dmfe_ring_get32(descp, i, DESC0);
10345181Sgd78059 		if (desc0 & TX_OWN)
10355181Sgd78059 			break;
10365181Sgd78059 	}
10375181Sgd78059 
10385181Sgd78059 	dmfep->tx.next_busy = i;
10395181Sgd78059 	dmfep->tx_pending_tix = 0;
10405181Sgd78059 	return (B_TRUE);
10415181Sgd78059 }
10425181Sgd78059 
10435181Sgd78059 /*
10445181Sgd78059  * Send the message in the message block chain <mp>.
10455181Sgd78059  *
10465181Sgd78059  * The message is freed if and only if its contents are successfully copied
10475181Sgd78059  * and queued for transmission (so that the return value is B_TRUE).
10485181Sgd78059  * If we can't queue the message, the return value is B_FALSE and
10495181Sgd78059  * the message is *not* freed.
10505181Sgd78059  *
10515181Sgd78059  * This routine handles the special case of <mp> == NULL, which indicates
10525181Sgd78059  * that we want to "send" the special "setup packet" allocated during
10535181Sgd78059  * startup.  We have to use some different flags in the packet descriptor
10545181Sgd78059  * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
10555181Sgd78059  * setup packet *isn't* freed after use.
10565181Sgd78059  */
10575181Sgd78059 static boolean_t
10585181Sgd78059 dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
10595181Sgd78059 {
10605181Sgd78059 	dma_area_t *descp;
10615181Sgd78059 	mblk_t *bp;
10625181Sgd78059 	char *txb;
10635181Sgd78059 	uint32_t desc1;
10645181Sgd78059 	uint32_t index;
10655181Sgd78059 	size_t totlen;
10665181Sgd78059 	size_t mblen;
10675181Sgd78059 
10685181Sgd78059 	/*
10695181Sgd78059 	 * If the number of free slots is below the reclaim threshold
10705181Sgd78059 	 * (soft limit), we'll try to reclaim some.  If we fail, and
10715181Sgd78059 	 * the number of free slots is also below the minimum required
10725181Sgd78059 	 * (the hard limit, usually 1), then we can't send the packet.
10735181Sgd78059 	 */
10745181Sgd78059 	mutex_enter(dmfep->txlock);
10755181Sgd78059 	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
10765181Sgd78059 	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
10775181Sgd78059 	    dmfep->tx.n_free <= dmfe_tx_min_free) {
10785181Sgd78059 		/*
10795181Sgd78059 		 * Resource shortage - return B_FALSE so the packet
10805181Sgd78059 		 * will be queued for retry after the next TX-done
10815181Sgd78059 		 * interrupt.
10825181Sgd78059 		 */
10835181Sgd78059 		mutex_exit(dmfep->txlock);
10845181Sgd78059 		DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
10855181Sgd78059 		return (B_FALSE);
10865181Sgd78059 	}
10875181Sgd78059 
10885181Sgd78059 	/*
10895181Sgd78059 	 * There's a slot available, so claim it by incrementing
10905181Sgd78059 	 * the next-free index and decrementing the free count.
10915181Sgd78059 	 * If the ring is currently empty, we also restart the
10925181Sgd78059 	 * stall-detect timer.  The ASSERTions check that our
10935181Sgd78059 	 * invariants still hold:
10945181Sgd78059 	 *	the next-free index must not match the next-busy index
10955181Sgd78059 	 *	there must still be at least one free entry
10965181Sgd78059 	 * After this, we now have exclusive ownership of the ring
10975181Sgd78059 	 * entry (and matching buffer) indicated by <index>, so we
10985181Sgd78059 	 * don't need to hold the TX lock any longer
10995181Sgd78059 	 */
11005181Sgd78059 	index = dmfep->tx.next_free;
11015181Sgd78059 	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
11025181Sgd78059 	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
11035181Sgd78059 	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
11045181Sgd78059 		dmfep->tx_pending_tix = 0;
11055181Sgd78059 	ASSERT(dmfep->tx.n_free >= 1);
11065181Sgd78059 	mutex_exit(dmfep->txlock);
11075181Sgd78059 
11085181Sgd78059 	/*
11095181Sgd78059 	 * Check the ownership of the ring entry ...
11105181Sgd78059 	 */
11115181Sgd78059 	descp = &dmfep->tx_desc;
11125181Sgd78059 	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);
11135181Sgd78059 
11145181Sgd78059 	if (mp == NULL) {
11155181Sgd78059 		/*
11165181Sgd78059 		 * Indicates we should send a SETUP packet, which we do by
11175181Sgd78059 		 * temporarily switching the BUFFER1 pointer in the ring
11185181Sgd78059 		 * entry.  The reclaim routine will restore BUFFER1 to its
11195181Sgd78059 		 * usual value.
11205181Sgd78059 		 *
11215181Sgd78059 		 * Note that as the setup packet is tagged on the end of
11225181Sgd78059 		 * the TX ring, when we sync the descriptor we're also
11235181Sgd78059 		 * implicitly syncing the setup packet - hence, we don't
11245181Sgd78059 		 * need a separate ddi_dma_sync() call here.
11255181Sgd78059 		 */
11265181Sgd78059 		desc1 = dmfe_setup_desc1;
11275181Sgd78059 		dmfe_ring_put32(descp, index, BUFFER1, descp->setup_dvma);
11285181Sgd78059 	} else {
11295181Sgd78059 		/*
11305181Sgd78059 		 * A regular packet; we copy the data into a pre-mapped
11315181Sgd78059 		 * buffer, which avoids the overhead (and complication)
11325181Sgd78059 		 * of mapping/unmapping STREAMS buffers and keeping hold
11335181Sgd78059 		 * of them until the DMA has completed.
11345181Sgd78059 		 *
11355181Sgd78059 		 * Because all buffers are the same size, and larger
11365181Sgd78059 		 * than the longest single valid message, we don't have
11375181Sgd78059 		 * to bother about splitting the message across multiple
11385181Sgd78059 		 * buffers.
11395181Sgd78059 		 */
11405181Sgd78059 		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
11415181Sgd78059 		totlen = 0;
11425181Sgd78059 		bp = mp;
11435181Sgd78059 
11445181Sgd78059 		/*
11455181Sgd78059 		 * Copy all (remaining) mblks in the message ...
11465181Sgd78059 		 */
11475181Sgd78059 		for (; bp != NULL; bp = bp->b_cont) {
1148*6990Sgd78059 			mblen = MBLKL(bp);
11495181Sgd78059 			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
11505181Sgd78059 				bcopy(bp->b_rptr, txb, mblen);
11515181Sgd78059 				txb += mblen;
11525181Sgd78059 			}
11535181Sgd78059 		}
11545181Sgd78059 
11555181Sgd78059 		/*
11565181Sgd78059 		 * Is this a multicast or broadcast packet?  We do
11575181Sgd78059 		 * this so that we can track statistics accurately
11585181Sgd78059 		 * when we reclaim it.
11595181Sgd78059 		 */
11605181Sgd78059 		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
11615181Sgd78059 		if (txb[0] & 0x1) {
11625181Sgd78059 			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
11635181Sgd78059 				dmfep->tx_bcast[index / NBBY] |=
11645181Sgd78059 				    (1 << (index % NBBY));
11655181Sgd78059 			} else {
11665181Sgd78059 				dmfep->tx_mcast[index / NBBY] |=
11675181Sgd78059 				    (1 << (index % NBBY));
11685181Sgd78059 			}
11695181Sgd78059 		}
11705181Sgd78059 
11715181Sgd78059 		/*
11725181Sgd78059 		 * We've reached the end of the chain, and we should have
11735181Sgd78059 		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
11745181Sgd78059 		 * buffer.  Note that the <size> field in the descriptor is
11755181Sgd78059 		 * only 11 bits, so bigger packets would be a problem!
11765181Sgd78059 		 */
11775181Sgd78059 		ASSERT(bp == NULL);
11785181Sgd78059 		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
11795181Sgd78059 		totlen &= TX_BUFFER_SIZE1;
11805181Sgd78059 		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
11815181Sgd78059 
11825181Sgd78059 		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
11835181Sgd78059 		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
11845181Sgd78059 	}
11855181Sgd78059 
11865181Sgd78059 	/*
11875181Sgd78059 	 * Update ring descriptor entries, sync them, and wake up the
11885181Sgd78059 	 * transmit process
11895181Sgd78059 	 */
11905181Sgd78059 	if ((index & dmfe_tx_int_factor) == 0)
11915181Sgd78059 		desc1 |= TX_INT_ON_COMP;
11925181Sgd78059 	desc1 |= TX_CHAINING;
11935181Sgd78059 	dmfe_ring_put32(descp, index, DESC1, desc1);
11945181Sgd78059 	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
11955181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
11965181Sgd78059 	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);
11975181Sgd78059 
11985181Sgd78059 	/*
11995181Sgd78059 	 * Finally, free the message & return success
12005181Sgd78059 	 */
12015181Sgd78059 	if (mp)
12025181Sgd78059 		freemsg(mp);
12035181Sgd78059 	return (B_TRUE);
12045181Sgd78059 }
12055181Sgd78059 
12065181Sgd78059 /*
12075181Sgd78059  *	dmfe_m_tx() -- send a chain of packets
12085181Sgd78059  *
12095181Sgd78059  *	Called when packet(s) are ready to be transmitted. A pointer to an
12105181Sgd78059  *	M_DATA message that contains the packet is passed to this routine.
12115181Sgd78059  *	The complete LLC header is contained in the message's first message
12125181Sgd78059  *	block, and the remainder of the packet is contained within
12135181Sgd78059  *	additional M_DATA message blocks linked to the first message block.
12145181Sgd78059  *
12155181Sgd78059  *	Additional messages may be passed by linking with b_next.
12165181Sgd78059  */
12175181Sgd78059 static mblk_t *
12185181Sgd78059 dmfe_m_tx(void *arg, mblk_t *mp)
12195181Sgd78059 {
12205181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
12215181Sgd78059 	mblk_t *next;
12225181Sgd78059 
12235181Sgd78059 	ASSERT(mp != NULL);
12245181Sgd78059 	ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);
12255181Sgd78059 
12265181Sgd78059 	if (dmfep->chip_state != CHIP_RUNNING)
12275181Sgd78059 		return (mp);
12285181Sgd78059 
12295181Sgd78059 	while (mp != NULL) {
12305181Sgd78059 		next = mp->b_next;
12315181Sgd78059 		mp->b_next = NULL;
12325181Sgd78059 		if (!dmfe_send_msg(dmfep, mp)) {
12335181Sgd78059 			mp->b_next = next;
12345181Sgd78059 			break;
12355181Sgd78059 		}
12365181Sgd78059 		mp = next;
12375181Sgd78059 	}
12385181Sgd78059 
12395181Sgd78059 	return (mp);
12405181Sgd78059 }
12415181Sgd78059 
12425181Sgd78059 #undef	DMFE_DBG
12435181Sgd78059 
12445181Sgd78059 
12455181Sgd78059 /*
12465181Sgd78059  * ========== Address-setting routines (TX-side) ==========
12475181Sgd78059  */
12485181Sgd78059 
12495181Sgd78059 #define	DMFE_DBG	DMFE_DBG_ADDR	/* debug flag for this code	*/
12505181Sgd78059 
12515181Sgd78059 /*
12525181Sgd78059  * Find the index of the relevant bit in the setup packet.
12535181Sgd78059  * This must mirror the way the hardware will actually calculate it!
12545181Sgd78059  */
12555181Sgd78059 static uint32_t
12565181Sgd78059 dmfe_hash_index(const uint8_t *address)
12575181Sgd78059 {
12585181Sgd78059 	uint32_t const POLY = HASH_POLY;
12595181Sgd78059 	uint32_t crc = HASH_CRC;
12605181Sgd78059 	uint32_t index;
12615181Sgd78059 	uint32_t msb;
12625181Sgd78059 	uchar_t currentbyte;
12635181Sgd78059 	int byteslength;
12645181Sgd78059 	int shift;
12655181Sgd78059 	int bit;
12665181Sgd78059 
12675181Sgd78059 	for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
12685181Sgd78059 		currentbyte = address[byteslength];
12695181Sgd78059 		for (bit = 0; bit < 8; ++bit) {
12705181Sgd78059 			msb = crc >> 31;
12715181Sgd78059 			crc <<= 1;
12725181Sgd78059 			if (msb ^ (currentbyte & 1)) {
12735181Sgd78059 				crc ^= POLY;
12745181Sgd78059 				crc |= 0x00000001;
12755181Sgd78059 			}
12765181Sgd78059 			currentbyte >>= 1;
12775181Sgd78059 		}
12785181Sgd78059 	}
12795181Sgd78059 
12805181Sgd78059 	for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
12815181Sgd78059 		index |= (((crc >> bit) & 1) << shift);
12825181Sgd78059 
12835181Sgd78059 	return (index);
12845181Sgd78059 }
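
/*
 * Note (annotation only): the final loop above reverses CRC bits
 * 23..31 into a nine-bit value, so the returned index lies in the
 * range 0..511.  dmfe_update_hash() below then treats it as a bit
 * position within the 512-bit hash filter carried in the setup
 * packet, using the low 16 bits of each 32-bit setup word:
 *
 *	word = index / 16;	bit = index % 16;
 */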
12855181Sgd78059 
12865181Sgd78059 /*
12875181Sgd78059  * Find and set/clear the relevant bit in the setup packet hash table
12885181Sgd78059  * This must mirror the way the hardware will actually interpret it!
12895181Sgd78059  */
12905181Sgd78059 static void
12915181Sgd78059 dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
12925181Sgd78059 {
12935181Sgd78059 	dma_area_t *descp;
12945181Sgd78059 	uint32_t tmp;
12955181Sgd78059 
12965181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
12975181Sgd78059 
12985181Sgd78059 	descp = &dmfep->tx_desc;
12995181Sgd78059 	tmp = dmfe_setup_get32(descp, index/16);
13005181Sgd78059 	if (val)
13015181Sgd78059 		tmp |= 1 << (index%16);
13025181Sgd78059 	else
13035181Sgd78059 		tmp &= ~(1 << (index%16));
13045181Sgd78059 	dmfe_setup_put32(descp, index/16, tmp);
13055181Sgd78059 }
13065181Sgd78059 
13075181Sgd78059 /*
13085181Sgd78059  * Update the refcount for the bit in the setup packet corresponding
13095181Sgd78059  * to the specified address; if it changes between zero & nonzero,
13105181Sgd78059  * also update the bitmap itself & return B_TRUE, so that the caller
13115181Sgd78059  * knows to re-send the setup packet.  Otherwise (only the refcount
13125181Sgd78059  * changed), return B_FALSE
13135181Sgd78059  */
13145181Sgd78059 static boolean_t
13155181Sgd78059 dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
13165181Sgd78059 {
13175181Sgd78059 	uint32_t index;
13185181Sgd78059 	uint8_t *refp;
13195181Sgd78059 	boolean_t change;
13205181Sgd78059 
13215181Sgd78059 	index = dmfe_hash_index(mca);
13225181Sgd78059 	refp = &dmfep->mcast_refs[index];
13235181Sgd78059 	change = (val ? (*refp)++ : --(*refp)) == 0;
13245181Sgd78059 
13255181Sgd78059 	if (change)
13265181Sgd78059 		dmfe_update_hash(dmfep, index, val);
13275181Sgd78059 
13285181Sgd78059 	return (change);
13295181Sgd78059 }
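
/*
 * Illustrative example only (the collision is hypothetical): suppose
 * two multicast addresses A and B hash to the same index.  Then:
 *
 *	add A:    refs 0 -> 1, bit set,     setup frame re-sent
 *	add B:    refs 1 -> 2, no change,   nothing re-sent
 *	remove A: refs 2 -> 1, no change,   nothing re-sent
 *	remove B: refs 1 -> 0, bit cleared, setup frame re-sent
 *
 * so the filter bit stays set for as long as at least one enabled
 * address still maps to it.
 */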
13305181Sgd78059 
13315181Sgd78059 /*
13325181Sgd78059  * "Transmit" the (possibly updated) magic setup packet
13335181Sgd78059  */
13345181Sgd78059 static int
13355181Sgd78059 dmfe_send_setup(dmfe_t *dmfep)
13365181Sgd78059 {
13375181Sgd78059 	int status;
13385181Sgd78059 
13395181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
13405181Sgd78059 
13415181Sgd78059 	/*
13425181Sgd78059 	 * If the chip isn't running, we can't really send the setup frame
13435181Sgd78059 	 * now but it doesn't matter, 'cos it will be sent when the transmit
13445181Sgd78059 	 * process is restarted (see dmfe_start()).
13455181Sgd78059 	 */
13465181Sgd78059 	if ((dmfep->opmode & START_TRANSMIT) == 0)
13475181Sgd78059 		return (0);
13485181Sgd78059 
13495181Sgd78059 	/*
13505181Sgd78059 	 * "Send" the setup frame.  If it fails (e.g. no resources),
13515181Sgd78059 	 * set a flag; then the factotum will retry the "send".  Once
13525181Sgd78059 	 * it works, we can clear the flag no matter how many attempts
13535181Sgd78059 	 * had previously failed.  We tell the caller that it worked
13545181Sgd78059 	 * whether it did or not; after all, it *will* work eventually.
13555181Sgd78059 	 */
13565181Sgd78059 	status = dmfe_send_msg(dmfep, NULL);
13575181Sgd78059 	dmfep->need_setup = status ? B_FALSE : B_TRUE;
13585181Sgd78059 	return (0);
13595181Sgd78059 }
13605181Sgd78059 
13615181Sgd78059 /*
13625181Sgd78059  *	dmfe_m_unicst() -- set the physical network address
13635181Sgd78059  */
13645181Sgd78059 static int
13655181Sgd78059 dmfe_m_unicst(void *arg, const uint8_t *macaddr)
13665181Sgd78059 {
13675181Sgd78059 	dmfe_t *dmfep = arg;
13685181Sgd78059 	int status;
13695181Sgd78059 	int index;
13705181Sgd78059 
13715181Sgd78059 	/*
13725181Sgd78059 	 * Update our current address and send out a new setup packet
13735181Sgd78059 	 *
13745181Sgd78059 	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
13755181Sgd78059 	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
13765181Sgd78059 	 *
13775181Sgd78059 	 * It is said that there is a bug in the 21140 where it fails to
13785181Sgd78059 	 * receive packets addressed to the specified perfect filter address.
13795181Sgd78059 	 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
13805181Sgd78059 	 * bit should be set in the module variable dmfe_setup_desc1.
13815181Sgd78059 	 *
13825181Sgd78059 	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
13835181Sgd78059 	 * In this mode, *all* incoming addresses are hashed and looked
13845181Sgd78059 	 * up in the bitmap described by the setup packet.  Therefore,
13855181Sgd78059 	 * the bit representing the station address has to be added to
13865181Sgd78059 	 * the table before sending it out.  If the address is changed,
13875181Sgd78059 	 * the old entry should be removed before the new entry is made.
13885181Sgd78059 	 *
13895181Sgd78059 	 * NOTE: in this mode, unicast packets that are not intended for
13905181Sgd78059 	 * this station may be received; it is up to software to filter
13915181Sgd78059 	 * them out afterwards!
13925181Sgd78059 	 *
13935181Sgd78059 	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
13945181Sgd78059 	 * filtering.  In this mode, multicast addresses are hashed and
13955181Sgd78059 	 * checked against the bitmap, while unicast addresses are simply
13965181Sgd78059 	 * matched against the one physical address specified in the setup
13975181Sgd78059 	 * packet.  This means that we shouldn't receive unicast packets
13985181Sgd78059 	 * that aren't intended for us (but software still has to filter
13995181Sgd78059 	 * multicast packets just the same).
14005181Sgd78059 	 *
14015181Sgd78059 	 * Whichever mode we're using, we have to enter the broadcast
14025181Sgd78059 	 * address into the multicast filter map too, so we do this on
14035181Sgd78059 	 * the first time through after attach or reset.
14045181Sgd78059 	 */
14055181Sgd78059 	mutex_enter(dmfep->oplock);
14065181Sgd78059 
14075181Sgd78059 	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
14085181Sgd78059 		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
14095181Sgd78059 	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
14105181Sgd78059 		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
14115181Sgd78059 	if (!dmfep->addr_set)
14125181Sgd78059 		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);
14135181Sgd78059 
14145181Sgd78059 	/*
14155181Sgd78059 	 * Remember the new current address
14165181Sgd78059 	 */
14175181Sgd78059 	ethaddr_copy(macaddr, dmfep->curr_addr);
14185181Sgd78059 	dmfep->addr_set = B_TRUE;
14195181Sgd78059 
14205181Sgd78059 	/*
14215181Sgd78059 	 * Install the new physical address into the proper position in
14225181Sgd78059 	 * the setup frame; this is only used if we select hash+perfect
14235181Sgd78059 	 * filtering, but we'll put it in anyway.  The ugliness here is
14245181Sgd78059 	 * down to the usual chicken-and-egg problem :(
14255181Sgd78059 	 */
14265181Sgd78059 	for (index = 0; index < ETHERADDRL; index += 2)
14275181Sgd78059 		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
14285181Sgd78059 		    (macaddr[index+1] << 8) | macaddr[index]);
14295181Sgd78059 
14305181Sgd78059 	/*
14315181Sgd78059 	 * Finally, we're ready to "transmit" the setup frame
14325181Sgd78059 	 */
14335181Sgd78059 	status = dmfe_send_setup(dmfep);
14345181Sgd78059 	mutex_exit(dmfep->oplock);
14355181Sgd78059 
14365181Sgd78059 	return (status);
14375181Sgd78059 }
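
/*
 * Illustrative example only (the address is made up): the loop above
 * packs the station address two bytes per setup word, low byte first,
 * so for a MAC address of 00:03:ba:12:34:56 the words written at
 * SETUPBUF_PHYS would be
 *
 *	SETUPBUF_PHYS+0: 0x0300		(macaddr[1] << 8) | macaddr[0]
 *	SETUPBUF_PHYS+1: 0x12ba		(macaddr[3] << 8) | macaddr[2]
 *	SETUPBUF_PHYS+2: 0x5634		(macaddr[5] << 8) | macaddr[4]
 *
 * which is the position the chip consults when hash+perfect filtering
 * is selected.
 */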
14385181Sgd78059 
14395181Sgd78059 /*
14405181Sgd78059  *	dmfe_m_multicst() -- enable or disable a multicast address
14415181Sgd78059  *
14425181Sgd78059  *	Program the hardware to enable/disable the multicast address
14435181Sgd78059  *	in "mca" (enable if add is true, otherwise disable it.)
14445181Sgd78059  *	We keep a refcount for each bit in the map, so that it still
14455181Sgd78059  *	works out properly if multiple addresses hash to the same bit.
14465181Sgd78059  *	dmfe_update_mcast() tells us whether the map actually changed;
14475181Sgd78059  *	if so, we have to re-"transmit" the magic setup packet.
14485181Sgd78059  */
14495181Sgd78059 static int
14505181Sgd78059 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
14515181Sgd78059 {
14525181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
14535181Sgd78059 	int status = 0;
14545181Sgd78059 
14555181Sgd78059 	mutex_enter(dmfep->oplock);
14565181Sgd78059 	if (dmfe_update_mcast(dmfep, mca, add))
14575181Sgd78059 		status = dmfe_send_setup(dmfep);
14585181Sgd78059 	mutex_exit(dmfep->oplock);
14595181Sgd78059 
14605181Sgd78059 	return (status);
14615181Sgd78059 }
14625181Sgd78059 
14635181Sgd78059 #undef	DMFE_DBG
14645181Sgd78059 
14655181Sgd78059 
14665181Sgd78059 /*
14675181Sgd78059  * ========== Internal state management entry points ==========
14685181Sgd78059  */
14695181Sgd78059 
14705181Sgd78059 #define	DMFE_DBG	DMFE_DBG_GLD	/* debug flag for this code	*/
14715181Sgd78059 
14725181Sgd78059 /*
14735181Sgd78059  * These routines provide all the functionality required by the
14745181Sgd78059  * corresponding MAC layer entry points, but don't update the MAC layer state
14755181Sgd78059  * so they can be called internally without disturbing our record
14765181Sgd78059  * of what MAC layer thinks we should be doing ...
14775181Sgd78059  * of what the MAC layer thinks we should be doing ...
14785181Sgd78059 
14795181Sgd78059 /*
14805181Sgd78059  *	dmfe_stop() -- stop processing, don't reset h/w or rings
14815181Sgd78059  */
14825181Sgd78059 static void
14835181Sgd78059 dmfe_stop(dmfe_t *dmfep)
14845181Sgd78059 {
14855181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14865181Sgd78059 
14875181Sgd78059 	dmfe_stop_chip(dmfep, CHIP_STOPPED);
14885181Sgd78059 }
14895181Sgd78059 
14905181Sgd78059 /*
14915181Sgd78059  *	dmfe_reset() -- stop processing, reset h/w & rings to initial state
14925181Sgd78059  */
14935181Sgd78059 static void
14945181Sgd78059 dmfe_reset(dmfe_t *dmfep)
14955181Sgd78059 {
14965181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14975181Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
14985181Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
14995181Sgd78059 
15005181Sgd78059 	dmfe_stop_chip(dmfep, CHIP_RESET);
15015181Sgd78059 	dmfe_init_rings(dmfep);
15025181Sgd78059 }
15035181Sgd78059 
15045181Sgd78059 /*
15055181Sgd78059  *	dmfe_start() -- start transmitting/receiving
15065181Sgd78059  */
15075181Sgd78059 static void
15085181Sgd78059 dmfe_start(dmfe_t *dmfep)
15095181Sgd78059 {
15105181Sgd78059 	uint32_t gpsr;
15115181Sgd78059 
15125181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
15135181Sgd78059 
15145181Sgd78059 	ASSERT(dmfep->chip_state == CHIP_RESET ||
15155181Sgd78059 	    dmfep->chip_state == CHIP_STOPPED);
15165181Sgd78059 
15175181Sgd78059 	/*
15185181Sgd78059 	 * Make opmode consistent with PHY duplex setting
15195181Sgd78059 	 */
15205181Sgd78059 	gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
15215181Sgd78059 	if (gpsr & GPS_FULL_DUPLEX)
15225181Sgd78059 		dmfep->opmode |= FULL_DUPLEX;
15235181Sgd78059 	else
15245181Sgd78059 		dmfep->opmode &= ~FULL_DUPLEX;
15255181Sgd78059 
15265181Sgd78059 	/*
15275181Sgd78059 	 * Start transmit processing
15285181Sgd78059 	 * Set up the address filters
15295181Sgd78059 	 * Start receive processing
15305181Sgd78059 	 * Enable interrupts
15315181Sgd78059 	 */
15325181Sgd78059 	dmfe_start_chip(dmfep, START_TRANSMIT);
15335181Sgd78059 	(void) dmfe_send_setup(dmfep);
15345181Sgd78059 	drv_usecwait(10);
15355181Sgd78059 	dmfe_start_chip(dmfep, START_RECEIVE);
15365181Sgd78059 	dmfe_enable_interrupts(dmfep);
15375181Sgd78059 }
15385181Sgd78059 
15395181Sgd78059 /*
15405181Sgd78059  * dmfe_restart - restart transmitting/receiving after error or suspend
15415181Sgd78059  */
15425181Sgd78059 static void
15435181Sgd78059 dmfe_restart(dmfe_t *dmfep)
15445181Sgd78059 {
15455181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
15465181Sgd78059 
15475181Sgd78059 	/*
15485181Sgd78059 	 * You need not only <oplock>, but also <rxlock> AND <txlock>
15495181Sgd78059 	 * in order to reset the rings, but then <txlock> *mustn't*
15505181Sgd78059 	 * be held across the call to dmfe_start()
15515181Sgd78059 	 */
15525181Sgd78059 	mutex_enter(dmfep->rxlock);
15535181Sgd78059 	mutex_enter(dmfep->txlock);
15545181Sgd78059 	dmfe_reset(dmfep);
15555181Sgd78059 	mutex_exit(dmfep->txlock);
15565181Sgd78059 	mutex_exit(dmfep->rxlock);
15575181Sgd78059 	if (dmfep->mac_state == DMFE_MAC_STARTED)
15585181Sgd78059 		dmfe_start(dmfep);
15595181Sgd78059 }
15605181Sgd78059 
15615181Sgd78059 
15625181Sgd78059 /*
15635181Sgd78059  * ========== MAC-required management entry points ==========
15645181Sgd78059  */
15655181Sgd78059 
15665181Sgd78059 /*
15675181Sgd78059  *	dmfe_m_stop() -- stop transmitting/receiving
15685181Sgd78059  */
15695181Sgd78059 static void
15705181Sgd78059 dmfe_m_stop(void *arg)
15715181Sgd78059 {
15725181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
15735181Sgd78059 
15745181Sgd78059 	/*
15755181Sgd78059 	 * Just stop processing, then record new MAC state
15765181Sgd78059 	 */
15775181Sgd78059 	mutex_enter(dmfep->oplock);
15785181Sgd78059 	dmfe_stop(dmfep);
15795181Sgd78059 	dmfep->mac_state = DMFE_MAC_STOPPED;
15805181Sgd78059 	mutex_exit(dmfep->oplock);
15815181Sgd78059 }
15825181Sgd78059 
15835181Sgd78059 /*
15845181Sgd78059  *	dmfe_m_start() -- start transmitting/receiving
15855181Sgd78059  */
15865181Sgd78059 static int
15875181Sgd78059 dmfe_m_start(void *arg)
15885181Sgd78059 {
15895181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
15905181Sgd78059 
15915181Sgd78059 	/*
15925181Sgd78059 	 * Start processing and record new MAC state
15935181Sgd78059 	 */
15945181Sgd78059 	mutex_enter(dmfep->oplock);
15955181Sgd78059 	dmfe_start(dmfep);
15965181Sgd78059 	dmfep->mac_state = DMFE_MAC_STARTED;
15975181Sgd78059 	mutex_exit(dmfep->oplock);
15985181Sgd78059 
15995181Sgd78059 	return (0);
16005181Sgd78059 }
16015181Sgd78059 
16025181Sgd78059 /*
16035181Sgd78059  * dmfe_m_promisc() -- set or reset promiscuous mode on the board
16045181Sgd78059  *
16055181Sgd78059  *	Program the hardware to enable/disable promiscuous and/or
16065181Sgd78059  *	receive-all-multicast modes.  Davicom don't document this
16075181Sgd78059  *	clearly, but it looks like we can do this on-the-fly (i.e.
16085181Sgd78059  *	without stopping & restarting the TX/RX processes).
16095181Sgd78059  */
16105181Sgd78059 static int
16115181Sgd78059 dmfe_m_promisc(void *arg, boolean_t on)
16125181Sgd78059 {
16135181Sgd78059 	dmfe_t *dmfep = arg;
16145181Sgd78059 
16155181Sgd78059 	mutex_enter(dmfep->oplock);
16165181Sgd78059 	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
16175181Sgd78059 	if (on)
16185181Sgd78059 		dmfep->opmode |= PROMISC_MODE;
16195181Sgd78059 	dmfe_set_opmode(dmfep);
16205181Sgd78059 	mutex_exit(dmfep->oplock);
16215181Sgd78059 
16225181Sgd78059 	return (0);
16235181Sgd78059 }
16245181Sgd78059 
16255181Sgd78059 /*ARGSUSED*/
16265181Sgd78059 static boolean_t
16275181Sgd78059 dmfe_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
16285181Sgd78059 {
16295181Sgd78059 	/*
16305181Sgd78059 	 * Note that the chip could support some form of polling and
16315181Sgd78059 	 * multiaddress support.  We should look into adding polling
16325181Sgd78059 	 * support later, once Solaris is better positioned to take
16335181Sgd78059 	 * advantage of it, although it may be of little use since
16345181Sgd78059 	 * even a lowly 500MHz US-IIe should be able to keep up with
16355181Sgd78059 	 * 100Mbps.  (Esp. if the packets are not unreasonably sized.)
16365181Sgd78059 	 *
16375181Sgd78059 	 * Multiaddress support, however, is likely to be of more
16385181Sgd78059 	 * utility with crossbow and virtualized NICs.  Although, the
16395181Sgd78059 	 * fact that dmfe is only supported on low-end US-IIe hardware
16405181Sgd78059 	 * makes one wonder whether VNICs are likely to be used on
16415181Sgd78059 	 * such platforms.  The chip certainly supports the notion,
16425181Sgd78059 	 * since it can be run in HASH-ONLY mode.  (Though this would
16435181Sgd78059 	 * require software to drop unicast packets that are
16445181Sgd78059 	 * incorrectly received due to hash collision of the
16455181Sgd78059 	 * destination mac address.)
16465181Sgd78059 	 *
16475181Sgd78059 	 * Interestingly enough, modern Davicom chips (the 9102D)
16485181Sgd78059 	 * support full IP checksum offload, though it's unclear
16495181Sgd78059 	 * whether any of these chips are used on any systems that can
16505181Sgd78059 	 * run Solaris.
16515181Sgd78059 	 *
16525181Sgd78059 	 * If this driver is ever supported on x86 hardware, then
16535181Sgd78059 	 * these assumptions should be revisited.
16545181Sgd78059 	 */
16555181Sgd78059 	switch (cap) {
16565181Sgd78059 	case MAC_CAPAB_POLL:
16575181Sgd78059 	case MAC_CAPAB_MULTIADDRESS:
16585181Sgd78059 	case MAC_CAPAB_HCKSUM:
16595181Sgd78059 	default:
16605181Sgd78059 		return (B_FALSE);
16615181Sgd78059 	}
16625181Sgd78059 }
16635181Sgd78059 
16645181Sgd78059 
16655181Sgd78059 #undef	DMFE_DBG
16665181Sgd78059 
16675181Sgd78059 
16685181Sgd78059 /*
16695181Sgd78059  * ========== Factotum, implemented as a softint handler ==========
16705181Sgd78059  */
16715181Sgd78059 
16725181Sgd78059 #define	DMFE_DBG	DMFE_DBG_FACT	/* debug flag for this code	*/
16735181Sgd78059 
16745181Sgd78059 /*
16755181Sgd78059  * The factotum is woken up when there's something to do that we'd rather
16765181Sgd78059  * not do from inside a (high-level?) hardware interrupt handler.  Its
16775181Sgd78059  * two main tasks are:
16785181Sgd78059  *	reset & restart the chip after an error
16795181Sgd78059  *	update & restart the chip after a link status change
16805181Sgd78059  */
16815181Sgd78059 static uint_t
16825181Sgd78059 dmfe_factotum(caddr_t arg)
16835181Sgd78059 {
16845181Sgd78059 	dmfe_t *dmfep;
16855181Sgd78059 
1686*6990Sgd78059 	dmfep = (void *)arg;
16875181Sgd78059 	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
16885181Sgd78059 
16895181Sgd78059 	mutex_enter(dmfep->oplock);
16905181Sgd78059 
16915181Sgd78059 	dmfep->factotum_flag = 0;
16925181Sgd78059 	DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);
16935181Sgd78059 
16945181Sgd78059 	/*
16955181Sgd78059 	 * Check for chip error ...
16965181Sgd78059 	 */
16975181Sgd78059 	if (dmfep->chip_state == CHIP_ERROR) {
16985181Sgd78059 		/*
16995181Sgd78059 		 * Error recovery required: reset the chip and the rings,
17005181Sgd78059 		 * then, if it's supposed to be running, kick it off again.
17015181Sgd78059 		 */
17025181Sgd78059 		DRV_KS_INC(dmfep, KS_RECOVERY);
17035181Sgd78059 		dmfe_restart(dmfep);
17045181Sgd78059 	} else if (dmfep->need_setup) {
17055181Sgd78059 		(void) dmfe_send_setup(dmfep);
17065181Sgd78059 	}
17075181Sgd78059 	mutex_exit(dmfep->oplock);
17085181Sgd78059 
17095181Sgd78059 	/*
17105181Sgd78059 	 * Then, check the link state.  We need <milock> but not <oplock>
17115181Sgd78059 	 * to do this, but if something's changed, we need <oplock> as well
17125181Sgd78059 	 * in order to stop/restart the chip!  Note: we could simply hold
17135181Sgd78059 	 * <oplock> right through here, but we'd rather not 'cos checking
17145181Sgd78059 	 * the link state involves reading over the bit-serial MII bus,
17155181Sgd78059 	 * which takes ~500us even when nothing's changed.  Holding <oplock>
17165181Sgd78059 	 * would lock out the interrupt handler for the duration, so it's
17175181Sgd78059 	 * better to release it first and reacquire it only if needed.
17185181Sgd78059 	 */
17195181Sgd78059 	mutex_enter(dmfep->milock);
17205181Sgd78059 	if (dmfe_check_link(dmfep)) {
17215181Sgd78059 		mutex_enter(dmfep->oplock);
17225181Sgd78059 		dmfe_stop(dmfep);
17235181Sgd78059 		DRV_KS_INC(dmfep, KS_LINK_CHECK);
17245181Sgd78059 		if (dmfep->update_phy) {
17255181Sgd78059 			/*
17265181Sgd78059 			 * The chip may reset itself for some unknown
17275181Sgd78059 			 * reason.  If this happens, the chip will use
17285181Sgd78059 			 * default settings (for speed, duplex, and autoneg),
17295181Sgd78059 			 * which possibly aren't the user's desired settings.
17305181Sgd78059 			 */
17315181Sgd78059 			dmfe_update_phy(dmfep);
17325181Sgd78059 			dmfep->update_phy = B_FALSE;
17335181Sgd78059 		}
17345181Sgd78059 		dmfe_recheck_link(dmfep, B_FALSE);
17355181Sgd78059 		if (dmfep->mac_state == DMFE_MAC_STARTED)
17365181Sgd78059 			dmfe_start(dmfep);
17375181Sgd78059 		mutex_exit(dmfep->oplock);
17385181Sgd78059 	}
17395181Sgd78059 	mutex_exit(dmfep->milock);
17405181Sgd78059 
17415181Sgd78059 	/*
17425181Sgd78059 	 * Keep MAC up-to-date about the state of the link ...
17435181Sgd78059 	 */
17445181Sgd78059 	mac_link_update(dmfep->mh, dmfep->link_state);
17455181Sgd78059 
17465181Sgd78059 	return (DDI_INTR_CLAIMED);
17475181Sgd78059 }
17485181Sgd78059 
17495181Sgd78059 static void
17505181Sgd78059 dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
17515181Sgd78059 {
17525181Sgd78059 	DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d",
17535181Sgd78059 	    why, ks_id, dmfep->factotum_flag));
17545181Sgd78059 
17555181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
17565181Sgd78059 	DRV_KS_INC(dmfep, ks_id);
17575181Sgd78059 
17585181Sgd78059 	if (dmfep->factotum_flag++ == 0)
17595181Sgd78059 		ddi_trigger_softintr(dmfep->factotum_id);
17605181Sgd78059 }
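
/*
 * Illustrative note (not part of the driver): <factotum_flag> makes the
 * wakeup idempotent, so several wake requests arriving before the
 * factotum runs collapse into a single softint:
 *
 *	wake #1: flag 0 -> 1, ddi_trigger_softintr() called
 *	wake #2: flag 1 -> 2, no further trigger
 *	factotum runs, resets the flag to 0 under <oplock>
 */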
17615181Sgd78059 
17625181Sgd78059 #undef	DMFE_DBG
17635181Sgd78059 
17645181Sgd78059 
17655181Sgd78059 /*
17665181Sgd78059  * ========== Periodic Tasks (Cyclic handler & friends) ==========
17675181Sgd78059  */
17685181Sgd78059 
17695181Sgd78059 #define	DMFE_DBG	DMFE_DBG_TICK	/* debug flag for this code	*/
17705181Sgd78059 
17715181Sgd78059 /*
17725181Sgd78059  * Periodic tick tasks, run from the cyclic handler
17735181Sgd78059  *
17745181Sgd78059  * Check the state of the link and wake the factotum if necessary
17755181Sgd78059  */
17765181Sgd78059 static void
17775181Sgd78059 dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
17785181Sgd78059 {
17795181Sgd78059 	link_state_t phy_state;
17805181Sgd78059 	link_state_t utp_state;
17815181Sgd78059 	const char *why;
17825181Sgd78059 	int ks_id;
17835181Sgd78059 
17845181Sgd78059 	_NOTE(ARGUNUSED(istat))
17855181Sgd78059 
17865181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
17875181Sgd78059 
17885181Sgd78059 	/*
17895181Sgd78059 	 * Is it time to wake the factotum?  We do so periodically,
17905181Sgd78059 	 * because the fast check below may not catch every link change
17915181Sgd78059 	 */
17925181Sgd78059 	if (dmfep->link_poll_tix-- == 0) {
17935181Sgd78059 		dmfep->link_poll_tix = factotum_tix;
17945181Sgd78059 		why = "tick (link poll)";
17955181Sgd78059 		ks_id = KS_TICK_LINK_POLL;
17965181Sgd78059 	} else {
17975181Sgd78059 		why = NULL;
17985181Sgd78059 		ks_id = KS_TICK_LINK_STATE;
17995181Sgd78059 	}
18005181Sgd78059 
18015181Sgd78059 	/*
18025181Sgd78059 	 * Has the link status changed?  If so, we might want to wake
18035181Sgd78059 	 * the factotum to deal with it.
18045181Sgd78059 	 */
18055181Sgd78059 	phy_state = (gpsr & GPS_LINK_STATUS) ? LINK_STATE_UP : LINK_STATE_DOWN;
18065181Sgd78059 	utp_state = (gpsr & GPS_UTP_SIG) ? LINK_STATE_UP : LINK_STATE_DOWN;
18075181Sgd78059 	if (phy_state != utp_state)
18085181Sgd78059 		why = "tick (phy <> utp)";
18095181Sgd78059 	else if ((dmfep->link_state == LINK_STATE_UP) &&
18105181Sgd78059 	    (phy_state == LINK_STATE_DOWN))
18115181Sgd78059 		why = "tick (UP -> DOWN)";
18125181Sgd78059 	else if (phy_state != dmfep->link_state) {
18135181Sgd78059 		if (dmfep->link_poll_tix > factotum_fast_tix)
18145181Sgd78059 			dmfep->link_poll_tix = factotum_fast_tix;
18155181Sgd78059 	}
18165181Sgd78059 
18175181Sgd78059 	if (why != NULL) {
18185181Sgd78059 		DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d",
18195181Sgd78059 		    why, dmfep->link_state, phy_state, utp_state));
18205181Sgd78059 		dmfe_wake_factotum(dmfep, ks_id, why);
18215181Sgd78059 	}
18225181Sgd78059 }
18235181Sgd78059 
18245181Sgd78059 /*
18255181Sgd78059  * Periodic tick tasks, run from the cyclic handler
18265181Sgd78059  *
18275181Sgd78059  * Check for TX stall; flag an error and wake the factotum if so.
18285181Sgd78059  */
18295181Sgd78059 static void
18305181Sgd78059 dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
18315181Sgd78059 {
18325181Sgd78059 	boolean_t tx_stall;
18335181Sgd78059 	uint32_t tx_state;
18345181Sgd78059 	uint32_t limit;
18355181Sgd78059 
18365181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
18375181Sgd78059 
18385181Sgd78059 	/*
18395181Sgd78059 	 * Check for transmit stall ...
18405181Sgd78059 	 *
18415181Sgd78059 	 * IF there's at least one packet in the ring, AND the timeout
18425181Sgd78059 	 * has elapsed, AND we can't reclaim any descriptors, THEN we've
18435181Sgd78059 	 * stalled; we stop the chip and trigger a reset-and-recover cycle.
18445181Sgd78059 	 *
18455181Sgd78059 	 * Note that the timeout limit is based on the transmit engine
18465181Sgd78059 	 * state; we allow the transmitter longer to make progress in
18475181Sgd78059 	 * some states than in others, based on observations of this
18485181Sgd78059 	 * chip's actual behaviour in the lab.
18495181Sgd78059 	 *
18505181Sgd78059 	 * By observation, we find that on about 1 in 10000 passes through
18515181Sgd78059 	 * here, the TX lock is already held.  In that case, we'll skip
18525181Sgd78059 	 * the check on this pass rather than wait.  Most likely, the send
18535181Sgd78059 	 * routine was holding the lock when the interrupt happened, and
18545181Sgd78059 	 * we'll succeed next time through.  In the event of a real stall,
18555181Sgd78059 	 * the TX ring will fill up, after which the send routine won't be
18565181Sgd78059 	 * called any more and then we're sure to get in.
18575181Sgd78059 	 */
18585181Sgd78059 	tx_stall = B_FALSE;
18595181Sgd78059 	if (mutex_tryenter(dmfep->txlock)) {
18605181Sgd78059 		if (dmfep->tx.n_free < dmfep->tx.n_desc) {
18615181Sgd78059 			tx_state = TX_PROCESS_STATE(istat);
18625181Sgd78059 			if (gpsr & GPS_LINK_100)
18635181Sgd78059 				limit = stall_100_tix[tx_state];
18645181Sgd78059 			else
18655181Sgd78059 				limit = stall_10_tix[tx_state];
18665181Sgd78059 			if (++dmfep->tx_pending_tix >= limit &&
18675181Sgd78059 			    dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
18685181Sgd78059 				dmfe_log(dmfep, "TX stall detected "
18695181Sgd78059 				    "after %d ticks in state %d; "
18705181Sgd78059 				    "automatic recovery initiated",
18715181Sgd78059 				    dmfep->tx_pending_tix, tx_state);
18725181Sgd78059 				tx_stall = B_TRUE;
18735181Sgd78059 			}
18745181Sgd78059 		}
18755181Sgd78059 		mutex_exit(dmfep->txlock);
18765181Sgd78059 	}
18775181Sgd78059 
18785181Sgd78059 	if (tx_stall) {
18795181Sgd78059 		dmfe_stop_chip(dmfep, CHIP_ERROR);
18805181Sgd78059 		dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
18815181Sgd78059 	}
18825181Sgd78059 }
18835181Sgd78059 
18845181Sgd78059 /*
18855181Sgd78059  * Cyclic callback handler
18865181Sgd78059  */
18875181Sgd78059 static void
18885181Sgd78059 dmfe_cyclic(void *arg)
18895181Sgd78059 {
18905181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info */
18915181Sgd78059 	uint32_t istat;
18925181Sgd78059 	uint32_t gpsr;
18935181Sgd78059 
18945181Sgd78059 	/*
18955181Sgd78059 	 * If the chip's not RUNNING, there's nothing to do.
18965181Sgd78059 	 * If we can't get the mutex straight away, we'll just
18975181Sgd78059 	 * skip this pass; we'll be back soon enough anyway.
18985181Sgd78059 	 */
18995181Sgd78059 	if (dmfep->chip_state != CHIP_RUNNING)
19005181Sgd78059 		return;
19015181Sgd78059 	if (mutex_tryenter(dmfep->oplock) == 0)
19025181Sgd78059 		return;
19035181Sgd78059 
19045181Sgd78059 	/*
19055181Sgd78059 	 * Recheck chip state (it might have been stopped since we
19065181Sgd78059 	 * checked above).  If still running, call each of the *tick*
19075181Sgd78059 	 * tasks.  They will check for link change, TX stall, etc ...
19085181Sgd78059 	 */
19095181Sgd78059 	if (dmfep->chip_state == CHIP_RUNNING) {
19105181Sgd78059 		istat = dmfe_chip_get32(dmfep, STATUS_REG);
19115181Sgd78059 		gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
19125181Sgd78059 		dmfe_tick_link_check(dmfep, gpsr, istat);
19135181Sgd78059 		dmfe_tick_stall_check(dmfep, gpsr, istat);
19145181Sgd78059 	}
19155181Sgd78059 
19165181Sgd78059 	DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
19175181Sgd78059 	mutex_exit(dmfep->oplock);
19185181Sgd78059 }
19195181Sgd78059 
19205181Sgd78059 #undef	DMFE_DBG
19215181Sgd78059 
19225181Sgd78059 
19235181Sgd78059 /*
19245181Sgd78059  * ========== Hardware interrupt handler ==========
19255181Sgd78059  */
19265181Sgd78059 
19275181Sgd78059 #define	DMFE_DBG	DMFE_DBG_INT	/* debug flag for this code	*/
19285181Sgd78059 
19295181Sgd78059 /*
19305181Sgd78059  *	dmfe_interrupt() -- handle chip interrupts
19315181Sgd78059  */
19325181Sgd78059 static uint_t
19335181Sgd78059 dmfe_interrupt(caddr_t arg)
19345181Sgd78059 {
19355181Sgd78059 	dmfe_t *dmfep;			/* private device info */
19365181Sgd78059 	uint32_t interrupts;
19375181Sgd78059 	uint32_t istat;
19385181Sgd78059 	const char *msg;
19395181Sgd78059 	mblk_t *mp;
19405181Sgd78059 	boolean_t warning_msg = B_TRUE;
19415181Sgd78059 
1942*6990Sgd78059 	dmfep = (void *)arg;
19435181Sgd78059 
19445181Sgd78059 	/*
19455181Sgd78059 	 * A quick check as to whether the interrupt was from this
19465181Sgd78059 	 * device, before we even finish setting up all our local
19475181Sgd78059 	 * variables.  Note that reading the interrupt status register
19485181Sgd78059 	 * doesn't have any unpleasant side effects such as clearing
19495181Sgd78059 	 * the bits read, so it's quite OK to re-read it once we have
19505181Sgd78059 	 * determined that we are going to service this interrupt and
19515181Sgd78059 	 * grabbed the mutexen.
19525181Sgd78059 	 */
19535181Sgd78059 	istat = dmfe_chip_get32(dmfep, STATUS_REG);
19545181Sgd78059 	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0)
19555181Sgd78059 		return (DDI_INTR_UNCLAIMED);
19565181Sgd78059 
19575181Sgd78059 	/*
19585181Sgd78059 	 * Unfortunately, there can be a race condition between attach()
19595181Sgd78059 	 * adding the interrupt handler and initialising the mutexen,
19605181Sgd78059 	 * and the handler itself being called because of a pending
19615181Sgd78059 	 * interrupt.  So, we check <imask>; if it shows that interrupts
19625181Sgd78059 	 * haven't yet been enabled (and therefore we shouldn't really
19635181Sgd78059 	 * be here at all), we will just write back the value read from
19645181Sgd78059 	 * the status register, thus acknowledging (and clearing) *all*
19655181Sgd78059 	 * pending conditions without really servicing them, and claim
19665181Sgd78059 	 * the interrupt.
19675181Sgd78059 	 */
19685181Sgd78059 	if (dmfep->imask == 0) {
19695181Sgd78059 		DMFE_DEBUG(("dmfe_interrupt: early interrupt 0x%x", istat));
19705181Sgd78059 		dmfe_chip_put32(dmfep, STATUS_REG, istat);
19715181Sgd78059 		return (DDI_INTR_CLAIMED);
19725181Sgd78059 	}
19735181Sgd78059 
19745181Sgd78059 	/*
19755181Sgd78059 	 * We're committed to servicing this interrupt, but we
19765181Sgd78059 	 * need to get the lock before going any further ...
19775181Sgd78059 	 */
19785181Sgd78059 	mutex_enter(dmfep->oplock);
19795181Sgd78059 	DRV_KS_INC(dmfep, KS_INTERRUPT);
19805181Sgd78059 
19815181Sgd78059 	/*
19825181Sgd78059 	 * Identify bits that represent enabled interrupts ...
19835181Sgd78059 	 */
19845181Sgd78059 	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
19855181Sgd78059 	interrupts = istat & dmfep->imask;
19865181Sgd78059 	ASSERT(interrupts != 0);
19875181Sgd78059 
19885181Sgd78059 	DMFE_DEBUG(("dmfe_interrupt: istat 0x%x -> 0x%x", istat, interrupts));
19895181Sgd78059 
19905181Sgd78059 	/*
19915181Sgd78059 	 * Check for any interrupts other than TX/RX done.
19925181Sgd78059 	 * If there are any, they are considered Abnormal
19935181Sgd78059 	 * and will cause the chip to be reset.
19945181Sgd78059 	 */
19955181Sgd78059 	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
19965181Sgd78059 		if (istat & ABNORMAL_SUMMARY_INT) {
19975181Sgd78059 			/*
19985181Sgd78059 			 * Any Abnormal interrupts will lead to us
19995181Sgd78059 			 * resetting the chip, so we don't bother
20005181Sgd78059 			 * to clear each interrupt individually.
20015181Sgd78059 			 *
20025181Sgd78059 			 * Our main task here is to identify the problem,
20035181Sgd78059 			 * by pointing out the most significant unexpected
20045181Sgd78059 			 * bit.  Additional bits may well be consequences
20055181Sgd78059 			 * of the first problem, so we consider the possible
20065181Sgd78059 			 * causes in order of severity.
20075181Sgd78059 			 */
20085181Sgd78059 			if (interrupts & SYSTEM_ERR_INT) {
20095181Sgd78059 				switch (istat & SYSTEM_ERR_BITS) {
20105181Sgd78059 				case SYSTEM_ERR_M_ABORT:
20115181Sgd78059 					msg = "Bus Master Abort";
20125181Sgd78059 					break;
20135181Sgd78059 
20145181Sgd78059 				case SYSTEM_ERR_T_ABORT:
20155181Sgd78059 					msg = "Bus Target Abort";
20165181Sgd78059 					break;
20175181Sgd78059 
20185181Sgd78059 				case SYSTEM_ERR_PARITY:
20195181Sgd78059 					msg = "Parity Error";
20205181Sgd78059 					break;
20215181Sgd78059 
20225181Sgd78059 				default:
20235181Sgd78059 					msg = "Unknown System Bus Error";
20245181Sgd78059 					break;
20255181Sgd78059 				}
20265181Sgd78059 			} else if (interrupts & RX_STOPPED_INT) {
20275181Sgd78059 				msg = "RX process stopped";
20285181Sgd78059 			} else if (interrupts & RX_UNAVAIL_INT) {
20295181Sgd78059 				msg = "RX buffer unavailable";
20305181Sgd78059 				warning_msg = B_FALSE;
20315181Sgd78059 			} else if (interrupts & RX_WATCHDOG_INT) {
20325181Sgd78059 				msg = "RX watchdog timeout?";
20335181Sgd78059 			} else if (interrupts & RX_EARLY_INT) {
20345181Sgd78059 				msg = "RX early interrupt?";
20355181Sgd78059 			} else if (interrupts & TX_STOPPED_INT) {
20365181Sgd78059 				msg = "TX process stopped";
20375181Sgd78059 			} else if (interrupts & TX_JABBER_INT) {
20385181Sgd78059 				msg = "TX jabber timeout";
20395181Sgd78059 			} else if (interrupts & TX_UNDERFLOW_INT) {
20405181Sgd78059 				msg = "TX underflow?";
20415181Sgd78059 			} else if (interrupts & TX_EARLY_INT) {
20425181Sgd78059 				msg = "TX early interrupt?";
20435181Sgd78059 
20445181Sgd78059 			} else if (interrupts & LINK_STATUS_INT) {
20455181Sgd78059 				msg = "Link status change?";
20465181Sgd78059 			} else if (interrupts & GP_TIMER_INT) {
20475181Sgd78059 				msg = "Timer expired?";
			} else {
				/* Keep <msg> defined for the warning below */
				msg = "unidentified condition";
20485181Sgd78059 			}
20495181Sgd78059 
20505181Sgd78059 			if (warning_msg)
20515181Sgd78059 				dmfe_warning(dmfep, "abnormal interrupt, "
20525181Sgd78059 				    "status 0x%x: %s", istat, msg);
20535181Sgd78059 
20545181Sgd78059 			/*
20555181Sgd78059 			 * We don't want to run the entire reinitialisation
20565181Sgd78059 			 * code out of this (high-level?) interrupt, so we
20575181Sgd78059 			 * simply STOP the chip, and wake up the factotum
20585181Sgd78059 			 * to reinitialise it ...
20595181Sgd78059 			 */
20605181Sgd78059 			dmfe_stop_chip(dmfep, CHIP_ERROR);
20615181Sgd78059 			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
20625181Sgd78059 			    "interrupt (error)");
20635181Sgd78059 		} else {
20645181Sgd78059 			/*
20655181Sgd78059 			 * We shouldn't really get here (it would mean
20665181Sgd78059 			 * there were some unprocessed enabled bits but
20675181Sgd78059 			 * they weren't Abnormal?), but we'll check just
20685181Sgd78059 			 * in case ...
20695181Sgd78059 			 */
20705181Sgd78059 			DMFE_DEBUG(("unexpected interrupt bits: 0x%x", istat));
20715181Sgd78059 		}
20725181Sgd78059 	}
20735181Sgd78059 
20745181Sgd78059 	/*
20755181Sgd78059 	 * Acknowledge all the original bits - except in the case of an
20765181Sgd78059 	 * error, when we leave them unacknowledged so that the recovery
20775181Sgd78059 	 * code can see what was going on when the problem occurred ...
20785181Sgd78059 	 */
20795181Sgd78059 	if (dmfep->chip_state != CHIP_ERROR) {
20805181Sgd78059 		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
20815181Sgd78059 		/*
20825181Sgd78059 		 * Read-after-write forces completion on the PCI bus.
20845181Sgd78059 		 */
20855181Sgd78059 		(void) dmfe_chip_get32(dmfep, STATUS_REG);
20865181Sgd78059 	}
20875181Sgd78059 
20885181Sgd78059 
20895181Sgd78059 	/*
20905181Sgd78059 	 * We've finished talking to the chip, so we can drop <oplock>
20915181Sgd78059 	 * before handling the normal interrupts, which only involve
20925181Sgd78059 	 * manipulation of descriptors ...
20935181Sgd78059 	 */
20945181Sgd78059 	mutex_exit(dmfep->oplock);
20955181Sgd78059 
20965181Sgd78059 	if (interrupts & RX_PKTDONE_INT)
20975181Sgd78059 		if ((mp = dmfe_getp(dmfep)) != NULL)
20985181Sgd78059 			mac_rx(dmfep->mh, NULL, mp);
20995181Sgd78059 
21005181Sgd78059 	if (interrupts & TX_PKTDONE_INT) {
21015181Sgd78059 		/*
21025181Sgd78059 		 * The only reason for taking this interrupt is to give
21035181Sgd78059 		 * MAC a chance to schedule queued packets after a
21045181Sgd78059 		 * ring-full condition.  To minimise the number of
21055181Sgd78059 		 * redundant TX-Done interrupts, we only mark two of the
21065181Sgd78059 		 * ring descriptors as 'interrupt-on-complete' - all the
21075181Sgd78059 		 * others are simply handed back without an interrupt.
21085181Sgd78059 		 */
21095181Sgd78059 		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
21105181Sgd78059 			(void) dmfe_reclaim_tx_desc(dmfep);
21115181Sgd78059 			mutex_exit(dmfep->txlock);
21125181Sgd78059 		}
21135181Sgd78059 		mac_tx_update(dmfep->mh);
21145181Sgd78059 	}
21155181Sgd78059 
21165181Sgd78059 	return (DDI_INTR_CLAIMED);
21175181Sgd78059 }
21185181Sgd78059 
21195181Sgd78059 #undef	DMFE_DBG
21205181Sgd78059 
21215181Sgd78059 
21225181Sgd78059 /*
21235181Sgd78059  * ========== Statistics update handler ==========
21245181Sgd78059  */
21255181Sgd78059 
21265181Sgd78059 #define	DMFE_DBG	DMFE_DBG_STATS	/* debug flag for this code	*/
21275181Sgd78059 
21285181Sgd78059 static int
21295181Sgd78059 dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
21305181Sgd78059 {
21315181Sgd78059 	dmfe_t *dmfep = arg;
21325181Sgd78059 	int rv = 0;
21335181Sgd78059 
21345181Sgd78059 	mutex_enter(dmfep->milock);
21355181Sgd78059 	mutex_enter(dmfep->oplock);
21365181Sgd78059 	mutex_enter(dmfep->rxlock);
21375181Sgd78059 	mutex_enter(dmfep->txlock);
21385181Sgd78059 
21395181Sgd78059 	/* make sure we have all the stats collected */
21405181Sgd78059 	(void) dmfe_reclaim_tx_desc(dmfep);
21415181Sgd78059 
21425181Sgd78059 	switch (stat) {
21435181Sgd78059 	case MAC_STAT_IFSPEED:
21445181Sgd78059 		*val = dmfep->op_stats_speed;
21455181Sgd78059 		break;
21465181Sgd78059 
21475181Sgd78059 	case MAC_STAT_IPACKETS:
21485181Sgd78059 		*val = dmfep->rx_stats_ipackets;
21495181Sgd78059 		break;
21505181Sgd78059 
21515181Sgd78059 	case MAC_STAT_MULTIRCV:
21525181Sgd78059 		*val = dmfep->rx_stats_multi;
21535181Sgd78059 		break;
21545181Sgd78059 
21555181Sgd78059 	case MAC_STAT_BRDCSTRCV:
21565181Sgd78059 		*val = dmfep->rx_stats_bcast;
21575181Sgd78059 		break;
21585181Sgd78059 
21595181Sgd78059 	case MAC_STAT_RBYTES:
21605181Sgd78059 		*val = dmfep->rx_stats_rbytes;
21615181Sgd78059 		break;
21625181Sgd78059 
21635181Sgd78059 	case MAC_STAT_IERRORS:
21645181Sgd78059 		*val = dmfep->rx_stats_ierrors;
21655181Sgd78059 		break;
21665181Sgd78059 
21675181Sgd78059 	case MAC_STAT_NORCVBUF:
21685181Sgd78059 		*val = dmfep->rx_stats_norcvbuf;
21695181Sgd78059 		break;
21705181Sgd78059 
21715181Sgd78059 	case MAC_STAT_COLLISIONS:
21725181Sgd78059 		*val = dmfep->tx_stats_collisions;
21735181Sgd78059 		break;
21745181Sgd78059 
21755181Sgd78059 	case MAC_STAT_OERRORS:
21765181Sgd78059 		*val = dmfep->tx_stats_oerrors;
21775181Sgd78059 		break;
21785181Sgd78059 
21795181Sgd78059 	case MAC_STAT_OPACKETS:
21805181Sgd78059 		*val = dmfep->tx_stats_opackets;
21815181Sgd78059 		break;
21825181Sgd78059 
21835181Sgd78059 	case MAC_STAT_MULTIXMT:
21845181Sgd78059 		*val = dmfep->tx_stats_multi;
21855181Sgd78059 		break;
21865181Sgd78059 
21875181Sgd78059 	case MAC_STAT_BRDCSTXMT:
21885181Sgd78059 		*val = dmfep->tx_stats_bcast;
21895181Sgd78059 		break;
21905181Sgd78059 
21915181Sgd78059 	case MAC_STAT_OBYTES:
21925181Sgd78059 		*val = dmfep->tx_stats_obytes;
21935181Sgd78059 		break;
21945181Sgd78059 
21955181Sgd78059 	case MAC_STAT_OVERFLOWS:
21965181Sgd78059 		*val = dmfep->rx_stats_overflow;
21975181Sgd78059 		break;
21985181Sgd78059 
21995181Sgd78059 	case MAC_STAT_UNDERFLOWS:
22005181Sgd78059 		*val = dmfep->tx_stats_underflow;
22015181Sgd78059 		break;
22025181Sgd78059 
22035181Sgd78059 	case ETHER_STAT_ALIGN_ERRORS:
22045181Sgd78059 		*val = dmfep->rx_stats_align;
22055181Sgd78059 		break;
22065181Sgd78059 
22075181Sgd78059 	case ETHER_STAT_FCS_ERRORS:
22085181Sgd78059 		*val = dmfep->rx_stats_fcs;
22095181Sgd78059 		break;
22105181Sgd78059 
22115181Sgd78059 	case ETHER_STAT_TOOLONG_ERRORS:
22125181Sgd78059 		*val = dmfep->rx_stats_toolong;
22135181Sgd78059 		break;
22145181Sgd78059 
22155181Sgd78059 	case ETHER_STAT_TOOSHORT_ERRORS:
22165181Sgd78059 		*val = dmfep->rx_stats_short;
22175181Sgd78059 		break;
22185181Sgd78059 
22195181Sgd78059 	case ETHER_STAT_MACRCV_ERRORS:
22205181Sgd78059 		*val = dmfep->rx_stats_macrcv_errors;
22215181Sgd78059 		break;
22225181Sgd78059 
22235181Sgd78059 	case ETHER_STAT_MACXMT_ERRORS:
22245181Sgd78059 		*val = dmfep->tx_stats_macxmt_errors;
22255181Sgd78059 		break;
22265181Sgd78059 
22275181Sgd78059 	case ETHER_STAT_JABBER_ERRORS:
22285181Sgd78059 		*val = dmfep->tx_stats_jabber;
22295181Sgd78059 		break;
22305181Sgd78059 
22315181Sgd78059 	case ETHER_STAT_CARRIER_ERRORS:
22325181Sgd78059 		*val = dmfep->tx_stats_nocarrier;
22335181Sgd78059 		break;
22345181Sgd78059 
22355181Sgd78059 	case ETHER_STAT_TX_LATE_COLLISIONS:
22365181Sgd78059 		*val = dmfep->tx_stats_xmtlatecoll;
22375181Sgd78059 		break;
22385181Sgd78059 
22395181Sgd78059 	case ETHER_STAT_EX_COLLISIONS:
22405181Sgd78059 		*val = dmfep->tx_stats_excoll;
22415181Sgd78059 		break;
22425181Sgd78059 
22435181Sgd78059 	case ETHER_STAT_DEFER_XMTS:
22445181Sgd78059 		*val = dmfep->tx_stats_defer;
22455181Sgd78059 		break;
22465181Sgd78059 
22475181Sgd78059 	case ETHER_STAT_FIRST_COLLISIONS:
22485181Sgd78059 		*val = dmfep->tx_stats_first_coll;
22495181Sgd78059 		break;
22505181Sgd78059 
22515181Sgd78059 	case ETHER_STAT_MULTI_COLLISIONS:
22525181Sgd78059 		*val = dmfep->tx_stats_multi_coll;
22535181Sgd78059 		break;
22545181Sgd78059 
22555181Sgd78059 	case ETHER_STAT_XCVR_INUSE:
22565181Sgd78059 		*val = dmfep->phy_inuse;
22575181Sgd78059 		break;
22585181Sgd78059 
22595181Sgd78059 	case ETHER_STAT_XCVR_ID:
22605181Sgd78059 		*val = dmfep->phy_id;
22615181Sgd78059 		break;
22625181Sgd78059 
22635181Sgd78059 	case ETHER_STAT_XCVR_ADDR:
22645181Sgd78059 		*val = dmfep->phy_addr;
22655181Sgd78059 		break;
22665181Sgd78059 
22675181Sgd78059 	case ETHER_STAT_LINK_DUPLEX:
22685181Sgd78059 		*val = dmfep->op_stats_duplex;
22695181Sgd78059 		break;
22705181Sgd78059 
22715181Sgd78059 	case ETHER_STAT_CAP_100T4:
22725181Sgd78059 		*val = dmfep->param_bmsr_100T4;
22735181Sgd78059 		break;
22745181Sgd78059 
22755181Sgd78059 	case ETHER_STAT_CAP_100FDX:
22765181Sgd78059 		*val = dmfep->param_bmsr_100fdx;
22775181Sgd78059 		break;
22785181Sgd78059 
22795181Sgd78059 	case ETHER_STAT_CAP_100HDX:
22805181Sgd78059 		*val = dmfep->param_bmsr_100hdx;
22815181Sgd78059 		break;
22825181Sgd78059 
22835181Sgd78059 	case ETHER_STAT_CAP_10FDX:
22845181Sgd78059 		*val = dmfep->param_bmsr_10fdx;
22855181Sgd78059 		break;
22865181Sgd78059 
22875181Sgd78059 	case ETHER_STAT_CAP_10HDX:
22885181Sgd78059 		*val = dmfep->param_bmsr_10hdx;
22895181Sgd78059 		break;
22905181Sgd78059 
22915181Sgd78059 	case ETHER_STAT_CAP_AUTONEG:
22925181Sgd78059 		*val = dmfep->param_bmsr_autoneg;
22935181Sgd78059 		break;
22945181Sgd78059 
22955181Sgd78059 	case ETHER_STAT_CAP_REMFAULT:
22965181Sgd78059 		*val = dmfep->param_bmsr_remfault;
22975181Sgd78059 		break;
22985181Sgd78059 
22995181Sgd78059 	case ETHER_STAT_ADV_CAP_AUTONEG:
23005181Sgd78059 		*val = dmfep->param_autoneg;
23015181Sgd78059 		break;
23025181Sgd78059 
23035181Sgd78059 	case ETHER_STAT_ADV_CAP_100T4:
23045181Sgd78059 		*val = dmfep->param_anar_100T4;
23055181Sgd78059 		break;
23065181Sgd78059 
23075181Sgd78059 	case ETHER_STAT_ADV_CAP_100FDX:
23085181Sgd78059 		*val = dmfep->param_anar_100fdx;
23095181Sgd78059 		break;
23105181Sgd78059 
23115181Sgd78059 	case ETHER_STAT_ADV_CAP_100HDX:
23125181Sgd78059 		*val = dmfep->param_anar_100hdx;
23135181Sgd78059 		break;
23145181Sgd78059 
23155181Sgd78059 	case ETHER_STAT_ADV_CAP_10FDX:
23165181Sgd78059 		*val = dmfep->param_anar_10fdx;
23175181Sgd78059 		break;
23185181Sgd78059 
23195181Sgd78059 	case ETHER_STAT_ADV_CAP_10HDX:
23205181Sgd78059 		*val = dmfep->param_anar_10hdx;
23215181Sgd78059 		break;
23225181Sgd78059 
23235181Sgd78059 	case ETHER_STAT_ADV_REMFAULT:
23245181Sgd78059 		*val = dmfep->param_anar_remfault;
23255181Sgd78059 		break;
23265181Sgd78059 
23275181Sgd78059 	case ETHER_STAT_LP_CAP_AUTONEG:
23285181Sgd78059 		*val = dmfep->param_lp_autoneg;
23295181Sgd78059 		break;
23305181Sgd78059 
23315181Sgd78059 	case ETHER_STAT_LP_CAP_100T4:
23325181Sgd78059 		*val = dmfep->param_lp_100T4;
23335181Sgd78059 		break;
23345181Sgd78059 
23355181Sgd78059 	case ETHER_STAT_LP_CAP_100FDX:
23365181Sgd78059 		*val = dmfep->param_lp_100fdx;
23375181Sgd78059 		break;
23385181Sgd78059 
23395181Sgd78059 	case ETHER_STAT_LP_CAP_100HDX:
23405181Sgd78059 		*val = dmfep->param_lp_100hdx;
23415181Sgd78059 		break;
23425181Sgd78059 
23435181Sgd78059 	case ETHER_STAT_LP_CAP_10FDX:
23445181Sgd78059 		*val = dmfep->param_lp_10fdx;
23455181Sgd78059 		break;
23465181Sgd78059 
23475181Sgd78059 	case ETHER_STAT_LP_CAP_10HDX:
23485181Sgd78059 		*val = dmfep->param_lp_10hdx;
23495181Sgd78059 		break;
23505181Sgd78059 
23515181Sgd78059 	case ETHER_STAT_LP_REMFAULT:
23525181Sgd78059 		*val = dmfep->param_lp_remfault;
23535181Sgd78059 		break;
23545181Sgd78059 
23555181Sgd78059 	default:
23565181Sgd78059 		rv = ENOTSUP;
23575181Sgd78059 	}
23585181Sgd78059 
23595181Sgd78059 	mutex_exit(dmfep->txlock);
23605181Sgd78059 	mutex_exit(dmfep->rxlock);
23615181Sgd78059 	mutex_exit(dmfep->oplock);
23625181Sgd78059 	mutex_exit(dmfep->milock);
23635181Sgd78059 
23645181Sgd78059 	return (rv);
23655181Sgd78059 }
23665181Sgd78059 
23675181Sgd78059 #undef	DMFE_DBG
23685181Sgd78059 
23695181Sgd78059 
23705181Sgd78059 /*
23715181Sgd78059  * ========== Ioctl handler & subfunctions ==========
23725181Sgd78059  */
23735181Sgd78059 
23745181Sgd78059 #define	DMFE_DBG	DMFE_DBG_IOCTL	/* debug flag for this code	*/
23755181Sgd78059 
23765181Sgd78059 /*
23775181Sgd78059  * Loopback operation
23785181Sgd78059  *
23795181Sgd78059  * Support access to the internal loopback and external loopback
23805181Sgd78059  * functions selected via the Operation Mode Register (OPR).
23815181Sgd78059  * These will be used by netlbtest (see BugId 4370609)
23825181Sgd78059  *
23835181Sgd78059  * Note that changing the loopback mode causes a stop/restart cycle
23845181Sgd78059  *
23855181Sgd78059  * It would be nice to evolve this to support the ioctls in sys/netlb.h,
23865181Sgd78059  * but then it would be even better to use Brussels to configure this.
23875181Sgd78059  */
23885181Sgd78059 static enum ioc_reply
23895181Sgd78059 dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd)
23905181Sgd78059 {
23915181Sgd78059 	loopback_t *loop_req_p;
23925181Sgd78059 	uint32_t loopmode;
23935181Sgd78059 
23945181Sgd78059 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t))
23955181Sgd78059 		return (IOC_INVAL);
23965181Sgd78059 
2397*6990Sgd78059 	loop_req_p = (void *)mp->b_cont->b_rptr;
23985181Sgd78059 
23995181Sgd78059 	switch (cmd) {
24005181Sgd78059 	default:
24015181Sgd78059 		/*
24025181Sgd78059 		 * This should never happen ...
24035181Sgd78059 		 */
24045181Sgd78059 		dmfe_error(dmfep, "dmfe_loop_ioctl: invalid cmd 0x%x", cmd);
24055181Sgd78059 		return (IOC_INVAL);
24065181Sgd78059 
24075181Sgd78059 	case DMFE_GET_LOOP_MODE:
24085181Sgd78059 		/*
24095181Sgd78059 		 * This doesn't return the current loopback mode - it
24105181Sgd78059 		 * returns a bitmask :-( of all possible loopback modes
24115181Sgd78059 		 */
24125181Sgd78059 		DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE"));
24135181Sgd78059 		loop_req_p->loopback = DMFE_LOOPBACK_MODES;
24145181Sgd78059 		miocack(wq, mp, sizeof (loopback_t), 0);
24155181Sgd78059 		return (IOC_DONE);
24165181Sgd78059 
24175181Sgd78059 	case DMFE_SET_LOOP_MODE:
24185181Sgd78059 		/*
24195181Sgd78059 		 * Select any of the various loopback modes
24205181Sgd78059 		 */
24215181Sgd78059 		DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
24225181Sgd78059 		    loop_req_p->loopback));
24235181Sgd78059 		switch (loop_req_p->loopback) {
24245181Sgd78059 		default:
24255181Sgd78059 			return (IOC_INVAL);
24265181Sgd78059 
24275181Sgd78059 		case DMFE_LOOPBACK_OFF:
24285181Sgd78059 			loopmode = LOOPBACK_OFF;
24295181Sgd78059 			break;
24305181Sgd78059 
24315181Sgd78059 		case DMFE_PHY_A_LOOPBACK_ON:
24325181Sgd78059 			loopmode = LOOPBACK_PHY_A;
24335181Sgd78059 			break;
24345181Sgd78059 
24355181Sgd78059 		case DMFE_PHY_D_LOOPBACK_ON:
24365181Sgd78059 			loopmode = LOOPBACK_PHY_D;
24375181Sgd78059 			break;
24385181Sgd78059 
24395181Sgd78059 		case DMFE_INT_LOOPBACK_ON:
24405181Sgd78059 			loopmode = LOOPBACK_INTERNAL;
24415181Sgd78059 			break;
24425181Sgd78059 		}
24435181Sgd78059 
24445181Sgd78059 		if ((dmfep->opmode & LOOPBACK_MODE_MASK) != loopmode) {
24455181Sgd78059 			dmfep->opmode &= ~LOOPBACK_MODE_MASK;
24465181Sgd78059 			dmfep->opmode |= loopmode;
24475181Sgd78059 			return (IOC_RESTART_ACK);
24485181Sgd78059 		}
24495181Sgd78059 
24505181Sgd78059 		return (IOC_ACK);
24515181Sgd78059 	}
24525181Sgd78059 }
24535181Sgd78059 
24545181Sgd78059 /*
24555181Sgd78059  * Specific dmfe IOCTLs, the mac module handles the generic ones.
24565181Sgd78059  */
24575181Sgd78059 static void
24585181Sgd78059 dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
24595181Sgd78059 {
24605181Sgd78059 	dmfe_t *dmfep = arg;
24615181Sgd78059 	struct iocblk *iocp;
24625181Sgd78059 	enum ioc_reply status;
24635181Sgd78059 	int cmd;
24645181Sgd78059 
24655181Sgd78059 	/*
24665181Sgd78059 	 * Validate the command before bothering with the mutexen ...
24675181Sgd78059 	 */
2468*6990Sgd78059 	iocp = (void *)mp->b_rptr;
24695181Sgd78059 	cmd = iocp->ioc_cmd;
24705181Sgd78059 	switch (cmd) {
24715181Sgd78059 	default:
24725181Sgd78059 		DMFE_DEBUG(("dmfe_m_ioctl: unknown cmd 0x%x", cmd));
24735181Sgd78059 		miocnak(wq, mp, 0, EINVAL);
24745181Sgd78059 		return;
24755181Sgd78059 
24765181Sgd78059 	case DMFE_SET_LOOP_MODE:
24775181Sgd78059 	case DMFE_GET_LOOP_MODE:
24785181Sgd78059 	case ND_GET:
24795181Sgd78059 	case ND_SET:
24805181Sgd78059 		break;
24815181Sgd78059 	}
24825181Sgd78059 
24835181Sgd78059 	mutex_enter(dmfep->milock);
24845181Sgd78059 	mutex_enter(dmfep->oplock);
24855181Sgd78059 
24865181Sgd78059 	switch (cmd) {
24875181Sgd78059 	default:
24885181Sgd78059 		_NOTE(NOTREACHED)
24895181Sgd78059 		status = IOC_INVAL;
24905181Sgd78059 		break;
24915181Sgd78059 
24925181Sgd78059 	case DMFE_SET_LOOP_MODE:
24935181Sgd78059 	case DMFE_GET_LOOP_MODE:
24945181Sgd78059 		status = dmfe_loop_ioctl(dmfep, wq, mp, cmd);
24955181Sgd78059 		break;
24965181Sgd78059 
24975181Sgd78059 	case ND_GET:
24985181Sgd78059 	case ND_SET:
24995181Sgd78059 		status = dmfe_nd_ioctl(dmfep, wq, mp, cmd);
25005181Sgd78059 		break;
25015181Sgd78059 	}
25025181Sgd78059 
25035181Sgd78059 	/*
25045181Sgd78059 	 * Do we need to restart?
25055181Sgd78059 	 */
25065181Sgd78059 	switch (status) {
25075181Sgd78059 	default:
25085181Sgd78059 		break;
25095181Sgd78059 
25105181Sgd78059 	case IOC_RESTART_ACK:
25115181Sgd78059 	case IOC_RESTART:
25125181Sgd78059 		/*
25135181Sgd78059 		 * PHY parameters changed; we need to stop, update the
25145181Sgd78059 		 * PHY layer and restart before sending the reply or ACK
25155181Sgd78059 		 */
25165181Sgd78059 		dmfe_stop(dmfep);
25175181Sgd78059 		dmfe_update_phy(dmfep);
25185181Sgd78059 		dmfep->update_phy = B_FALSE;
25195181Sgd78059 
25205181Sgd78059 		/*
25215181Sgd78059 		 * The link will now most likely go DOWN and UP, because
25225181Sgd78059 		 * we've changed the loopback state or the link parameters
25235181Sgd78059 		 * or autonegotiation.  So we have to check that it's
25245181Sgd78059 		 * settled down before we restart the TX/RX processes.
25255181Sgd78059 		 * The ioctl code will have planted some reason strings
25265181Sgd78059 		 * to explain what's happening, so the link state change
25275181Sgd78059 		 * messages won't be printed on the console.  We wake the
25285181Sgd78059 		 * factotum to deal with link notifications, if any ...
25295181Sgd78059 		 */
25305181Sgd78059 		if (dmfe_check_link(dmfep)) {
25315181Sgd78059 			dmfe_recheck_link(dmfep, B_TRUE);
25325181Sgd78059 			dmfe_wake_factotum(dmfep, KS_LINK_CHECK, "ioctl");
25335181Sgd78059 		}
25345181Sgd78059 
25355181Sgd78059 		if (dmfep->mac_state == DMFE_MAC_STARTED)
25365181Sgd78059 			dmfe_start(dmfep);
25375181Sgd78059 		break;
25385181Sgd78059 	}
25395181Sgd78059 
25405181Sgd78059 	/*
25415181Sgd78059 	 * The 'reasons-for-link-change', if any, don't apply any more
25425181Sgd78059 	 */
25435181Sgd78059 	mutex_exit(dmfep->oplock);
25445181Sgd78059 	mutex_exit(dmfep->milock);
25455181Sgd78059 
25465181Sgd78059 	/*
25475181Sgd78059 	 * Finally, decide how to reply
25485181Sgd78059 	 */
25495181Sgd78059 	switch (status) {
25505181Sgd78059 	default:
25515181Sgd78059 		/*
25525181Sgd78059 		 * Error, reply with a NAK and EINVAL
25535181Sgd78059 		 */
25545181Sgd78059 		miocnak(wq, mp, 0, EINVAL);
25555181Sgd78059 		break;
25565181Sgd78059 
25575181Sgd78059 	case IOC_RESTART_ACK:
25585181Sgd78059 	case IOC_ACK:
25595181Sgd78059 		/*
25605181Sgd78059 		 * OK, reply with an ACK
25615181Sgd78059 		 */
25625181Sgd78059 		miocack(wq, mp, 0, 0);
25635181Sgd78059 		break;
25645181Sgd78059 
25655181Sgd78059 	case IOC_RESTART:
25665181Sgd78059 	case IOC_REPLY:
25675181Sgd78059 		/*
25685181Sgd78059 		 * OK, send prepared reply
25695181Sgd78059 		 */
25705181Sgd78059 		qreply(wq, mp);
25715181Sgd78059 		break;
25725181Sgd78059 
25735181Sgd78059 	case IOC_DONE:
25745181Sgd78059 		/*
25755181Sgd78059 		 * OK, reply already sent
25765181Sgd78059 		 */
25775181Sgd78059 		break;
25785181Sgd78059 	}
25795181Sgd78059 }
25805181Sgd78059 
25815181Sgd78059 #undef	DMFE_DBG
25825181Sgd78059 
25835181Sgd78059 
25845181Sgd78059 /*
25855181Sgd78059  * ========== Per-instance setup/teardown code ==========
25865181Sgd78059  */
25875181Sgd78059 
25885181Sgd78059 #define	DMFE_DBG	DMFE_DBG_INIT	/* debug flag for this code	*/
25895181Sgd78059 
25905181Sgd78059 /*
25915181Sgd78059  * Determine local MAC address & broadcast address for this interface
25925181Sgd78059  */
25935181Sgd78059 static void
25945181Sgd78059 dmfe_find_mac_address(dmfe_t *dmfep)
25955181Sgd78059 {
25965181Sgd78059 	uchar_t *prop;
25975181Sgd78059 	uint_t propsize;
25985181Sgd78059 	int err;
25995181Sgd78059 
26005181Sgd78059 	/*
26015181Sgd78059 	 * We have to find the "vendor's factory-set address".  This is
26025181Sgd78059 	 * the value of the property "local-mac-address", as set by OBP
26035181Sgd78059 	 * (or a .conf file!)
26045181Sgd78059 	 *
26055181Sgd78059 	 * If the property is not there, then we try to find the factory
26065181Sgd78059 	 * MAC address from the device's serial EEPROM.
26075181Sgd78059 	 */
26085181Sgd78059 	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
26095181Sgd78059 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
26105181Sgd78059 	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
26115181Sgd78059 	if (err == DDI_PROP_SUCCESS) {
26125181Sgd78059 		if (propsize == ETHERADDRL)
26135181Sgd78059 			ethaddr_copy(prop, dmfep->curr_addr);
26145181Sgd78059 		ddi_prop_free(prop);
26155181Sgd78059 	} else {
26165181Sgd78059 		/* no property set... check eeprom */
26175181Sgd78059 		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
26185181Sgd78059 		    ETHERADDRL);
26195181Sgd78059 	}
26205181Sgd78059 
26215181Sgd78059 	DMFE_DEBUG(("dmfe_find_mac_address: factory %s",
26225181Sgd78059 	    ether_sprintf((void *)dmfep->curr_addr)));
26235181Sgd78059 }
26245181Sgd78059 
26255181Sgd78059 static int
26265181Sgd78059 dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
26275181Sgd78059 	size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
26285181Sgd78059 	uint_t dma_flags, dma_area_t *dma_p)
26295181Sgd78059 {
26305181Sgd78059 	ddi_dma_cookie_t dma_cookie;
26315181Sgd78059 	uint_t ncookies;
26325181Sgd78059 	int err;
26335181Sgd78059 
26345181Sgd78059 	/*
26355181Sgd78059 	 * Allocate handle
26365181Sgd78059 	 */
26375181Sgd78059 	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
26385181Sgd78059 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
26395181Sgd78059 	if (err != DDI_SUCCESS)
26405181Sgd78059 		return (DDI_FAILURE);
26415181Sgd78059 
26425181Sgd78059 	/*
26435181Sgd78059 	 * Allocate memory
26445181Sgd78059 	 */
26455181Sgd78059 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
26465181Sgd78059 	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
26475181Sgd78059 	    DDI_DMA_SLEEP, NULL,
26485181Sgd78059 	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
26495181Sgd78059 	if (err != DDI_SUCCESS)
26505181Sgd78059 		return (DDI_FAILURE);
26515181Sgd78059 
26525181Sgd78059 	/*
26535181Sgd78059 	 * Bind the two together
26545181Sgd78059 	 */
26555181Sgd78059 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
26565181Sgd78059 	    dma_p->mem_va, dma_p->alength, dma_flags,
26575181Sgd78059 	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
26585181Sgd78059 	if (err != DDI_DMA_MAPPED)
26595181Sgd78059 		return (DDI_FAILURE);
26605181Sgd78059 	if ((dma_p->ncookies = ncookies) != 1)
26615181Sgd78059 		return (DDI_FAILURE);
26625181Sgd78059 
26635181Sgd78059 	dma_p->mem_dvma = dma_cookie.dmac_address;
26645181Sgd78059 	if (setup > 0) {
26655181Sgd78059 		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
26665181Sgd78059 		dma_p->setup_va = dma_p->mem_va + memsize;
26675181Sgd78059 	} else {
26685181Sgd78059 		dma_p->setup_dvma = 0;
26695181Sgd78059 		dma_p->setup_va = NULL;
26705181Sgd78059 	}
26715181Sgd78059 
26725181Sgd78059 	return (DDI_SUCCESS);
26735181Sgd78059 }
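
/*
 * Resulting layout of the single area bound above (always one cookie):
 *
 *	mem_va/mem_dvma                   setup_va/setup_dvma
 *	|<---------- memsize ----------->|<--- setup --->|<-- slop -->|
 *
 * When 'setup' is zero the setup_* fields are simply cleared; the
 * trailing 'slop' bytes pad the allocation (see the DMFE_SLOP uses in
 * dmfe_alloc_bufs() below).
 */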
26745181Sgd78059 
26755181Sgd78059 /*
26765181Sgd78059  * This function allocates the transmit and receive buffers and descriptors.
26775181Sgd78059  */
26785181Sgd78059 static int
26795181Sgd78059 dmfe_alloc_bufs(dmfe_t *dmfep)
26805181Sgd78059 {
26815181Sgd78059 	size_t memsize;
26825181Sgd78059 	int err;
26835181Sgd78059 
26845181Sgd78059 	/*
26855181Sgd78059 	 * Allocate memory & handles for TX descriptor ring
26865181Sgd78059 	 */
26875181Sgd78059 	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
26885181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
26895181Sgd78059 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
26905181Sgd78059 	    &dmfep->tx_desc);
26915181Sgd78059 	if (err != DDI_SUCCESS)
26925181Sgd78059 		return (DDI_FAILURE);
26935181Sgd78059 
26945181Sgd78059 	/*
26955181Sgd78059 	 * Allocate memory & handles for TX buffers
26965181Sgd78059 	 */
26975181Sgd78059 	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
26985181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
26995181Sgd78059 	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
27005181Sgd78059 	    &dmfep->tx_buff);
27015181Sgd78059 	if (err != DDI_SUCCESS)
27025181Sgd78059 		return (DDI_FAILURE);
27035181Sgd78059 
27045181Sgd78059 	/*
27055181Sgd78059 	 * Allocate memory & handles for RX descriptor ring
27065181Sgd78059 	 */
27075181Sgd78059 	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
27085181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
27095181Sgd78059 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
27105181Sgd78059 	    &dmfep->rx_desc);
27115181Sgd78059 	if (err != DDI_SUCCESS)
27125181Sgd78059 		return (DDI_FAILURE);
27135181Sgd78059 
27145181Sgd78059 	/*
27155181Sgd78059 	 * Allocate memory & handles for RX buffers
27165181Sgd78059 	 */
27175181Sgd78059 	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
27185181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
27195181Sgd78059 	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
27205181Sgd78059 	if (err != DDI_SUCCESS)
27215181Sgd78059 		return (DDI_FAILURE);
27225181Sgd78059 
27235181Sgd78059 	/*
27245181Sgd78059 	 * Allocate bitmasks for tx packet type tracking
27255181Sgd78059 	 */
27265181Sgd78059 	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
27275181Sgd78059 	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
27285181Sgd78059 
27295181Sgd78059 	return (DDI_SUCCESS);
27305181Sgd78059 }
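
/*
 * Illustrative sketch only -- hypothetical helpers, not part of this
 * driver.  The tx_mcast/tx_bcast maps allocated above hold one bit per
 * TX descriptor (n_desc / NBBY bytes); assuming a byte-wide map, a
 * descriptor slot could be marked and tested like this:
 */
#if 0
static void
dmfe_example_mark_mcast(dmfe_t *dmfep, uint_t slot)
{
	dmfep->tx_mcast[slot / NBBY] |= (1U << (slot % NBBY));
}

static boolean_t
dmfe_example_slot_was_mcast(dmfe_t *dmfep, uint_t slot)
{
	return ((dmfep->tx_mcast[slot / NBBY] & (1U << (slot % NBBY))) ?
	    B_TRUE : B_FALSE);
}
#endif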
27315181Sgd78059 
27325181Sgd78059 static void
27335181Sgd78059 dmfe_free_dma_mem(dma_area_t *dma_p)
27345181Sgd78059 {
27355181Sgd78059 	if (dma_p->dma_hdl != NULL) {
27365181Sgd78059 		if (dma_p->ncookies) {
27375181Sgd78059 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
27385181Sgd78059 			dma_p->ncookies = 0;
27395181Sgd78059 		}
27405181Sgd78059 		ddi_dma_free_handle(&dma_p->dma_hdl);
27415181Sgd78059 		dma_p->dma_hdl = NULL;
27425181Sgd78059 		dma_p->mem_dvma = 0;
27435181Sgd78059 		dma_p->setup_dvma = 0;
27445181Sgd78059 	}
27455181Sgd78059 
27465181Sgd78059 	if (dma_p->acc_hdl != NULL) {
27475181Sgd78059 		ddi_dma_mem_free(&dma_p->acc_hdl);
27485181Sgd78059 		dma_p->acc_hdl = NULL;
27495181Sgd78059 		dma_p->mem_va = NULL;
27505181Sgd78059 		dma_p->setup_va = NULL;
27515181Sgd78059 	}
27525181Sgd78059 }
27535181Sgd78059 
27545181Sgd78059 /*
27555181Sgd78059  * This routine frees the transmit and receive buffers and descriptors.
27565181Sgd78059  * Make sure the chip is stopped before calling it!
27575181Sgd78059  */
27585181Sgd78059 static void
27595181Sgd78059 dmfe_free_bufs(dmfe_t *dmfep)
27605181Sgd78059 {
27615181Sgd78059 	dmfe_free_dma_mem(&dmfep->rx_buff);
27625181Sgd78059 	dmfe_free_dma_mem(&dmfep->rx_desc);
27635181Sgd78059 	dmfe_free_dma_mem(&dmfep->tx_buff);
27645181Sgd78059 	dmfe_free_dma_mem(&dmfep->tx_desc);
27655181Sgd78059 	kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
27665181Sgd78059 	kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
27675181Sgd78059 }
27685181Sgd78059 
27695181Sgd78059 static void
27705181Sgd78059 dmfe_unattach(dmfe_t *dmfep)
27715181Sgd78059 {
27725181Sgd78059 	/*
27735181Sgd78059 	 * Clean up and free all DMFE data structures.  Teardown is keyed
	 * off the PROGRESS_* flags set as each attach step completes, so a
	 * partially-completed attach can be unwound safely from here.
27745181Sgd78059 	 */
27755181Sgd78059 	if (dmfep->cycid != NULL) {
27765181Sgd78059 		ddi_periodic_delete(dmfep->cycid);
27775181Sgd78059 		dmfep->cycid = NULL;
27785181Sgd78059 	}
27795181Sgd78059 
27805181Sgd78059 	if (dmfep->ksp_drv != NULL)
27815181Sgd78059 		kstat_delete(dmfep->ksp_drv);
27825181Sgd78059 	if (dmfep->progress & PROGRESS_HWINT) {
27835181Sgd78059 		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
27845181Sgd78059 		mutex_destroy(dmfep->txlock);
27855181Sgd78059 		mutex_destroy(dmfep->rxlock);
27865181Sgd78059 		mutex_destroy(dmfep->oplock);
27875181Sgd78059 	}
27885181Sgd78059 	if (dmfep->progress & PROGRESS_SOFTINT)
27895181Sgd78059 		ddi_remove_softintr(dmfep->factotum_id);
27905181Sgd78059 	if (dmfep->progress & PROGRESS_BUFS)
27915181Sgd78059 		dmfe_free_bufs(dmfep);
27925181Sgd78059 	if (dmfep->progress & PROGRESS_REGS)
27935181Sgd78059 		ddi_regs_map_free(&dmfep->io_handle);
27945181Sgd78059 	if (dmfep->progress & PROGRESS_NDD)
27955181Sgd78059 		dmfe_nd_cleanup(dmfep);
27965181Sgd78059 
27975181Sgd78059 	kmem_free(dmfep, sizeof (*dmfep));
27985181Sgd78059 }
27995181Sgd78059 
28005181Sgd78059 static int
28015181Sgd78059 dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
28025181Sgd78059 {
28035181Sgd78059 	ddi_acc_handle_t handle;
28045181Sgd78059 	uint32_t regval;
28055181Sgd78059 
28065181Sgd78059 	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
28075181Sgd78059 		return (DDI_FAILURE);
28085181Sgd78059 
28095181Sgd78059 	/*
28105181Sgd78059 	 * Get vendor/device/revision.  We expect (but don't check) that
28115181Sgd78059 	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
28125181Sgd78059 	 */
28135181Sgd78059 	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
28145181Sgd78059 	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
28155181Sgd78059 	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);
28165181Sgd78059 
28175181Sgd78059 	/*
28185181Sgd78059 	 * Turn on Bus Master Enable bit and ensure the device is not asleep
28195181Sgd78059 	 */
28205181Sgd78059 	regval = pci_config_get32(handle, PCI_CONF_COMM);
28215181Sgd78059 	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));
28225181Sgd78059 
28235181Sgd78059 	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
28245181Sgd78059 	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
28255181Sgd78059 	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
28265181Sgd78059 
28275181Sgd78059 	pci_config_teardown(&handle);
28285181Sgd78059 	return (DDI_SUCCESS);
28295181Sgd78059 }
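
/*
 * Illustrative sketch only: a stricter variant of the routine above
 * could reject foreign hardware by checking the IDs it reads against
 * the values named in its comment, e.g.
 *
 *	if (idp->vendor != DAVICOM_VENDOR_ID ||
 *	    idp->device != DEVICE_ID_9102) {
 *		pci_config_teardown(&handle);
 *		return (DDI_FAILURE);
 *	}
 */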
28305181Sgd78059 
28315181Sgd78059 struct ks_index {
28325181Sgd78059 	int index;
28335181Sgd78059 	char *name;
28345181Sgd78059 };
28355181Sgd78059 
28365181Sgd78059 static const struct ks_index ks_drv_names[] = {
28375181Sgd78059 	{	KS_INTERRUPT,			"intr"			},
28385181Sgd78059 	{	KS_CYCLIC_RUN,			"cyclic_run"		},
28395181Sgd78059 
28405181Sgd78059 	{	KS_TICK_LINK_STATE,		"link_state_change"	},
28415181Sgd78059 	{	KS_TICK_LINK_POLL,		"link_state_poll"	},
28425181Sgd78059 	{	KS_TX_STALL,			"tx_stall_detect"	},
28435181Sgd78059 	{	KS_CHIP_ERROR,			"chip_error_interrupt"	},
28445181Sgd78059 
28455181Sgd78059 	{	KS_FACTOTUM_RUN,		"factotum_run"		},
28465181Sgd78059 	{	KS_RECOVERY,			"factotum_recover"	},
28475181Sgd78059 	{	KS_LINK_CHECK,			"factotum_link_check"	},
28485181Sgd78059 
28495181Sgd78059 	{	KS_LINK_UP_CNT,			"link_up_cnt"		},
28505181Sgd78059 	{	KS_LINK_DROP_CNT,		"link_drop_cnt"		},
28515181Sgd78059 
28525181Sgd78059 	{	KS_MIIREG_BMSR,			"mii_status"		},
28535181Sgd78059 	{	KS_MIIREG_ANAR,			"mii_advert_cap"	},
28545181Sgd78059 	{	KS_MIIREG_ANLPAR,		"mii_partner_cap"	},
28555181Sgd78059 	{	KS_MIIREG_ANER,			"mii_expansion_cap"	},
28565181Sgd78059 	{	KS_MIIREG_DSCSR,		"mii_dscsr"		},
28575181Sgd78059 
28585181Sgd78059 	{	-1,				NULL			}
28595181Sgd78059 };
28605181Sgd78059 
28615181Sgd78059 static void
28625181Sgd78059 dmfe_init_kstats(dmfe_t *dmfep, int instance)
28635181Sgd78059 {
28645181Sgd78059 	kstat_t *ksp;
28655181Sgd78059 	kstat_named_t *knp;
28665181Sgd78059 	const struct ks_index *ksip;
28675181Sgd78059 
28685181Sgd78059 	/* no need to create MII stats, the mac module already does it */
28695181Sgd78059 
28705181Sgd78059 	/* Create and initialise driver-defined kstats */
28715181Sgd78059 	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
28725181Sgd78059 	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
28735181Sgd78059 	if (ksp != NULL) {
28745181Sgd78059 		for (knp = ksp->ks_data, ksip = ks_drv_names;
28755181Sgd78059 		    ksip->name != NULL; ++ksip) {
28765181Sgd78059 			kstat_named_init(&knp[ksip->index], ksip->name,
28775181Sgd78059 			    KSTAT_DATA_UINT64);
28785181Sgd78059 		}
28795181Sgd78059 		dmfep->ksp_drv = ksp;
28805181Sgd78059 		dmfep->knp_drv = knp;
28815181Sgd78059 		kstat_install(ksp);
28825181Sgd78059 	} else {
28835181Sgd78059 		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
28845181Sgd78059 	}
28855181Sgd78059 }
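
/*
 * Illustrative sketch only -- a hypothetical helper, not part of this
 * driver.  With the named kstats installed above, each event counter
 * is a 64-bit slot in knp_drv, so bumping one might look like the code
 * below; the counters can then be read from userland with kstat(1M),
 * e.g. "kstat -n dmfe_events".
 */
#if 0
static void
dmfe_example_count_event(dmfe_t *dmfep, uint_t ks_id)
{
	if (dmfep->knp_drv != NULL)
		dmfep->knp_drv[ks_id].value.ui64 += 1;
}
#endif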
28865181Sgd78059 
28875181Sgd78059 static int
28885181Sgd78059 dmfe_resume(dev_info_t *devinfo)
28895181Sgd78059 {
28905181Sgd78059 	dmfe_t *dmfep;				/* Our private data	*/
28915181Sgd78059 	chip_id_t chipid;
28925181Sgd78059 
28935181Sgd78059 	dmfep = ddi_get_driver_private(devinfo);
28945181Sgd78059 	if (dmfep == NULL)
28955181Sgd78059 		return (DDI_FAILURE);
28965181Sgd78059 
28975181Sgd78059 	/*
28985181Sgd78059 	 * Refuse to resume if the data structures aren't consistent
28995181Sgd78059 	 */
29005181Sgd78059 	if (dmfep->devinfo != devinfo)
29015181Sgd78059 		return (DDI_FAILURE);
29025181Sgd78059 
29035181Sgd78059 	/*
29045181Sgd78059 	 * Refuse to resume if the chip's changed its identity (*boggle*)
29055181Sgd78059 	 */
29065181Sgd78059 	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
29075181Sgd78059 		return (DDI_FAILURE);
29085181Sgd78059 	if (chipid.vendor != dmfep->chipid.vendor)
29095181Sgd78059 		return (DDI_FAILURE);
29105181Sgd78059 	if (chipid.device != dmfep->chipid.device)
29115181Sgd78059 		return (DDI_FAILURE);
29125181Sgd78059 	if (chipid.revision != dmfep->chipid.revision)
29135181Sgd78059 		return (DDI_FAILURE);
29145181Sgd78059 
29155181Sgd78059 	/*
29165181Sgd78059 	 * All OK, reinitialise h/w & kick off MAC scheduling
29175181Sgd78059 	 */
29185181Sgd78059 	mutex_enter(dmfep->oplock);
29195181Sgd78059 	dmfe_restart(dmfep);
29205181Sgd78059 	mutex_exit(dmfep->oplock);
29215181Sgd78059 	mac_tx_update(dmfep->mh);
29225181Sgd78059 	return (DDI_SUCCESS);
29235181Sgd78059 }
29245181Sgd78059 
29255181Sgd78059 /*
29265181Sgd78059  * attach(9E) -- Attach a device to the system
29275181Sgd78059  *
29285181Sgd78059  * Called once for each board successfully probed.
29295181Sgd78059  */
29305181Sgd78059 static int
29315181Sgd78059 dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
29325181Sgd78059 {
29335181Sgd78059 	mac_register_t *macp;
29345181Sgd78059 	dmfe_t *dmfep;				/* Our private data	*/
29355181Sgd78059 	uint32_t csr6;
29365181Sgd78059 	int instance;
29375181Sgd78059 	int err;
29385181Sgd78059 
29395181Sgd78059 	instance = ddi_get_instance(devinfo);
29405181Sgd78059 
29415181Sgd78059 	switch (cmd) {
29425181Sgd78059 	default:
29435181Sgd78059 		return (DDI_FAILURE);
29445181Sgd78059 
29455181Sgd78059 	case DDI_RESUME:
29465181Sgd78059 		return (dmfe_resume(devinfo));
29475181Sgd78059 
29485181Sgd78059 	case DDI_ATTACH:
29495181Sgd78059 		break;
29505181Sgd78059 	}
29515181Sgd78059 
29525181Sgd78059 	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
29535181Sgd78059 	ddi_set_driver_private(devinfo, dmfep);
29545181Sgd78059 	dmfep->devinfo = devinfo;
29555181Sgd78059 	dmfep->dmfe_guard = DMFE_GUARD;
29565181Sgd78059 
29575181Sgd78059 	/*
29585181Sgd78059 	 * Initialize more fields in DMFE private data
29595181Sgd78059 	 * Determine the local MAC address
29605181Sgd78059 	 */
29615181Sgd78059 #if	DMFEDEBUG
29625181Sgd78059 	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
29635181Sgd78059 	    debug_propname, dmfe_debug);
29645181Sgd78059 #endif	/* DMFEDEBUG */
29655181Sgd78059 	dmfep->cycid = NULL;
29665181Sgd78059 	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
29675181Sgd78059 	    instance);
29685181Sgd78059 
29695181Sgd78059 	/*
29705181Sgd78059 	 * Check for custom "opmode-reg-value" property;
29715181Sgd78059 	 * if none, use the defaults below for CSR6 ...
29725181Sgd78059 	 */
29735181Sgd78059 	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
29745181Sgd78059 	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
29755181Sgd78059 	    DDI_PROP_DONTPASS, opmode_propname, csr6);
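
	/*
	 * For example (hypothetical value), a dmfe.conf entry such as
	 *
	 *	opmode-reg-value=0x2002000;
	 *
	 * would take precedence over the CSR6 default computed above.
	 */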
29765181Sgd78059 
29775181Sgd78059 	/*
29785181Sgd78059 	 * Read chip ID & set up config space command register(s)
29795181Sgd78059 	 */
29805181Sgd78059 	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
29815181Sgd78059 		dmfe_error(dmfep, "dmfe_config_init() failed");
29825181Sgd78059 		goto attach_fail;
29835181Sgd78059 	}
29845181Sgd78059 	dmfep->progress |= PROGRESS_CONFIG;
29855181Sgd78059 
29865181Sgd78059 	/*
29875181Sgd78059 	 * Register NDD-tweakable parameters
29885181Sgd78059 	 */
29895181Sgd78059 	if (dmfe_nd_init(dmfep)) {
29905181Sgd78059 		dmfe_error(dmfep, "dmfe_nd_init() failed");
29915181Sgd78059 		goto attach_fail;
29925181Sgd78059 	}
29935181Sgd78059 	dmfep->progress |= PROGRESS_NDD;
29945181Sgd78059 
29955181Sgd78059 	/*
29965181Sgd78059 	 * Map operating registers
29975181Sgd78059 	 */
29985181Sgd78059 	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
29995181Sgd78059 	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
30005181Sgd78059 	if (err != DDI_SUCCESS) {
30015181Sgd78059 		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
30025181Sgd78059 		goto attach_fail;
30035181Sgd78059 	}
30045181Sgd78059 	dmfep->progress |= PROGRESS_REGS;
30055181Sgd78059 
30065181Sgd78059 	/*
30075181Sgd78059 	 * Get our MAC address.
30085181Sgd78059 	 */
30095181Sgd78059 	dmfe_find_mac_address(dmfep);
30105181Sgd78059 
30115181Sgd78059 	/*
30125181Sgd78059 	 * Allocate the TX and RX descriptors/buffers.
30135181Sgd78059 	 */
30145181Sgd78059 	dmfep->tx.n_desc = dmfe_tx_desc;
30155181Sgd78059 	dmfep->rx.n_desc = dmfe_rx_desc;
30165181Sgd78059 	err = dmfe_alloc_bufs(dmfep);
30175181Sgd78059 	if (err != DDI_SUCCESS) {
30185181Sgd78059 		dmfe_error(dmfep, "DMA buffer allocation failed");
30195181Sgd78059 		goto attach_fail;
30205181Sgd78059 	}
30215181Sgd78059 	dmfep->progress |= PROGRESS_BUFS;
30225181Sgd78059 
30235181Sgd78059 	/*
30245181Sgd78059 	 * Add the softint handler
30255181Sgd78059 	 */
30265181Sgd78059 	dmfep->link_poll_tix = factotum_start_tix;
30275181Sgd78059 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
30285181Sgd78059 	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
30295181Sgd78059 		dmfe_error(dmfep, "ddi_add_softintr() failed");
30305181Sgd78059 		goto attach_fail;
30315181Sgd78059 	}
30325181Sgd78059 	dmfep->progress |= PROGRESS_SOFTINT;
30335181Sgd78059 
30345181Sgd78059 	/*
30355181Sgd78059 	 * Add the h/w interrupt handler & initialise mutexen
30365181Sgd78059 	 */
30375181Sgd78059 	if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
30385181Sgd78059 	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
30395181Sgd78059 		dmfe_error(dmfep, "ddi_add_intr() failed");
30405181Sgd78059 		goto attach_fail;
30415181Sgd78059 	}
30425181Sgd78059 	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
30435181Sgd78059 	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
30445181Sgd78059 	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
30455181Sgd78059 	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
30465181Sgd78059 	dmfep->progress |= PROGRESS_HWINT;
30475181Sgd78059 
30485181Sgd78059 	/*
30495181Sgd78059 	 * Create & initialise named kstats
30505181Sgd78059 	 */
30515181Sgd78059 	dmfe_init_kstats(dmfep, instance);
30525181Sgd78059 
30535181Sgd78059 	/*
30545181Sgd78059 	 * Reset & initialise the chip and the ring buffers
30555181Sgd78059 	 * Initialise the (internal) PHY
30565181Sgd78059 	 */
30575181Sgd78059 	mutex_enter(dmfep->oplock);
30585181Sgd78059 	mutex_enter(dmfep->rxlock);
30595181Sgd78059 	mutex_enter(dmfep->txlock);
30605181Sgd78059 
30615181Sgd78059 	dmfe_reset(dmfep);
30625181Sgd78059 
30635181Sgd78059 	/*
30645181Sgd78059 	 * Prepare the setup packet
30655181Sgd78059 	 */
30665181Sgd78059 	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
30675181Sgd78059 	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
30685181Sgd78059 	dmfep->addr_set = B_FALSE;
30695181Sgd78059 	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
30705181Sgd78059 	dmfep->mac_state = DMFE_MAC_RESET;
30715181Sgd78059 
30725181Sgd78059 	mutex_exit(dmfep->txlock);
30735181Sgd78059 	mutex_exit(dmfep->rxlock);
30745181Sgd78059 	mutex_exit(dmfep->oplock);
30755181Sgd78059 
30765181Sgd78059 	dmfep->link_state = LINK_STATE_UNKNOWN;
30775181Sgd78059 	if (dmfe_init_phy(dmfep) != B_TRUE)
30785181Sgd78059 		goto attach_fail;
30795181Sgd78059 	dmfep->update_phy = B_TRUE;
30805181Sgd78059 
30815181Sgd78059 	/*
30825181Sgd78059 	 * Send a reasonable setup frame.  This configures our starting
30835181Sgd78059 	 * address and the broadcast address.
30845181Sgd78059 	 */
30855181Sgd78059 	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);
30865181Sgd78059 
30875181Sgd78059 	/*
30885181Sgd78059 	 * Initialize pointers to device specific functions which
30895181Sgd78059 	 * will be used by the generic layer.
30905181Sgd78059 	 */
30915181Sgd78059 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
30925181Sgd78059 		goto attach_fail;
30935181Sgd78059 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
30945181Sgd78059 	macp->m_driver = dmfep;
30955181Sgd78059 	macp->m_dip = devinfo;
30965181Sgd78059 	macp->m_src_addr = dmfep->curr_addr;
30975181Sgd78059 	macp->m_callbacks = &dmfe_m_callbacks;
30985181Sgd78059 	macp->m_min_sdu = 0;
30995181Sgd78059 	macp->m_max_sdu = ETHERMTU;
31005895Syz147064 	macp->m_margin = VLAN_TAGSZ;
31015181Sgd78059 
31025181Sgd78059 	/*
31035181Sgd78059 	 * Finally, we're ready to register ourselves with the MAC layer
31045181Sgd78059 	 * interface; if this succeeds, we're all ready to start()
31055181Sgd78059 	 */
31065181Sgd78059 	err = mac_register(macp, &dmfep->mh);
31075181Sgd78059 	mac_free(macp);
31085181Sgd78059 	if (err != 0)
31095181Sgd78059 		goto attach_fail;
31105181Sgd78059 	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
31115181Sgd78059 
31125181Sgd78059 	/*
31135181Sgd78059 	 * Install the cyclic callback that we use to check for link
31145181Sgd78059 	 * status, transmit stall, etc.  The callback (dmfe_cyclic()) is
31155181Sgd78059 	 * invoked periodically in kernel context.
31165181Sgd78059 	 */
31175181Sgd78059 	ASSERT(dmfep->cycid == NULL);
31185181Sgd78059 	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
31195181Sgd78059 	    dmfe_tick_us * 1000, DDI_IPL_0);
31205181Sgd78059 	return (DDI_SUCCESS);
31215181Sgd78059 
31225181Sgd78059 attach_fail:
31235181Sgd78059 	dmfe_unattach(dmfep);
31245181Sgd78059 	return (DDI_FAILURE);
31255181Sgd78059 }
31265181Sgd78059 
31275181Sgd78059 /*
31285181Sgd78059  *	dmfe_suspend() -- suspend transmit/receive for powerdown
31295181Sgd78059  */
31305181Sgd78059 static int
31315181Sgd78059 dmfe_suspend(dmfe_t *dmfep)
31325181Sgd78059 {
31335181Sgd78059 	/*
31345181Sgd78059 	 * Just stop processing ...
31355181Sgd78059 	 */
31365181Sgd78059 	mutex_enter(dmfep->oplock);
31375181Sgd78059 	dmfe_stop(dmfep);
31385181Sgd78059 	mutex_exit(dmfep->oplock);
31395181Sgd78059 
31405181Sgd78059 	return (DDI_SUCCESS);
31415181Sgd78059 }
31425181Sgd78059 
31435181Sgd78059 /*
31445181Sgd78059  * detach(9E) -- Detach a device from the system
31455181Sgd78059  */
31465181Sgd78059 static int
31475181Sgd78059 dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
31485181Sgd78059 {
31495181Sgd78059 	dmfe_t *dmfep;
31505181Sgd78059 
31515181Sgd78059 	dmfep = ddi_get_driver_private(devinfo);
31525181Sgd78059 
31535181Sgd78059 	switch (cmd) {
31545181Sgd78059 	default:
31555181Sgd78059 		return (DDI_FAILURE);
31565181Sgd78059 
31575181Sgd78059 	case DDI_SUSPEND:
31585181Sgd78059 		return (dmfe_suspend(dmfep));
31595181Sgd78059 
31605181Sgd78059 	case DDI_DETACH:
31615181Sgd78059 		break;
31625181Sgd78059 	}
31635181Sgd78059 
31645181Sgd78059 	/*
31655181Sgd78059 	 * Unregister from the MAC subsystem.  This can fail, in
31665181Sgd78059 	 * particular if there are DLPI style-2 streams still open -
31675181Sgd78059 	 * in which case we just return failure without shutting
31685181Sgd78059 	 * down chip operations.
31695181Sgd78059 	 */
31705181Sgd78059 	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
31715181Sgd78059 		return (DDI_FAILURE);
31725181Sgd78059 
31735181Sgd78059 	/*
31745181Sgd78059 	 * All activity stopped, so we can clean up & exit
31755181Sgd78059 	 */
31765181Sgd78059 	dmfe_unattach(dmfep);
31775181Sgd78059 	return (DDI_SUCCESS);
31785181Sgd78059 }
31795181Sgd78059 
31805181Sgd78059 
31815181Sgd78059 /*
31825181Sgd78059  * ========== Module Loading Data & Entry Points ==========
31835181Sgd78059  */
31845181Sgd78059 
31855181Sgd78059 DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
31865181Sgd78059 	nodev, NULL, D_MP, NULL);
31875181Sgd78059 
31885181Sgd78059 static struct modldrv dmfe_modldrv = {
31895181Sgd78059 	&mod_driverops,		/* Type of module.  This one is a driver */
31905181Sgd78059 	dmfe_ident,		/* short description */
31915181Sgd78059 	&dmfe_dev_ops		/* driver specific ops */
31925181Sgd78059 };
31935181Sgd78059 
31945181Sgd78059 static struct modlinkage modlinkage = {
31955181Sgd78059 	MODREV_1, (void *)&dmfe_modldrv, NULL
31965181Sgd78059 };
31975181Sgd78059 
31985181Sgd78059 int
31995181Sgd78059 _info(struct modinfo *modinfop)
32005181Sgd78059 {
32015181Sgd78059 	return (mod_info(&modlinkage, modinfop));
32025181Sgd78059 }
32035181Sgd78059 
32045181Sgd78059 int
32055181Sgd78059 _init(void)
32065181Sgd78059 {
32075181Sgd78059 	uint32_t tmp100;
32085181Sgd78059 	uint32_t tmp10;
32095181Sgd78059 	int i;
32105181Sgd78059 	int status;
32115181Sgd78059 
32125181Sgd78059 	/* Calculate global timing parameters */
32135181Sgd78059 	tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
32145181Sgd78059 	tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
32155181Sgd78059 
32165181Sgd78059 	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
32175181Sgd78059 		switch (i) {
32185181Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
32195181Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
32205181Sgd78059 			/*
32215181Sgd78059 			 * The chip doesn't spontaneously recover from
32225181Sgd78059 			 * a stall in these states, so we reset early
32235181Sgd78059 			 */
32245181Sgd78059 			stall_100_tix[i] = tmp100;
32255181Sgd78059 			stall_10_tix[i] = tmp10;
32265181Sgd78059 			break;
32275181Sgd78059 
32285181Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
32295181Sgd78059 		default:
32305181Sgd78059 			/*
32315181Sgd78059 			 * The chip has been seen to spontaneously recover
32325181Sgd78059 			 * after an apparent stall in the SUSPEND state,
32335181Sgd78059 			 * so we'll allow it rather longer to do so.  As
32345181Sgd78059 			 * stalls in other states have not been observed,
32355181Sgd78059 			 * we'll use long timeouts for them too ...
32365181Sgd78059 			 */
32375181Sgd78059 			stall_100_tix[i] = tmp100 * 20;
32385181Sgd78059 			stall_10_tix[i] = tmp10 * 20;
32395181Sgd78059 			break;
32405181Sgd78059 		}
32415181Sgd78059 	}
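
	/*
	 * Worked example with hypothetical settings: a 25000us tick and a
	 * 100000us 100Mb/s stall limit give tmp100 = (100000 + 25000 - 1) /
	 * 25000 = 4, so FETCH_DATA/WAIT_END stalls force a reset after 4
	 * ticks while the SUSPEND/default states are allowed 80.
	 */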
32425181Sgd78059 
32435181Sgd78059 	factotum_tix = (dmfe_link_poll_us+dmfe_tick_us-1)/dmfe_tick_us;
32445181Sgd78059 	factotum_fast_tix = 1+(factotum_tix/5);
32455181Sgd78059 	factotum_start_tix = 1+(factotum_tix*2);
32465181Sgd78059 
32475181Sgd78059 	mac_init_ops(&dmfe_dev_ops, "dmfe");
32485181Sgd78059 	status = mod_install(&modlinkage);
32495181Sgd78059 	if (status == DDI_SUCCESS)
32505181Sgd78059 		dmfe_log_init();
32515181Sgd78059 
32525181Sgd78059 	return (status);
32535181Sgd78059 }
32545181Sgd78059 
32555181Sgd78059 int
32565181Sgd78059 _fini(void)
32575181Sgd78059 {
32585181Sgd78059 	int status;
32595181Sgd78059 
32605181Sgd78059 	status = mod_remove(&modlinkage);
32615181Sgd78059 	if (status == DDI_SUCCESS) {
32625181Sgd78059 		mac_fini_ops(&dmfe_dev_ops);
32635181Sgd78059 		dmfe_log_fini();
32645181Sgd78059 	}
32655181Sgd78059 
32665181Sgd78059 	return (status);
32675181Sgd78059 }
32685181Sgd78059 
32695181Sgd78059 #undef	DMFE_DBG
3270