xref: /onnv-gate/usr/src/uts/common/io/dmfe/dmfe_main.c (revision 7656:2621e50fdf4a)
15181Sgd78059 /*
25181Sgd78059  * CDDL HEADER START
35181Sgd78059  *
45181Sgd78059  * The contents of this file are subject to the terms of the
55181Sgd78059  * Common Development and Distribution License (the "License").
65181Sgd78059  * You may not use this file except in compliance with the License.
75181Sgd78059  *
85181Sgd78059  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
95181Sgd78059  * or http://www.opensolaris.org/os/licensing.
105181Sgd78059  * See the License for the specific language governing permissions
115181Sgd78059  * and limitations under the License.
125181Sgd78059  *
135181Sgd78059  * When distributing Covered Code, include this CDDL HEADER in each
145181Sgd78059  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
155181Sgd78059  * If applicable, add the following below this CDDL HEADER, with the
165181Sgd78059  * fields enclosed by brackets "[]" replaced with your own identifying
175181Sgd78059  * information: Portions Copyright [yyyy] [name of copyright owner]
185181Sgd78059  *
195181Sgd78059  * CDDL HEADER END
205181Sgd78059  */
215181Sgd78059 /*
225895Syz147064  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
235181Sgd78059  * Use is subject to license terms.
245181Sgd78059  */
255181Sgd78059 
265181Sgd78059 
275181Sgd78059 #include <sys/types.h>
285181Sgd78059 #include <sys/sunddi.h>
295181Sgd78059 #include "dmfe_impl.h"
305181Sgd78059 
315181Sgd78059 /*
325181Sgd78059  * This is the string displayed by modinfo, etc.
335181Sgd78059  */
345181Sgd78059 static char dmfe_ident[] = "Davicom DM9102 Ethernet";
355181Sgd78059 
365181Sgd78059 
375181Sgd78059 /*
385181Sgd78059  * NOTES:
395181Sgd78059  *
405181Sgd78059  * #defines:
415181Sgd78059  *
425181Sgd78059  *	DMFE_PCI_RNUMBER is the register-set number to use for the operating
435181Sgd78059  *	registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
445181Sgd78059  *	regset 1 will be the operating registers in I/O space, and regset 2
455181Sgd78059  *	will be the operating registers in MEMORY space (preferred).  If an
465181Sgd78059  *	expansion ROM is fitted, it may appear as a further register set.
475181Sgd78059  *
485181Sgd78059  *	DMFE_SLOP defines the amount by which the chip may read beyond
495181Sgd78059  *	the end of a buffer or descriptor, apparently 6-8 dwords :(
505181Sgd78059  *	We have to make sure this doesn't cause it to access unallocated
515181Sgd78059  *	or unmapped memory.
525181Sgd78059  *
535181Sgd78059  *	DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
545181Sgd78059  *	rounded up to a multiple of 4.  Here we choose a power of two for
555181Sgd78059  *	speed & simplicity at the cost of a bit more memory.
565181Sgd78059  *
575181Sgd78059  *	However, the buffer length field in the TX/RX descriptors is only
585181Sgd78059  *	eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
595181Sgd78059  *	per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
605181Sgd78059  *	(2000) bytes each.
615181Sgd78059  *
625181Sgd78059  *	DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
635181Sgd78059  *	the data buffers.  The descriptors are always set up in CONSISTENT
645181Sgd78059  *	mode.
655181Sgd78059  *
665181Sgd78059  *	DMFE_HEADROOM defines how much space we'll leave in allocated
675181Sgd78059  *	mblks before the first valid data byte.  This should be chosen
685181Sgd78059  *	to be 2 modulo 4, so that once the ethernet header (14 bytes)
695181Sgd78059  *	has been stripped off, the packet data will be 4-byte aligned.
705181Sgd78059  *	The remaining space can be used by upstream modules to prepend
715181Sgd78059  *	any headers required.
725181Sgd78059  *
735181Sgd78059  * Patchable globals:
745181Sgd78059  *
755181Sgd78059  *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
765181Sgd78059  *		Setting READ_MULTIPLE in this register seems to cause
775181Sgd78059  *		the chip to generate a READ LINE command with a parity
785181Sgd78059  *		error!  Don't do it!
795181Sgd78059  *
805181Sgd78059  *	dmfe_setup_desc1: the value to be put into descriptor word 1
815181Sgd78059  *		when sending a SETUP packet.
825181Sgd78059  *
835181Sgd78059  *		Setting TX_LAST_DESC in desc1 in a setup packet seems
845181Sgd78059  *		to make the chip spontaneously reset internally - it
855181Sgd78059  *		attempts to give back the setup packet descriptor by
865181Sgd78059  *		writing to PCI address 00000000 - which may or may not
875181Sgd78059  *		get a MASTER ABORT - after which most of its registers
885181Sgd78059  *		seem to have either default values or garbage!
895181Sgd78059  *
905181Sgd78059  *		TX_FIRST_DESC doesn't seem to have the same effect but
915181Sgd78059  *		it isn't needed on a setup packet so we'll leave it out
925181Sgd78059  *	too, just in case it has some other weird side-effect.
935181Sgd78059  *
945181Sgd78059  *		The default hardware packet filtering mode is now
955181Sgd78059  *		HASH_AND_PERFECT (imperfect filtering of multicast
965181Sgd78059  *		packets and perfect filtering of unicast packets).
975181Sgd78059  *		If this is found not to work reliably, setting the
985181Sgd78059  *		TX_FILTER_TYPE1 bit will cause a switchover to using
995181Sgd78059  *		HASH_ONLY mode (imperfect filtering of *all* packets).
1005181Sgd78059  *		Software will then perform the additional filtering
1015181Sgd78059  *		as required.
1025181Sgd78059  */
1035181Sgd78059 
#define	DMFE_PCI_RNUMBER	2	/* regset 2: operating regs in MEMORY space */
#define	DMFE_SLOP		(8*sizeof (uint32_t))	/* chip may read this far past a buffer */
#define	DMFE_BUF_SIZE		2048	/* power of 2 >= ETHERMAX+ETHERFCSL+DMFE_SLOP */
#define	DMFE_BUF_SIZE_1		2000	/* size told to the chip (11-bit length field) */
#define	DMFE_DMA_MODE		DDI_DMA_STREAMING	/* data buffers only; descriptors are CONSISTENT */
#define	DMFE_HEADROOM		34	/* 2 mod 4: payload 4-byte aligned after 14-byte header */

/* Patchable globals -- see the block comment above for constraints */
static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
					TX_FILTER_TYPE0;
1145181Sgd78059 
1155181Sgd78059 /*
1165181Sgd78059  * Some tunable parameters ...
1175181Sgd78059  *	Number of RX/TX ring entries (128/128)
1185181Sgd78059  *	Minimum number of TX ring slots to keep free (1)
1195181Sgd78059  *	Low-water mark at which to try to reclaim TX ring slots (1)
1205181Sgd78059  *	How often to take a TX-done interrupt (twice per ring cycle)
1215181Sgd78059  *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
1225181Sgd78059  */
1235181Sgd78059 
#define	DMFE_TX_DESC		128	/* Should be a multiple of 4 <= 256 */
#define	DMFE_RX_DESC		128	/* Should be a multiple of 4 <= 256 */

/* Patchable copies of the tunables described in the comment above */
static uint32_t dmfe_rx_desc = DMFE_RX_DESC;	/* RX ring entries */
static uint32_t dmfe_tx_desc = DMFE_TX_DESC;	/* TX ring entries */
static uint32_t dmfe_tx_min_free = 1;		/* min TX slots kept free */
static uint32_t dmfe_tx_reclaim_level = 1;	/* low-water mark for reclaim */
static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;	/* TX-done int twice per ring cycle */
static boolean_t dmfe_reclaim_on_done = B_FALSE;	/* reclaim on TX-done interrupt? */
1335181Sgd78059 
1345181Sgd78059 /*
1355181Sgd78059  * Time-related parameters:
1365181Sgd78059  *
1375181Sgd78059  *	We use a cyclic to provide a periodic callback; this is then used
1385181Sgd78059  * 	to check for TX-stall and poll the link status register.
1395181Sgd78059  *
1405181Sgd78059  *	DMFE_TICK is the interval between cyclic callbacks, in microseconds.
1415181Sgd78059  *
1425181Sgd78059  *	TX_STALL_TIME_100 is the timeout in microseconds between passing
1435181Sgd78059  *	a packet to the chip for transmission and seeing that it's gone,
1445181Sgd78059  *	when running at 100Mb/s.  If we haven't reclaimed at least one
1455181Sgd78059  *	descriptor in this time we assume the transmitter has stalled
1465181Sgd78059  *	and reset the chip.
1475181Sgd78059  *
1485181Sgd78059  *	TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
1495181Sgd78059  *
1505181Sgd78059  *	LINK_POLL_TIME is the interval between checks on the link state
1515181Sgd78059  *	when nothing appears to have happened (this is in addition to the
1525181Sgd78059  *	case where we think we've detected a link change, and serves as a
1535181Sgd78059  *	backup in case the quick link check doesn't work properly).
1545181Sgd78059  *
1555181Sgd78059  * Patchable globals:
1565181Sgd78059  *
1575181Sgd78059  *	dmfe_tick_us:		DMFE_TICK
1585181Sgd78059  *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
1595181Sgd78059  *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
1605181Sgd78059  *	dmfe_link_poll_us:	LINK_POLL_TIME
1615181Sgd78059  *
1625181Sgd78059  * These are then used in _init() to calculate:
1635181Sgd78059  *
1645181Sgd78059  *	stall_100_tix[]: number of consecutive cyclic callbacks without a
1655181Sgd78059  *			 reclaim before the TX process is considered stalled,
1665181Sgd78059  *			 when running at 100Mb/s.  The elements are indexed
1675181Sgd78059  *			 by transmit-engine-state.
1685181Sgd78059  *	stall_10_tix[]:	 number of consecutive cyclic callbacks without a
1695181Sgd78059  *			 reclaim before the TX process is considered stalled,
1705181Sgd78059  *			 when running at 10Mb/s.  The elements are indexed
1715181Sgd78059  *			 by transmit-engine-state.
1725181Sgd78059  *	factotum_tix:	 number of consecutive cyclic callbacks before waking
1735181Sgd78059  *			 up the factotum even though there doesn't appear to
1745181Sgd78059  *			 be anything for it to do
1755181Sgd78059  */
1765181Sgd78059 
#define	DMFE_TICK		25000		/* microseconds		*/
#define	TX_STALL_TIME_100	50000		/* microseconds		*/
#define	TX_STALL_TIME_10	200000		/* microseconds		*/
#define	LINK_POLL_TIME		5000000		/* microseconds		*/

/* Patchable copies of the intervals above (see block comment) */
static uint32_t dmfe_tick_us = DMFE_TICK;
static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
static uint32_t dmfe_link_poll_us = LINK_POLL_TIME;

/*
 * Calculated from above in _init()
 */
static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];	/* ticks-to-stall at 100Mb/s, per TX state */
static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];	/* ticks-to-stall at 10Mb/s, per TX state */
static uint32_t factotum_tix;		/* ticks between routine factotum wakeups */
static uint32_t factotum_fast_tix;	/* presumably a shorter wakeup interval -- not described above; confirm in _init() */
static uint32_t factotum_start_tix;	/* presumably ticks before the first wakeup -- confirm in _init() */
1965181Sgd78059 
1975181Sgd78059 /*
1985181Sgd78059  * Property names
1995181Sgd78059  */
2005181Sgd78059 static char localmac_propname[] = "local-mac-address";
2015181Sgd78059 static char opmode_propname[] = "opmode-reg-value";
2025181Sgd78059 static char debug_propname[] = "dmfe-debug-flags";
2035181Sgd78059 
2045181Sgd78059 static int		dmfe_m_start(void *);
2055181Sgd78059 static void		dmfe_m_stop(void *);
2065181Sgd78059 static int		dmfe_m_promisc(void *, boolean_t);
2075181Sgd78059 static int		dmfe_m_multicst(void *, boolean_t, const uint8_t *);
2085181Sgd78059 static int		dmfe_m_unicst(void *, const uint8_t *);
2095181Sgd78059 static void		dmfe_m_ioctl(void *, queue_t *, mblk_t *);
2105181Sgd78059 static boolean_t	dmfe_m_getcapab(void *, mac_capab_t, void *);
2115181Sgd78059 static mblk_t		*dmfe_m_tx(void *, mblk_t *);
2125181Sgd78059 static int 		dmfe_m_stat(void *, uint_t, uint64_t *);
2135181Sgd78059 
/*
 * MAC layer callback vector.  The first word advertises which of the
 * optional entry points are present (only ioctl and getcapab here);
 * the NULL slot is an unused optional entry.
 */
static mac_callbacks_t dmfe_m_callbacks = {
	(MC_IOCTL | MC_GETCAPAB),	/* optional callbacks present */
	dmfe_m_stat,
	dmfe_m_start,
	dmfe_m_stop,
	dmfe_m_promisc,
	dmfe_m_multicst,
	dmfe_m_unicst,
	dmfe_m_tx,
	NULL,				/* unused optional entry -- confirm slot name against mac_callbacks(9S) */
	dmfe_m_ioctl,
	dmfe_m_getcapab,
};
2275181Sgd78059 
2285181Sgd78059 
/*
 * Describes the chip's DMA engine: 32-bit addressing only, 32-byte
 * alignment, a single scatter/gather element per transfer.
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x0FFFFFF,		/* dma_attr_count_max -- NOTE(review): 24 bits,
				 * one nibble short of 0x0FFFFFFF; confirm
				 * this is intentional */
	0x20,			/* dma_attr_align */
	0x7F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
2465181Sgd78059 
2475181Sgd78059 /*
2485181Sgd78059  * DMA access attributes for registers and descriptors
2495181Sgd78059  */
/*
 * DMA access attributes for registers and descriptors: the chip lays
 * these out little-endian (DDI_STRUCTURE_LE_ACC), so the framework
 * byte-swaps as needed on big-endian hosts.
 */
static ddi_device_acc_attr_t dmfe_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t dmfe_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Ethernet broadcast address: used (in the RX path) to distinguish
 * broadcast from other multicast frames when counting statistics.
 */
static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
2685181Sgd78059 
2695181Sgd78059 
2705181Sgd78059 /*
2715181Sgd78059  * ========== Lowest-level chip register & ring access routines ==========
2725181Sgd78059  */
2735181Sgd78059 
2745181Sgd78059 /*
2755181Sgd78059  * I/O register get/put routines
2765181Sgd78059  */
2775181Sgd78059 uint32_t
2785181Sgd78059 dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
2795181Sgd78059 {
2806990Sgd78059 	uint32_t *addr;
2816990Sgd78059 
2826990Sgd78059 	addr = (void *)(dmfep->io_reg + offset);
2836990Sgd78059 	return (ddi_get32(dmfep->io_handle, addr));
2845181Sgd78059 }
2855181Sgd78059 
2865181Sgd78059 void
2875181Sgd78059 dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
2885181Sgd78059 {
2896990Sgd78059 	uint32_t *addr;
2906990Sgd78059 
2916990Sgd78059 	addr = (void *)(dmfep->io_reg + offset);
2926990Sgd78059 	ddi_put32(dmfep->io_handle, addr, value);
2935181Sgd78059 }
2945181Sgd78059 
2955181Sgd78059 /*
2965181Sgd78059  * TX/RX ring get/put routines
2975181Sgd78059  */
2985181Sgd78059 static uint32_t
2995181Sgd78059 dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
3005181Sgd78059 {
3015181Sgd78059 	uint32_t *addr;
3025181Sgd78059 
3036990Sgd78059 	addr = (void *)dma_p->mem_va;
3045181Sgd78059 	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
3055181Sgd78059 }
3065181Sgd78059 
3075181Sgd78059 static void
3085181Sgd78059 dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
3095181Sgd78059 {
3105181Sgd78059 	uint32_t *addr;
3115181Sgd78059 
3126990Sgd78059 	addr = (void *)dma_p->mem_va;
3135181Sgd78059 	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
3145181Sgd78059 }
3155181Sgd78059 
3165181Sgd78059 /*
3175181Sgd78059  * Setup buffer get/put routines
3185181Sgd78059  */
3195181Sgd78059 static uint32_t
3205181Sgd78059 dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
3215181Sgd78059 {
3225181Sgd78059 	uint32_t *addr;
3235181Sgd78059 
3246990Sgd78059 	addr = (void *)dma_p->setup_va;
3255181Sgd78059 	return (ddi_get32(dma_p->acc_hdl, addr + index));
3265181Sgd78059 }
3275181Sgd78059 
3285181Sgd78059 static void
3295181Sgd78059 dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
3305181Sgd78059 {
3315181Sgd78059 	uint32_t *addr;
3325181Sgd78059 
3336990Sgd78059 	addr = (void *)dma_p->setup_va;
3345181Sgd78059 	ddi_put32(dma_p->acc_hdl, addr + index, value);
3355181Sgd78059 }
3365181Sgd78059 
3375181Sgd78059 
3385181Sgd78059 /*
3395181Sgd78059  * ========== Low-level chip & ring buffer manipulation ==========
3405181Sgd78059  */
3415181Sgd78059 
3425181Sgd78059 #define	DMFE_DBG	DMFE_DBG_REGS	/* debug flag for this code	*/
3435181Sgd78059 
3445181Sgd78059 /*
3455181Sgd78059  * dmfe_set_opmode() -- function to set operating mode
3465181Sgd78059  */
3475181Sgd78059 static void
3485181Sgd78059 dmfe_set_opmode(dmfe_t *dmfep)
3495181Sgd78059 {
3505181Sgd78059 	DMFE_DEBUG(("dmfe_set_opmode: opmode 0x%x", dmfep->opmode));
3515181Sgd78059 
3525181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
3535181Sgd78059 
3545181Sgd78059 	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
3555181Sgd78059 	drv_usecwait(10);
3565181Sgd78059 }
3575181Sgd78059 
3585181Sgd78059 /*
3595181Sgd78059  * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
3605181Sgd78059  */
static void
dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Stop the chip:
	 *	disable all interrupts
	 *	stop TX/RX processes
	 *	clear the status bits for TX/RX stopped
	 * If required, reset the chip
	 * Record the new state
	 */
	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
	dmfe_set_opmode(dmfep);
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);

	switch (newstate) {
	default:
		/* No other chip_state values are valid targets here */
		ASSERT(!"can't get here");
		return;

	case CHIP_STOPPED:
	case CHIP_ERROR:
		break;

	case CHIP_RESET:
		/*
		 * Full hardware reset: pulse SW_RESET, clear it, then
		 * restore our chosen bus-mode bits.  Each step is given
		 * 10us to settle (the same delay dmfe_set_opmode() uses).
		 */
		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
		break;
	}

	dmfep->chip_state = newstate;
}
3995181Sgd78059 
4005181Sgd78059 /*
4015181Sgd78059  * Initialize transmit and receive descriptor rings, and
4025181Sgd78059  * set the chip to point to the first entry in each ring
4035181Sgd78059  */
static void
dmfe_init_rings(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t pstart;	/* DVMA address of the first descriptor	*/
	uint32_t pnext;		/* DVMA address of the next descriptor	*/
	uint32_t pbuff;		/* DVMA address of the current buffer	*/
	uint32_t desc1;		/* common DESC1 word for every entry	*/
	int i;

	/*
	 * You need all the locks in order to rewrite the descriptor rings
	 */
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * Program the RX ring entries: each descriptor is chained to the
	 * next and owns one fixed-size buffer, though the chip is told
	 * only DMFE_BUF_SIZE_1 bytes of it (the length field is 11 bits).
	 * Setting RX_OWN in DESC0 hands the descriptor to the chip.
	 */
	descp = &dmfep->rx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct rx_desc_type);
	pbuff = dmfep->rx_buff.mem_dvma;
	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;

	for (i = 0; i < dmfep->rx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, RX_OWN);

		pnext += sizeof (struct rx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync: bend the final descriptor's "next"
	 * pointer back to the start, closing the ring, then flush the
	 * whole ring to the device before telling the chip about it.
	 */
	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->rx.next_free = 0;

	/*
	 * Set the base address of the RX descriptor list in CSR3
	 */
	DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
	    descp->mem_va, descp->mem_dvma));
	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);

	/*
	 * Program the TX ring entries: same layout as the RX ring, but
	 * every descriptor starts out owned by the host (DESC0 == 0).
	 */
	descp = &dmfep->tx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct tx_desc_type);
	pbuff = dmfep->tx_buff.mem_dvma;
	desc1 = TX_CHAINING;

	for (i = 0; i < dmfep->tx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, 0);

		pnext += sizeof (struct tx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->tx.n_free = dmfep->tx.n_desc;
	dmfep->tx.next_free = dmfep->tx.next_busy = 0;

	/*
	 * Set the base address of the TX descriptor list in CSR4
	 */
	DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
	    descp->mem_va, descp->mem_dvma));
	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}
4885181Sgd78059 
4895181Sgd78059 /*
4905181Sgd78059  * dmfe_start_chip() -- start the chip transmitting and/or receiving
4915181Sgd78059  */
4925181Sgd78059 static void
4935181Sgd78059 dmfe_start_chip(dmfe_t *dmfep, int mode)
4945181Sgd78059 {
4955181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
4965181Sgd78059 
4975181Sgd78059 	dmfep->opmode |= mode;
4985181Sgd78059 	dmfe_set_opmode(dmfep);
4995181Sgd78059 
5005181Sgd78059 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
5015181Sgd78059 	/*
5025181Sgd78059 	 * Enable VLAN length mode (allows packets to be 4 bytes Longer).
5035181Sgd78059 	 */
5045181Sgd78059 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);
5055181Sgd78059 
5065181Sgd78059 	/*
5075181Sgd78059 	 * Clear any pending process-stopped interrupts
5085181Sgd78059 	 */
5095181Sgd78059 	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
5105181Sgd78059 	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
5115181Sgd78059 	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
5125181Sgd78059 }
5135181Sgd78059 
5145181Sgd78059 /*
5155181Sgd78059  * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
5165181Sgd78059  *
5175181Sgd78059  * Normal interrupts:
5185181Sgd78059  *	We always enable:
5195181Sgd78059  *		RX_PKTDONE_INT		(packet received)
5205181Sgd78059  *		TX_PKTDONE_INT		(TX complete)
5215181Sgd78059  *	We never enable:
5225181Sgd78059  *		TX_ALLDONE_INT		(next TX buffer not ready)
5235181Sgd78059  *
5245181Sgd78059  * Abnormal interrupts:
5255181Sgd78059  *	We always enable:
5265181Sgd78059  *		RX_STOPPED_INT
5275181Sgd78059  *		TX_STOPPED_INT
5285181Sgd78059  *		SYSTEM_ERR_INT
5295181Sgd78059  *		RX_UNAVAIL_INT
5305181Sgd78059  *	We never enable:
5315181Sgd78059  *		RX_EARLY_INT
5325181Sgd78059  *		RX_WATCHDOG_INT
5335181Sgd78059  *		TX_JABBER_INT
5345181Sgd78059  *		TX_EARLY_INT
5355181Sgd78059  *		TX_UNDERFLOW_INT
5365181Sgd78059  *		GP_TIMER_INT		(not valid in -9 chips)
5375181Sgd78059  *		LINK_STATUS_INT		(not valid in -9 chips)
5385181Sgd78059  */
5395181Sgd78059 static void
5405181Sgd78059 dmfe_enable_interrupts(dmfe_t *dmfep)
5415181Sgd78059 {
5425181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
5435181Sgd78059 
5445181Sgd78059 	/*
5455181Sgd78059 	 * Put 'the standard set of interrupts' in the interrupt mask register
5465181Sgd78059 	 */
5475181Sgd78059 	dmfep->imask =	RX_PKTDONE_INT | TX_PKTDONE_INT |
5485181Sgd78059 	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;
5495181Sgd78059 
5505181Sgd78059 	dmfe_chip_put32(dmfep, INT_MASK_REG,
5515181Sgd78059 	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
5525181Sgd78059 	dmfep->chip_state = CHIP_RUNNING;
5535181Sgd78059 
5545181Sgd78059 	DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask));
5555181Sgd78059 }
5565181Sgd78059 
5575181Sgd78059 #undef	DMFE_DBG
5585181Sgd78059 
5595181Sgd78059 
5605181Sgd78059 /*
5615181Sgd78059  * ========== RX side routines ==========
5625181Sgd78059  */
5635181Sgd78059 
5645181Sgd78059 #define	DMFE_DBG	DMFE_DBG_RECV	/* debug flag for this code	*/
5655181Sgd78059 
5665181Sgd78059 /*
5675181Sgd78059  * Function to update receive statistics on various errors
5685181Sgd78059  */
5695181Sgd78059 static void
5705181Sgd78059 dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
5715181Sgd78059 {
5725181Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
5735181Sgd78059 
5745181Sgd78059 	/*
5755181Sgd78059 	 * The error summary bit and the error bits that it summarises
5765181Sgd78059 	 * are only valid if this is the last fragment.  Therefore, a
5775181Sgd78059 	 * fragment only contributes to the error statistics if both
5785181Sgd78059 	 * the last-fragment and error summary bits are set.
5795181Sgd78059 	 */
5805181Sgd78059 	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
5815181Sgd78059 		dmfep->rx_stats_ierrors += 1;
5825181Sgd78059 
5835181Sgd78059 		/*
5845181Sgd78059 		 * There are some other error bits in the descriptor for
5855181Sgd78059 		 * which there don't seem to be appropriate MAC statistics,
5865181Sgd78059 		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
5875181Sgd78059 		 * latter may not be possible if it is supposed to indicate
5885181Sgd78059 		 * that one buffer has been filled with a partial packet
5895181Sgd78059 		 * and the next buffer required for the rest of the packet
5905181Sgd78059 		 * was not available, as all our buffers are more than large
5915181Sgd78059 		 * enough for a whole packet without fragmenting.
5925181Sgd78059 		 */
5935181Sgd78059 
5945181Sgd78059 		if (desc0 & RX_OVERFLOW) {
5955181Sgd78059 			dmfep->rx_stats_overflow += 1;
5965181Sgd78059 
5975181Sgd78059 		} else if (desc0 & RX_RUNT_FRAME)
5985181Sgd78059 			dmfep->rx_stats_short += 1;
5995181Sgd78059 
6005181Sgd78059 		if (desc0 & RX_CRC)
6015181Sgd78059 			dmfep->rx_stats_fcs += 1;
6025181Sgd78059 
6035181Sgd78059 		if (desc0 & RX_FRAME2LONG)
6045181Sgd78059 			dmfep->rx_stats_toolong += 1;
6055181Sgd78059 	}
6065181Sgd78059 
6075181Sgd78059 	/*
6085181Sgd78059 	 * A receive watchdog timeout is counted as a MAC-level receive
6095181Sgd78059 	 * error.  Strangely, it doesn't set the packet error summary bit,
6105181Sgd78059 	 * according to the chip data sheet :-?
6115181Sgd78059 	 */
6125181Sgd78059 	if (desc0 & RX_RCV_WD_TO)
6135181Sgd78059 		dmfep->rx_stats_macrcv_errors += 1;
6145181Sgd78059 
6155181Sgd78059 	if (desc0 & RX_DRIBBLING)
6165181Sgd78059 		dmfep->rx_stats_align += 1;
6175181Sgd78059 
6185181Sgd78059 	if (desc0 & RX_MII_ERR)
6195181Sgd78059 		dmfep->rx_stats_macrcv_errors += 1;
6205181Sgd78059 }
6215181Sgd78059 
6225181Sgd78059 /*
6235181Sgd78059  * Receive incoming packet(s) and pass them up ...
6245181Sgd78059  */
6255181Sgd78059 static mblk_t *
6265181Sgd78059 dmfe_getp(dmfe_t *dmfep)
6275181Sgd78059 {
6285181Sgd78059 	dma_area_t *descp;
6295181Sgd78059 	mblk_t **tail;
6305181Sgd78059 	mblk_t *head;
6315181Sgd78059 	mblk_t *mp;
6325181Sgd78059 	char *rxb;
6335181Sgd78059 	uchar_t *dp;
6345181Sgd78059 	uint32_t desc0;
6355181Sgd78059 	uint32_t misses;
6365181Sgd78059 	int packet_length;
6375181Sgd78059 	int index;
6385181Sgd78059 
6395181Sgd78059 	mutex_enter(dmfep->rxlock);
6405181Sgd78059 
6415181Sgd78059 	/*
6425181Sgd78059 	 * Update the missed frame statistic from the on-chip counter.
6435181Sgd78059 	 */
6445181Sgd78059 	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
6455181Sgd78059 	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);
6465181Sgd78059 
6475181Sgd78059 	/*
6485181Sgd78059 	 * sync (all) receive descriptors before inspecting them
6495181Sgd78059 	 */
6505181Sgd78059 	descp = &dmfep->rx_desc;
6515181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
6525181Sgd78059 
6535181Sgd78059 	/*
6545181Sgd78059 	 * We should own at least one RX entry, since we've had a
6555181Sgd78059 	 * receive interrupt, but let's not be dogmatic about it.
6565181Sgd78059 	 */
6575181Sgd78059 	index = dmfep->rx.next_free;
6585181Sgd78059 	desc0 = dmfe_ring_get32(descp, index, DESC0);
6595181Sgd78059 	if (desc0 & RX_OWN)
6605181Sgd78059 		DMFE_DEBUG(("dmfe_getp: no work, desc0 0x%x", desc0));
6615181Sgd78059 
6625181Sgd78059 	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
6635181Sgd78059 		/*
6645181Sgd78059 		 * Maintain statistics for every descriptor returned
6655181Sgd78059 		 * to us by the chip ...
6665181Sgd78059 		 */
6675181Sgd78059 		DMFE_DEBUG(("dmfe_getp: desc0 0x%x", desc0));
6685181Sgd78059 		dmfe_update_rx_stats(dmfep, desc0);
6695181Sgd78059 
6705181Sgd78059 		/*
6715181Sgd78059 		 * Check that the entry has both "packet start" and
6725181Sgd78059 		 * "packet end" flags.  We really shouldn't get packet
6735181Sgd78059 		 * fragments, 'cos all the RX buffers are bigger than
6745181Sgd78059 		 * the largest valid packet.  So we'll just drop any
6755181Sgd78059 		 * fragments we find & skip on to the next entry.
6765181Sgd78059 		 */
6775181Sgd78059 		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
6785181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping fragment"));
6795181Sgd78059 			goto skip;
6805181Sgd78059 		}
6815181Sgd78059 
6825181Sgd78059 		/*
6835181Sgd78059 		 * A whole packet in one buffer.  We have to check error
6845181Sgd78059 		 * status and packet length before forwarding it upstream.
6855181Sgd78059 		 */
6865181Sgd78059 		if (desc0 & RX_ERR_SUMMARY) {
6875181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping errored packet"));
6885181Sgd78059 			goto skip;
6895181Sgd78059 		}
6905181Sgd78059 
6915181Sgd78059 		packet_length = (desc0 >> 16) & 0x3fff;
6925181Sgd78059 		if (packet_length > DMFE_MAX_PKT_SIZE) {
6935181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
6945181Sgd78059 			    "length %d", packet_length));
6955181Sgd78059 			goto skip;
6965181Sgd78059 		} else if (packet_length < ETHERMIN) {
6975181Sgd78059 			/*
6985181Sgd78059 			 * Note that VLAN packet would be even larger,
6995181Sgd78059 			 * but we don't worry about dropping runt VLAN
7005181Sgd78059 			 * frames.
7015181Sgd78059 			 *
7025181Sgd78059 			 * This check is probably redundant, as well,
7035181Sgd78059 			 * since the hardware should drop RUNT frames.
7045181Sgd78059 			 */
7055181Sgd78059 			DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
7065181Sgd78059 			    "length %d", packet_length));
7075181Sgd78059 			goto skip;
7085181Sgd78059 		}
7095181Sgd78059 
7105181Sgd78059 		/*
7115181Sgd78059 		 * Sync the data, so we can examine it; then check that
7125181Sgd78059 		 * the packet is really intended for us (remember that
7135181Sgd78059 		 * if we're using Imperfect Filtering, then the chip will
7145181Sgd78059 		 * receive unicast packets sent to stations whose addresses
7155181Sgd78059 		 * just happen to hash to the same value as our own; we
7165181Sgd78059 		 * discard these here so they don't get sent upstream ...)
7175181Sgd78059 		 */
7185181Sgd78059 		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
7195181Sgd78059 		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
7205181Sgd78059 		    DDI_DMA_SYNC_FORKERNEL);
7215181Sgd78059 		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];
7225181Sgd78059 
7235181Sgd78059 
7245181Sgd78059 		/*
7255181Sgd78059 		 * We do not bother to check that the packet is really for
7265181Sgd78059 		 * us, we let the MAC framework make that check instead.
7275181Sgd78059 		 * This is especially important if we ever want to support
7285181Sgd78059 		 * multiple MAC addresses.
7295181Sgd78059 		 */
7305181Sgd78059 
7315181Sgd78059 		/*
7325181Sgd78059 		 * Packet looks good; get a buffer to copy it into.  We
7335181Sgd78059 		 * allow some space at the front of the allocated buffer
7345181Sgd78059 		 * (HEADROOM) in case any upstream modules want to prepend
7355181Sgd78059 		 * some sort of header.  The value has been carefully chosen
		 * so that it also has the side-effect of making the packet
7375181Sgd78059 		 * *contents* 4-byte aligned, as required by NCA!
7385181Sgd78059 		 */
7395181Sgd78059 		mp = allocb(DMFE_HEADROOM + packet_length, 0);
7405181Sgd78059 		if (mp == NULL) {
7415181Sgd78059 			DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet"));
7425181Sgd78059 			dmfep->rx_stats_norcvbuf += 1;
7435181Sgd78059 			goto skip;
7445181Sgd78059 		}
7455181Sgd78059 
7465181Sgd78059 		/*
7475181Sgd78059 		 * Account for statistics of good packets.
7485181Sgd78059 		 */
7495181Sgd78059 		dmfep->rx_stats_ipackets += 1;
7505181Sgd78059 		dmfep->rx_stats_rbytes += packet_length;
7515181Sgd78059 		if (desc0 & RX_MULTI_FRAME) {
7525181Sgd78059 			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
7535181Sgd78059 				dmfep->rx_stats_multi += 1;
7545181Sgd78059 			} else {
7555181Sgd78059 				dmfep->rx_stats_bcast += 1;
7565181Sgd78059 			}
7575181Sgd78059 		}
7585181Sgd78059 
7595181Sgd78059 		/*
7605181Sgd78059 		 * Copy the packet into the STREAMS buffer
7615181Sgd78059 		 */
7625181Sgd78059 		dp = mp->b_rptr += DMFE_HEADROOM;
7635181Sgd78059 		mp->b_cont = mp->b_next = NULL;
7645181Sgd78059 
7655181Sgd78059 		/*
7665181Sgd78059 		 * Don't worry about stripping the vlan tag, the MAC
7675181Sgd78059 		 * layer will take care of that for us.
7685181Sgd78059 		 */
7695181Sgd78059 		bcopy(rxb, dp, packet_length);
7705181Sgd78059 
7715181Sgd78059 		/*
7725181Sgd78059 		 * Fix up the packet length, and link it to the chain
7735181Sgd78059 		 */
7745181Sgd78059 		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
7755181Sgd78059 		*tail = mp;
7765181Sgd78059 		tail = &mp->b_next;
7775181Sgd78059 
7785181Sgd78059 	skip:
7795181Sgd78059 		/*
7805181Sgd78059 		 * Return ownership of ring entry & advance to next
7815181Sgd78059 		 */
7825181Sgd78059 		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
7835181Sgd78059 		index = NEXT(index, dmfep->rx.n_desc);
7845181Sgd78059 		desc0 = dmfe_ring_get32(descp, index, DESC0);
7855181Sgd78059 	}
7865181Sgd78059 
7875181Sgd78059 	/*
7885181Sgd78059 	 * Remember where to start looking next time ...
7895181Sgd78059 	 */
7905181Sgd78059 	dmfep->rx.next_free = index;
7915181Sgd78059 
7925181Sgd78059 	/*
7935181Sgd78059 	 * sync the receive descriptors that we've given back
7945181Sgd78059 	 * (actually, we sync all of them for simplicity), and
7955181Sgd78059 	 * wake the chip in case it had suspended receive
7965181Sgd78059 	 */
7975181Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
7985181Sgd78059 	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);
7995181Sgd78059 
8005181Sgd78059 	mutex_exit(dmfep->rxlock);
8015181Sgd78059 	return (head);
8025181Sgd78059 }
8035181Sgd78059 
8045181Sgd78059 #undef	DMFE_DBG
8055181Sgd78059 
8065181Sgd78059 
8075181Sgd78059 /*
8085181Sgd78059  * ========== Primary TX side routines ==========
8095181Sgd78059  */
8105181Sgd78059 
8115181Sgd78059 #define	DMFE_DBG	DMFE_DBG_SEND	/* debug flag for this code	*/
8125181Sgd78059 
8135181Sgd78059 /*
8145181Sgd78059  *	TX ring management:
8155181Sgd78059  *
8165181Sgd78059  *	There are <tx.n_desc> entries in the ring, of which those from
8175181Sgd78059  *	<tx.next_free> round to but not including <tx.next_busy> must
8185181Sgd78059  *	be owned by the CPU.  The number of such entries should equal
8195181Sgd78059  *	<tx.n_free>; but there may also be some more entries which the
8205181Sgd78059  *	chip has given back but which we haven't yet accounted for.
8215181Sgd78059  *	The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
8225181Sgd78059  *	as it discovers such entries.
8235181Sgd78059  *
8245181Sgd78059  *	Initially, or when the ring is entirely free:
8255181Sgd78059  *		C = Owned by CPU
8265181Sgd78059  *		D = Owned by Davicom (DMFE) chip
8275181Sgd78059  *
8285181Sgd78059  *	tx.next_free					tx.n_desc = 16
8295181Sgd78059  *	  |
8305181Sgd78059  *	  v
8315181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8325181Sgd78059  *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
8335181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8345181Sgd78059  *	  ^
8355181Sgd78059  *	  |
8365181Sgd78059  *	tx.next_busy					tx.n_free = 16
8375181Sgd78059  *
8385181Sgd78059  *	On entry to reclaim() during normal use:
8395181Sgd78059  *
8405181Sgd78059  *					tx.next_free	tx.n_desc = 16
8415181Sgd78059  *					      |
8425181Sgd78059  *					      v
8435181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8445181Sgd78059  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
8455181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8465181Sgd78059  *		  ^
8475181Sgd78059  *		  |
8485181Sgd78059  *		tx.next_busy				tx.n_free = 9
8495181Sgd78059  *
8505181Sgd78059  *	On exit from reclaim():
8515181Sgd78059  *
8525181Sgd78059  *					tx.next_free	tx.n_desc = 16
8535181Sgd78059  *					      |
8545181Sgd78059  *					      v
8555181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8565181Sgd78059  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
8575181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8585181Sgd78059  *				  ^
8595181Sgd78059  *				  |
8605181Sgd78059  *			     tx.next_busy		tx.n_free = 13
8615181Sgd78059  *
8625181Sgd78059  *	The ring is considered "full" when only one entry is owned by
8635181Sgd78059  *	the CPU; thus <tx.n_free> should always be >= 1.
8645181Sgd78059  *
8655181Sgd78059  *			tx.next_free			tx.n_desc = 16
8665181Sgd78059  *			      |
8675181Sgd78059  *			      v
8685181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8695181Sgd78059  *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
8705181Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8715181Sgd78059  *				  ^
8725181Sgd78059  *				  |
8735181Sgd78059  *			     tx.next_busy		tx.n_free = 1
8745181Sgd78059  */
8755181Sgd78059 
8765181Sgd78059 /*
8775181Sgd78059  * Function to update transmit statistics on various errors
8785181Sgd78059  */
8795181Sgd78059 static void
8805181Sgd78059 dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
8815181Sgd78059 {
8825181Sgd78059 	uint32_t collisions;
8835181Sgd78059 	uint32_t errbits;
8845181Sgd78059 	uint32_t errsum;
8855181Sgd78059 
8865181Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
8875181Sgd78059 
8885181Sgd78059 	collisions = ((desc0 >> 3) & 0x0f);
8895181Sgd78059 	errsum = desc0 & TX_ERR_SUMMARY;
8905181Sgd78059 	errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
8915181Sgd78059 	    TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
8925181Sgd78059 	if ((errsum == 0) != (errbits == 0)) {
8935181Sgd78059 		dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
8945181Sgd78059 		desc0 |= TX_ERR_SUMMARY;
8955181Sgd78059 	}
8965181Sgd78059 
8975181Sgd78059 	if (desc0 & TX_ERR_SUMMARY) {
8985181Sgd78059 		dmfep->tx_stats_oerrors += 1;
8995181Sgd78059 
9005181Sgd78059 		/*
9015181Sgd78059 		 * If we ever see a transmit jabber timeout, we count it
9025181Sgd78059 		 * as a MAC-level transmit error; but we probably won't
9035181Sgd78059 		 * see it as it causes an Abnormal interrupt and we reset
9045181Sgd78059 		 * the chip in order to recover
9055181Sgd78059 		 */
9065181Sgd78059 		if (desc0 & TX_JABBER_TO) {
9075181Sgd78059 			dmfep->tx_stats_macxmt_errors += 1;
9085181Sgd78059 			dmfep->tx_stats_jabber += 1;
9095181Sgd78059 		}
9105181Sgd78059 
9115181Sgd78059 		if (desc0 & TX_UNDERFLOW)
9125181Sgd78059 			dmfep->tx_stats_underflow += 1;
9135181Sgd78059 		else if (desc0 & TX_LATE_COLL)
9145181Sgd78059 			dmfep->tx_stats_xmtlatecoll += 1;
9155181Sgd78059 
9165181Sgd78059 		if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
9175181Sgd78059 			dmfep->tx_stats_nocarrier += 1;
9185181Sgd78059 
9195181Sgd78059 		if (desc0 & TX_EXCESS_COLL) {
9205181Sgd78059 			dmfep->tx_stats_excoll += 1;
9215181Sgd78059 			collisions = 16;
9225181Sgd78059 		}
9235181Sgd78059 	} else {
9245181Sgd78059 		int	bit = index % NBBY;
9255181Sgd78059 		int	byt = index / NBBY;
9265181Sgd78059 
9275181Sgd78059 		if (dmfep->tx_mcast[byt] & bit) {
9285181Sgd78059 			dmfep->tx_mcast[byt] &= ~bit;
9295181Sgd78059 			dmfep->tx_stats_multi += 1;
9305181Sgd78059 
9315181Sgd78059 		} else if (dmfep->tx_bcast[byt] & bit) {
9325181Sgd78059 			dmfep->tx_bcast[byt] &= ~bit;
9335181Sgd78059 			dmfep->tx_stats_bcast += 1;
9345181Sgd78059 		}
9355181Sgd78059 
9365181Sgd78059 		dmfep->tx_stats_opackets += 1;
9375181Sgd78059 		dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
9385181Sgd78059 	}
9395181Sgd78059 
9405181Sgd78059 	if (collisions == 1)
9415181Sgd78059 		dmfep->tx_stats_first_coll += 1;
9425181Sgd78059 	else if (collisions != 0)
9435181Sgd78059 		dmfep->tx_stats_multi_coll += 1;
9445181Sgd78059 	dmfep->tx_stats_collisions += collisions;
9455181Sgd78059 
9465181Sgd78059 	if (desc0 & TX_DEFERRED)
9475181Sgd78059 		dmfep->tx_stats_defer += 1;
9485181Sgd78059 }
9495181Sgd78059 
/*
 * Reclaim all the ring entries that the chip has returned to us ...
 *
 * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
 * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
 *
 * Caller must hold <txlock>; this routine advances <tx.next_busy> and
 * increments <tx.n_free> for each descriptor the chip has handed back.
 */
static boolean_t
dmfe_reclaim_tx_desc(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t desc0;
	uint32_t desc1;
	int i;

	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * sync transmit descriptor ring before looking at it
	 */
	descp = &dmfep->tx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Early exit if there are no descriptors to reclaim, either
	 * because they're all reclaimed already, or because the next
	 * one is still owned by the chip ...
	 */
	i = dmfep->tx.next_busy;
	if (i == dmfep->tx.next_free)
		return (B_FALSE);
	desc0 = dmfe_ring_get32(descp, i, DESC0);
	if (desc0 & TX_OWN)
		return (B_FALSE);

	/*
	 * Reclaim as many descriptors as possible ...
	 */
	for (;;) {
		/* Every TX descriptor is either a setup or a last-frag */
		desc1 = dmfe_ring_get32(descp, i, DESC1);
		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);

		if (desc1 & TX_SETUP_PACKET) {
			/*
			 * Setup packet - restore buffer address
			 * (dmfe_send_msg() pointed BUFFER1 at the setup
			 * frame; put back the slot's regular buffer)
			 */
			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
			    descp->setup_dvma);
			dmfe_ring_put32(descp, i, BUFFER1,
			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
		} else {
			/*
			 * Regular packet - just update stats
			 */
			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
		}

#if	DMFEDEBUG
		/*
		 * We can use one of the SPARE bits in the TX descriptor
		 * to track when a ring buffer slot is reclaimed.  Then
		 * we can deduce the last operation on a slot from the
		 * top half of DESC0:
		 *
		 *	0x8000 xxxx	given to DMFE chip (TX_OWN)
		 *	0x7fff xxxx	returned but not yet reclaimed
		 *	0x3fff xxxx	reclaimed
		 */
#define	TX_PEND_RECLAIM		(1UL<<30)
		dmfe_ring_put32(descp, i, DESC0, desc0 & ~TX_PEND_RECLAIM);
#endif	/* DMFEDEBUG */

		/*
		 * Update count & index; we're all done if the ring is
		 * now fully reclaimed, or the next entry if still owned
		 * by the chip ...
		 */
		dmfep->tx.n_free += 1;
		i = NEXT(i, dmfep->tx.n_desc);
		if (i == dmfep->tx.next_free)
			break;
		desc0 = dmfe_ring_get32(descp, i, DESC0);
		if (desc0 & TX_OWN)
			break;
	}

	/* Record progress and restart the TX stall-detect timer */
	dmfep->tx.next_busy = i;
	dmfep->tx_pending_tix = 0;
	return (B_TRUE);
}
10415181Sgd78059 
/*
 * Send the message in the message block chain <mp>.
 *
 * The message is freed if and only if its contents are successfully copied
 * and queued for transmission (so that the return value is B_TRUE).
 * If we can't queue the message, the return value is B_FALSE and
 * the message is *not* freed.
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
	dma_area_t *descp;
	mblk_t *bp;
	char *txb;
	uint32_t desc1;
	uint32_t index;
	size_t totlen;
	size_t mblen;

	/*
	 * If the number of free slots is below the reclaim threshold
	 * (soft limit), we'll try to reclaim some.  If we fail, and
	 * the number of free slots is also below the minimum required
	 * (the hard limit, usually 1), then we can't send the packet.
	 */
	mutex_enter(dmfep->txlock);
	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
	    dmfep->tx.n_free <= dmfe_tx_min_free) {
		/*
		 * Resource shortage - return B_FALSE so the packet
		 * will be queued for retry after the next TX-done
		 * interrupt.
		 */
		mutex_exit(dmfep->txlock);
		DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
		return (B_FALSE);
	}

	/*
	 * There's a slot available, so claim it by incrementing
	 * the next-free index and decrementing the free count.
	 * If the ring is currently empty, we also restart the
	 * stall-detect timer.  The ASSERTions check that our
	 * invariants still hold:
	 *	the next-free index must not match the next-busy index
	 *	there must still be at least one free entry
	 * After this, we now have exclusive ownership of the ring
	 * entry (and matching buffer) indicated by <index>, so we
	 * don't need to hold the TX lock any longer
	 */
	index = dmfep->tx.next_free;
	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
		dmfep->tx_pending_tix = 0;
	ASSERT(dmfep->tx.n_free >= 1);
	mutex_exit(dmfep->txlock);

	/*
	 * Check the ownership of the ring entry ...
	 */
	descp = &dmfep->tx_desc;
	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

	if (mp == NULL) {
		/*
		 * Indicates we should send a SETUP packet, which we do by
		 * temporarily switching the BUFFER1 pointer in the ring
		 * entry.  The reclaim routine will restore BUFFER1 to its
		 * usual value.
		 *
		 * Note that as the setup packet is tagged on the end of
		 * the TX ring, when we sync the descriptor we're also
		 * implicitly syncing the setup packet - hence, we don't
		 * need a separate ddi_dma_sync() call here.
		 */
		desc1 = dmfe_setup_desc1;
		dmfe_ring_put32(descp, index, BUFFER1, descp->setup_dvma);
	} else {
		/*
		 * A regular packet; we copy the data into a pre-mapped
		 * buffer, which avoids the overhead (and complication)
		 * of mapping/unmapping STREAMS buffers and keeping hold
		 * of them until the DMA has completed.
		 *
		 * Because all buffers are the same size, and larger
		 * than the longest single valid message, we don't have
		 * to bother about splitting the message across multiple
		 * buffers.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		totlen = 0;
		bp = mp;

		/*
		 * Copy all (remaining) mblks in the message ...
		 */
		for (; bp != NULL; bp = bp->b_cont) {
			mblen = MBLKL(bp);
			/* only copy while still within the size limit */
			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
				bcopy(bp->b_rptr, txb, mblen);
				txb += mblen;
			}
		}

		/*
		 * Is this a multicast or broadcast packet?  We do
		 * this so that we can track statistics accurately
		 * when we reclaim it.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		if (txb[0] & 0x1) {
			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
				dmfep->tx_bcast[index / NBBY] |=
				    (1 << (index % NBBY));
			} else {
				dmfep->tx_mcast[index / NBBY] |=
				    (1 << (index % NBBY));
			}
		}

		/*
		 * We've reached the end of the chain; and we should have
		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
		 * buffer.  Note that the <size> field in the descriptor is
		 * only 11 bits, so bigger packets would be a problem!
		 */
		ASSERT(bp == NULL);
		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
		totlen &= TX_BUFFER_SIZE1;
		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;

		/* flush the copied frame to memory before the chip DMAs it */
		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Update ring descriptor entries, sync them, and wake up the
	 * transmit process
	 */
	if ((index & dmfe_tx_int_factor) == 0)
		desc1 |= TX_INT_ON_COMP;
	desc1 |= TX_CHAINING;
	dmfe_ring_put32(descp, index, DESC1, desc1);
	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

	/*
	 * Finally, free the message & return success
	 */
	if (mp)
		freemsg(mp);
	return (B_TRUE);
}
12045181Sgd78059 
12055181Sgd78059 /*
12065181Sgd78059  *	dmfe_m_tx() -- send a chain of packets
12075181Sgd78059  *
12085181Sgd78059  *	Called when packet(s) are ready to be transmitted. A pointer to an
12095181Sgd78059  *	M_DATA message that contains the packet is passed to this routine.
12105181Sgd78059  *	The complete LLC header is contained in the message's first message
12115181Sgd78059  *	block, and the remainder of the packet is contained within
12125181Sgd78059  *	additional M_DATA message blocks linked to the first message block.
12135181Sgd78059  *
12145181Sgd78059  *	Additional messages may be passed by linking with b_next.
12155181Sgd78059  */
12165181Sgd78059 static mblk_t *
12175181Sgd78059 dmfe_m_tx(void *arg, mblk_t *mp)
12185181Sgd78059 {
12195181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
12205181Sgd78059 	mblk_t *next;
12215181Sgd78059 
12225181Sgd78059 	ASSERT(mp != NULL);
12235181Sgd78059 	ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);
12245181Sgd78059 
12255181Sgd78059 	if (dmfep->chip_state != CHIP_RUNNING)
12265181Sgd78059 		return (mp);
12275181Sgd78059 
12285181Sgd78059 	while (mp != NULL) {
12295181Sgd78059 		next = mp->b_next;
12305181Sgd78059 		mp->b_next = NULL;
12315181Sgd78059 		if (!dmfe_send_msg(dmfep, mp)) {
12325181Sgd78059 			mp->b_next = next;
12335181Sgd78059 			break;
12345181Sgd78059 		}
12355181Sgd78059 		mp = next;
12365181Sgd78059 	}
12375181Sgd78059 
12385181Sgd78059 	return (mp);
12395181Sgd78059 }
12405181Sgd78059 
12415181Sgd78059 #undef	DMFE_DBG
12425181Sgd78059 
12435181Sgd78059 
12445181Sgd78059 /*
12455181Sgd78059  * ========== Address-setting routines (TX-side) ==========
12465181Sgd78059  */
12475181Sgd78059 
12485181Sgd78059 #define	DMFE_DBG	DMFE_DBG_ADDR	/* debug flag for this code	*/
12495181Sgd78059 
12505181Sgd78059 /*
12515181Sgd78059  * Find the index of the relevant bit in the setup packet.
12525181Sgd78059  * This must mirror the way the hardware will actually calculate it!
12535181Sgd78059  */
12545181Sgd78059 static uint32_t
12555181Sgd78059 dmfe_hash_index(const uint8_t *address)
12565181Sgd78059 {
12575181Sgd78059 	uint32_t const POLY = HASH_POLY;
12585181Sgd78059 	uint32_t crc = HASH_CRC;
12595181Sgd78059 	uint32_t index;
12605181Sgd78059 	uint32_t msb;
12615181Sgd78059 	uchar_t currentbyte;
12625181Sgd78059 	int byteslength;
12635181Sgd78059 	int shift;
12645181Sgd78059 	int bit;
12655181Sgd78059 
12665181Sgd78059 	for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
12675181Sgd78059 		currentbyte = address[byteslength];
12685181Sgd78059 		for (bit = 0; bit < 8; ++bit) {
12695181Sgd78059 			msb = crc >> 31;
12705181Sgd78059 			crc <<= 1;
12715181Sgd78059 			if (msb ^ (currentbyte & 1)) {
12725181Sgd78059 				crc ^= POLY;
12735181Sgd78059 				crc |= 0x00000001;
12745181Sgd78059 			}
12755181Sgd78059 			currentbyte >>= 1;
12765181Sgd78059 		}
12775181Sgd78059 	}
12785181Sgd78059 
12795181Sgd78059 	for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
12805181Sgd78059 		index |= (((crc >> bit) & 1) << shift);
12815181Sgd78059 
12825181Sgd78059 	return (index);
12835181Sgd78059 }
12845181Sgd78059 
12855181Sgd78059 /*
12865181Sgd78059  * Find and set/clear the relevant bit in the setup packet hash table
12875181Sgd78059  * This must mirror the way the hardware will actually interpret it!
12885181Sgd78059  */
12895181Sgd78059 static void
12905181Sgd78059 dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
12915181Sgd78059 {
12925181Sgd78059 	dma_area_t *descp;
12935181Sgd78059 	uint32_t tmp;
12945181Sgd78059 
12955181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
12965181Sgd78059 
12975181Sgd78059 	descp = &dmfep->tx_desc;
12985181Sgd78059 	tmp = dmfe_setup_get32(descp, index/16);
12995181Sgd78059 	if (val)
13005181Sgd78059 		tmp |= 1 << (index%16);
13015181Sgd78059 	else
13025181Sgd78059 		tmp &= ~(1 << (index%16));
13035181Sgd78059 	dmfe_setup_put32(descp, index/16, tmp);
13045181Sgd78059 }
13055181Sgd78059 
13065181Sgd78059 /*
13075181Sgd78059  * Update the refcount for the bit in the setup packet corresponding
13085181Sgd78059  * to the specified address; if it changes between zero & nonzero,
13095181Sgd78059  * also update the bitmap itself & return B_TRUE, so that the caller
13105181Sgd78059  * knows to re-send the setup packet.  Otherwise (only the refcount
13115181Sgd78059  * changed), return B_FALSE
13125181Sgd78059  */
13135181Sgd78059 static boolean_t
13145181Sgd78059 dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
13155181Sgd78059 {
13165181Sgd78059 	uint32_t index;
13175181Sgd78059 	uint8_t *refp;
13185181Sgd78059 	boolean_t change;
13195181Sgd78059 
13205181Sgd78059 	index = dmfe_hash_index(mca);
13215181Sgd78059 	refp = &dmfep->mcast_refs[index];
13225181Sgd78059 	change = (val ? (*refp)++ : --(*refp)) == 0;
13235181Sgd78059 
13245181Sgd78059 	if (change)
13255181Sgd78059 		dmfe_update_hash(dmfep, index, val);
13265181Sgd78059 
13275181Sgd78059 	return (change);
13285181Sgd78059 }
13295181Sgd78059 
13305181Sgd78059 /*
13315181Sgd78059  * "Transmit" the (possibly updated) magic setup packet
13325181Sgd78059  */
13335181Sgd78059 static int
13345181Sgd78059 dmfe_send_setup(dmfe_t *dmfep)
13355181Sgd78059 {
13365181Sgd78059 	int status;
13375181Sgd78059 
13385181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
13395181Sgd78059 
13405181Sgd78059 	/*
13415181Sgd78059 	 * If the chip isn't running, we can't really send the setup frame
13425181Sgd78059 	 * now but it doesn't matter, 'cos it will be sent when the transmit
13435181Sgd78059 	 * process is restarted (see dmfe_start()).
13445181Sgd78059 	 */
13455181Sgd78059 	if ((dmfep->opmode & START_TRANSMIT) == 0)
13465181Sgd78059 		return (0);
13475181Sgd78059 
13485181Sgd78059 	/*
13495181Sgd78059 	 * "Send" the setup frame.  If it fails (e.g. no resources),
13505181Sgd78059 	 * set a flag; then the factotum will retry the "send".  Once
13515181Sgd78059 	 * it works, we can clear the flag no matter how many attempts
13525181Sgd78059 	 * had previously failed.  We tell the caller that it worked
13535181Sgd78059 	 * whether it did or not; after all, it *will* work eventually.
13545181Sgd78059 	 */
13555181Sgd78059 	status = dmfe_send_msg(dmfep, NULL);
13565181Sgd78059 	dmfep->need_setup = status ? B_FALSE : B_TRUE;
13575181Sgd78059 	return (0);
13585181Sgd78059 }
13595181Sgd78059 
/*
 *	dmfe_m_unicst() -- set the physical network address
 */
static int
dmfe_m_unicst(void *arg, const uint8_t *macaddr)
{
	dmfe_t *dmfep = arg;
	int status;
	int index;

	/*
	 * Update our current address and send out a new setup packet
	 *
	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
	 *
	 * It is said that there is a bug in the 21140 where it fails to
	 * receive packets addressed to the specified perfect filter address.
	 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
	 * bit should be set in the module variable dmfe_setup_desc1.
	 *
	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
	 * In this mode, *all* incoming addresses are hashed and looked
	 * up in the bitmap described by the setup packet.  Therefore,
	 * the bit representing the station address has to be added to
	 * the table before sending it out.  If the address is changed,
	 * the old entry should be removed before the new entry is made.
	 *
	 * NOTE: in this mode, unicast packets that are not intended for
	 * this station may be received; it is up to software to filter
	 * them out afterwards!
	 *
	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
	 * filtering.  In this mode, multicast addresses are hashed and
	 * checked against the bitmap, while unicast addresses are simply
	 * matched against the one physical address specified in the setup
	 * packet.  This means that we shouldn't receive unicast packets
	 * that aren't intended for us (but software still has to filter
	 * multicast packets just the same).
	 *
	 * Whichever mode we're using, we have to enter the broadcast
	 * address into the multicast filter map too, so we do this on
	 * the first time through after attach or reset.
	 */
	mutex_enter(dmfep->oplock);

	/* drop the old station address from the hash before adding the new */
	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
	if (!dmfep->addr_set)
		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);

	/*
	 * Remember the new current address
	 */
	ethaddr_copy(macaddr, dmfep->curr_addr);
	dmfep->addr_set = B_TRUE;

	/*
	 * Install the new physical address into the proper position in
	 * the setup frame; this is only used if we select hash+perfect
	 * filtering, but we'll put it in anyway.  The ugliness here is
	 * down to the usual chicken-and-egg problem :(
	 */
	for (index = 0; index < ETHERADDRL; index += 2)
		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
		    (macaddr[index+1] << 8) | macaddr[index]);

	/*
	 * Finally, we're ready to "transmit" the setup frame
	 */
	status = dmfe_send_setup(dmfep);
	mutex_exit(dmfep->oplock);

	return (status);
}
14375181Sgd78059 
14385181Sgd78059 /*
14395181Sgd78059  *	dmfe_m_multicst() -- enable or disable a multicast address
14405181Sgd78059  *
14415181Sgd78059  *	Program the hardware to enable/disable the multicast address
14425181Sgd78059  *	in "mca" (enable if add is true, otherwise disable it.)
14435181Sgd78059  *	We keep a refcount for each bit in the map, so that it still
14445181Sgd78059  *	works out properly if multiple addresses hash to the same bit.
14455181Sgd78059  *	dmfe_update_mcast() tells us whether the map actually changed;
14465181Sgd78059  *	if so, we have to re-"transmit" the magic setup packet.
14475181Sgd78059  */
14485181Sgd78059 static int
14495181Sgd78059 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
14505181Sgd78059 {
14515181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
14525181Sgd78059 	int status = 0;
14535181Sgd78059 
14545181Sgd78059 	mutex_enter(dmfep->oplock);
14555181Sgd78059 	if (dmfe_update_mcast(dmfep, mca, add))
14565181Sgd78059 		status = dmfe_send_setup(dmfep);
14575181Sgd78059 	mutex_exit(dmfep->oplock);
14585181Sgd78059 
14595181Sgd78059 	return (status);
14605181Sgd78059 }
14615181Sgd78059 
14625181Sgd78059 #undef	DMFE_DBG
14635181Sgd78059 
14645181Sgd78059 
14655181Sgd78059 /*
14665181Sgd78059  * ========== Internal state management entry points ==========
14675181Sgd78059  */
14685181Sgd78059 
14695181Sgd78059 #define	DMFE_DBG	DMFE_DBG_GLD	/* debug flag for this code	*/
14705181Sgd78059 
14715181Sgd78059 /*
14725181Sgd78059  * These routines provide all the functionality required by the
14735181Sgd78059  * corresponding MAC layer entry points, but don't update the MAC layer state
14745181Sgd78059  * so they can be called internally without disturbing our record
14755181Sgd78059  * of what MAC layer thinks we should be doing ...
14765181Sgd78059  */
14775181Sgd78059 
14785181Sgd78059 /*
14795181Sgd78059  *	dmfe_stop() -- stop processing, don't reset h/w or rings
14805181Sgd78059  */
14815181Sgd78059 static void
14825181Sgd78059 dmfe_stop(dmfe_t *dmfep)
14835181Sgd78059 {
14845181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14855181Sgd78059 
14865181Sgd78059 	dmfe_stop_chip(dmfep, CHIP_STOPPED);
14875181Sgd78059 }
14885181Sgd78059 
14895181Sgd78059 /*
14905181Sgd78059  *	dmfe_reset() -- stop processing, reset h/w & rings to initial state
14915181Sgd78059  */
14925181Sgd78059 static void
14935181Sgd78059 dmfe_reset(dmfe_t *dmfep)
14945181Sgd78059 {
14955181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14965181Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
14975181Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
14985181Sgd78059 
14995181Sgd78059 	dmfe_stop_chip(dmfep, CHIP_RESET);
15005181Sgd78059 	dmfe_init_rings(dmfep);
15015181Sgd78059 }
15025181Sgd78059 
15035181Sgd78059 /*
15045181Sgd78059  *	dmfe_start() -- start transmitting/receiving
15055181Sgd78059  */
15065181Sgd78059 static void
15075181Sgd78059 dmfe_start(dmfe_t *dmfep)
15085181Sgd78059 {
15095181Sgd78059 	uint32_t gpsr;
15105181Sgd78059 
15115181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
15125181Sgd78059 
15135181Sgd78059 	ASSERT(dmfep->chip_state == CHIP_RESET ||
15145181Sgd78059 	    dmfep->chip_state == CHIP_STOPPED);
15155181Sgd78059 
15165181Sgd78059 	/*
15175181Sgd78059 	 * Make opmode consistent with PHY duplex setting
15185181Sgd78059 	 */
15195181Sgd78059 	gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
15205181Sgd78059 	if (gpsr & GPS_FULL_DUPLEX)
15215181Sgd78059 		dmfep->opmode |= FULL_DUPLEX;
15225181Sgd78059 	else
15235181Sgd78059 		dmfep->opmode &= ~FULL_DUPLEX;
15245181Sgd78059 
15255181Sgd78059 	/*
15265181Sgd78059 	 * Start transmit processing
15275181Sgd78059 	 * Set up the address filters
15285181Sgd78059 	 * Start receive processing
15295181Sgd78059 	 * Enable interrupts
15305181Sgd78059 	 */
15315181Sgd78059 	dmfe_start_chip(dmfep, START_TRANSMIT);
15325181Sgd78059 	(void) dmfe_send_setup(dmfep);
15335181Sgd78059 	drv_usecwait(10);
15345181Sgd78059 	dmfe_start_chip(dmfep, START_RECEIVE);
15355181Sgd78059 	dmfe_enable_interrupts(dmfep);
15365181Sgd78059 }
15375181Sgd78059 
15385181Sgd78059 /*
15395181Sgd78059  * dmfe_restart - restart transmitting/receiving after error or suspend
15405181Sgd78059  */
15415181Sgd78059 static void
15425181Sgd78059 dmfe_restart(dmfe_t *dmfep)
15435181Sgd78059 {
15445181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
15455181Sgd78059 
15465181Sgd78059 	/*
15475181Sgd78059 	 * You need not only <oplock>, but also <rxlock> AND <txlock>
15485181Sgd78059 	 * in order to reset the rings, but then <txlock> *mustn't*
15495181Sgd78059 	 * be held across the call to dmfe_start()
15505181Sgd78059 	 */
15515181Sgd78059 	mutex_enter(dmfep->rxlock);
15525181Sgd78059 	mutex_enter(dmfep->txlock);
15535181Sgd78059 	dmfe_reset(dmfep);
15545181Sgd78059 	mutex_exit(dmfep->txlock);
15555181Sgd78059 	mutex_exit(dmfep->rxlock);
15565181Sgd78059 	if (dmfep->mac_state == DMFE_MAC_STARTED)
15575181Sgd78059 		dmfe_start(dmfep);
15585181Sgd78059 }
15595181Sgd78059 
15605181Sgd78059 
15615181Sgd78059 /*
15625181Sgd78059  * ========== MAC-required management entry points ==========
15635181Sgd78059  */
15645181Sgd78059 
15655181Sgd78059 /*
15665181Sgd78059  *	dmfe_m_stop() -- stop transmitting/receiving
15675181Sgd78059  */
15685181Sgd78059 static void
15695181Sgd78059 dmfe_m_stop(void *arg)
15705181Sgd78059 {
15715181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
15725181Sgd78059 
15735181Sgd78059 	/*
15745181Sgd78059 	 * Just stop processing, then record new MAC state
15755181Sgd78059 	 */
15765181Sgd78059 	mutex_enter(dmfep->oplock);
15775181Sgd78059 	dmfe_stop(dmfep);
15785181Sgd78059 	dmfep->mac_state = DMFE_MAC_STOPPED;
15795181Sgd78059 	mutex_exit(dmfep->oplock);
15805181Sgd78059 }
15815181Sgd78059 
15825181Sgd78059 /*
15835181Sgd78059  *	dmfe_m_start() -- start transmitting/receiving
15845181Sgd78059  */
15855181Sgd78059 static int
15865181Sgd78059 dmfe_m_start(void *arg)
15875181Sgd78059 {
15885181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
15895181Sgd78059 
15905181Sgd78059 	/*
15915181Sgd78059 	 * Start processing and record new MAC state
15925181Sgd78059 	 */
15935181Sgd78059 	mutex_enter(dmfep->oplock);
15945181Sgd78059 	dmfe_start(dmfep);
15955181Sgd78059 	dmfep->mac_state = DMFE_MAC_STARTED;
15965181Sgd78059 	mutex_exit(dmfep->oplock);
15975181Sgd78059 
15985181Sgd78059 	return (0);
15995181Sgd78059 }
16005181Sgd78059 
16015181Sgd78059 /*
16025181Sgd78059  * dmfe_m_promisc() -- set or reset promiscuous mode on the board
16035181Sgd78059  *
16045181Sgd78059  *	Program the hardware to enable/disable promiscuous and/or
16055181Sgd78059  *	receive-all-multicast modes.  Davicom don't document this
16065181Sgd78059  *	clearly, but it looks like we can do this on-the-fly (i.e.
16075181Sgd78059  *	without stopping & restarting the TX/RX processes).
16085181Sgd78059  */
16095181Sgd78059 static int
16105181Sgd78059 dmfe_m_promisc(void *arg, boolean_t on)
16115181Sgd78059 {
16125181Sgd78059 	dmfe_t *dmfep = arg;
16135181Sgd78059 
16145181Sgd78059 	mutex_enter(dmfep->oplock);
16155181Sgd78059 	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
16165181Sgd78059 	if (on)
16175181Sgd78059 		dmfep->opmode |= PROMISC_MODE;
16185181Sgd78059 	dmfe_set_opmode(dmfep);
16195181Sgd78059 	mutex_exit(dmfep->oplock);
16205181Sgd78059 
16215181Sgd78059 	return (0);
16225181Sgd78059 }
16235181Sgd78059 
16245181Sgd78059 /*ARGSUSED*/
16255181Sgd78059 static boolean_t
16265181Sgd78059 dmfe_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
16275181Sgd78059 {
16285181Sgd78059 	/*
16295181Sgd78059 	 * Note that the chip could support some form of polling and
16305181Sgd78059 	 * multiaddress support.  We should look into adding polling
16315181Sgd78059 	 * support later, once Solaris is better positioned to take
16325181Sgd78059 	 * advantage of it, although it may be of little use since
16335181Sgd78059 	 * even a lowly 500MHz US-IIe should be able to keep up with
16345181Sgd78059 	 * 100Mbps.  (Esp. if the packets are not unreasonably sized.)
16355181Sgd78059 	 *
16365181Sgd78059 	 * Multiaddress support, however, is likely to be of more
16375181Sgd78059 	 * utility with crossbow and virtualized NICs.  Although, the
16385181Sgd78059 	 * fact that dmfe is only supported on low-end US-IIe hardware
16395181Sgd78059 	 * makes one wonder whether VNICs are likely to be used on
16405181Sgd78059 	 * such platforms.  The chip certainly supports the notion,
16415181Sgd78059 	 * since it can be run in HASH-ONLY mode.  (Though this would
16425181Sgd78059 	 * require software to drop unicast packets that are
16435181Sgd78059 	 * incorrectly received due to hash collision of the
16445181Sgd78059 	 * destination mac address.)
16455181Sgd78059 	 *
16465181Sgd78059 	 * Interestingly enough, modern Davicom chips (the 9102D)
16475181Sgd78059 	 * support full IP checksum offload, though its unclear
16485181Sgd78059 	 * whether any of these chips are used on any systems that can
16495181Sgd78059 	 * run Solaris.
16505181Sgd78059 	 *
16515181Sgd78059 	 * If this driver is ever supported on x86 hardware, then
16525181Sgd78059 	 * these assumptions should be revisited.
16535181Sgd78059 	 */
16545181Sgd78059 	switch (cap) {
16555181Sgd78059 	case MAC_CAPAB_POLL:
16565181Sgd78059 	case MAC_CAPAB_MULTIADDRESS:
16575181Sgd78059 	case MAC_CAPAB_HCKSUM:
16585181Sgd78059 	default:
16595181Sgd78059 		return (B_FALSE);
16605181Sgd78059 	}
16615181Sgd78059 }
16625181Sgd78059 
16635181Sgd78059 
16645181Sgd78059 #undef	DMFE_DBG
16655181Sgd78059 
16665181Sgd78059 
16675181Sgd78059 /*
16685181Sgd78059  * ========== Factotum, implemented as a softint handler ==========
16695181Sgd78059  */
16705181Sgd78059 
16715181Sgd78059 #define	DMFE_DBG	DMFE_DBG_FACT	/* debug flag for this code	*/
16725181Sgd78059 
16735181Sgd78059 /*
16745181Sgd78059  * The factotum is woken up when there's something to do that we'd rather
16755181Sgd78059  * not do from inside a (high-level?) hardware interrupt handler.  Its
16765181Sgd78059  * two main tasks are:
16775181Sgd78059  *	reset & restart the chip after an error
16785181Sgd78059  *	update & restart the chip after a link status change
16795181Sgd78059  */
16805181Sgd78059 static uint_t
16815181Sgd78059 dmfe_factotum(caddr_t arg)
16825181Sgd78059 {
16835181Sgd78059 	dmfe_t *dmfep;
16845181Sgd78059 
16856990Sgd78059 	dmfep = (void *)arg;
16865181Sgd78059 	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
16875181Sgd78059 
16885181Sgd78059 	mutex_enter(dmfep->oplock);
16895181Sgd78059 
16905181Sgd78059 	dmfep->factotum_flag = 0;
16915181Sgd78059 	DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);
16925181Sgd78059 
16935181Sgd78059 	/*
16945181Sgd78059 	 * Check for chip error ...
16955181Sgd78059 	 */
16965181Sgd78059 	if (dmfep->chip_state == CHIP_ERROR) {
16975181Sgd78059 		/*
16985181Sgd78059 		 * Error recovery required: reset the chip and the rings,
16995181Sgd78059 		 * then, if it's supposed to be running, kick it off again.
17005181Sgd78059 		 */
17015181Sgd78059 		DRV_KS_INC(dmfep, KS_RECOVERY);
17025181Sgd78059 		dmfe_restart(dmfep);
17035181Sgd78059 	} else if (dmfep->need_setup) {
17045181Sgd78059 		(void) dmfe_send_setup(dmfep);
17055181Sgd78059 	}
17065181Sgd78059 	mutex_exit(dmfep->oplock);
17075181Sgd78059 
17085181Sgd78059 	/*
17095181Sgd78059 	 * Then, check the link state.  We need <milock> but not <oplock>
17105181Sgd78059 	 * to do this, but if something's changed, we need <oplock> as well
17115181Sgd78059 	 * in order to stop/restart the chip!  Note: we could simply hold
17125181Sgd78059 	 * <oplock> right through here, but we'd rather not 'cos checking
17135181Sgd78059 	 * the link state involves reading over the bit-serial MII bus,
17145181Sgd78059 	 * which takes ~500us even when nothing's changed.  Holding <oplock>
17155181Sgd78059 	 * would lock out the interrupt handler for the duration, so it's
17165181Sgd78059 	 * better to release it first and reacquire it only if needed.
17175181Sgd78059 	 */
17185181Sgd78059 	mutex_enter(dmfep->milock);
17195181Sgd78059 	if (dmfe_check_link(dmfep)) {
17205181Sgd78059 		mutex_enter(dmfep->oplock);
17215181Sgd78059 		dmfe_stop(dmfep);
17225181Sgd78059 		DRV_KS_INC(dmfep, KS_LINK_CHECK);
17235181Sgd78059 		if (dmfep->update_phy) {
17245181Sgd78059 			/*
17255181Sgd78059 			 *  The chip may reset itself for some unknown
17265181Sgd78059 			 * reason.  If this happens, the chip will use
17275181Sgd78059 			 * default settings (for speed, duplex, and autoneg),
17285181Sgd78059 			 * which possibly aren't the user's desired settings.
17295181Sgd78059 			 */
17305181Sgd78059 			dmfe_update_phy(dmfep);
17315181Sgd78059 			dmfep->update_phy = B_FALSE;
17325181Sgd78059 		}
17335181Sgd78059 		dmfe_recheck_link(dmfep, B_FALSE);
17345181Sgd78059 		if (dmfep->mac_state == DMFE_MAC_STARTED)
17355181Sgd78059 			dmfe_start(dmfep);
17365181Sgd78059 		mutex_exit(dmfep->oplock);
17375181Sgd78059 	}
17385181Sgd78059 	mutex_exit(dmfep->milock);
17395181Sgd78059 
17405181Sgd78059 	/*
17415181Sgd78059 	 * Keep MAC up-to-date about the state of the link ...
17425181Sgd78059 	 */
17435181Sgd78059 	mac_link_update(dmfep->mh, dmfep->link_state);
17445181Sgd78059 
17455181Sgd78059 	return (DDI_INTR_CLAIMED);
17465181Sgd78059 }
17475181Sgd78059 
17485181Sgd78059 static void
17495181Sgd78059 dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
17505181Sgd78059 {
17515181Sgd78059 	DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d",
17525181Sgd78059 	    why, ks_id, dmfep->factotum_flag));
17535181Sgd78059 
17545181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
17555181Sgd78059 	DRV_KS_INC(dmfep, ks_id);
17565181Sgd78059 
17575181Sgd78059 	if (dmfep->factotum_flag++ == 0)
17585181Sgd78059 		ddi_trigger_softintr(dmfep->factotum_id);
17595181Sgd78059 }
17605181Sgd78059 
17615181Sgd78059 #undef	DMFE_DBG
17625181Sgd78059 
17635181Sgd78059 
17645181Sgd78059 /*
17655181Sgd78059  * ========== Periodic Tasks (Cyclic handler & friends) ==========
17665181Sgd78059  */
17675181Sgd78059 
17685181Sgd78059 #define	DMFE_DBG	DMFE_DBG_TICK	/* debug flag for this code	*/
17695181Sgd78059 
17705181Sgd78059 /*
17715181Sgd78059  * Periodic tick tasks, run from the cyclic handler
17725181Sgd78059  *
17735181Sgd78059  * Check the state of the link and wake the factotum if necessary
17745181Sgd78059  */
17755181Sgd78059 static void
17765181Sgd78059 dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
17775181Sgd78059 {
17785181Sgd78059 	link_state_t phy_state;
17795181Sgd78059 	link_state_t utp_state;
17805181Sgd78059 	const char *why;
17815181Sgd78059 	int ks_id;
17825181Sgd78059 
17835181Sgd78059 	_NOTE(ARGUNUSED(istat))
17845181Sgd78059 
17855181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
17865181Sgd78059 
17875181Sgd78059 	/*
17885181Sgd78059 	 * Is it time to wake the factotum?  We do so periodically, in
17895181Sgd78059 	 * case the fast check below doesn't always reveal a link change
17905181Sgd78059 	 */
17915181Sgd78059 	if (dmfep->link_poll_tix-- == 0) {
17925181Sgd78059 		dmfep->link_poll_tix = factotum_tix;
17935181Sgd78059 		why = "tick (link poll)";
17945181Sgd78059 		ks_id = KS_TICK_LINK_POLL;
17955181Sgd78059 	} else {
17965181Sgd78059 		why = NULL;
17975181Sgd78059 		ks_id = KS_TICK_LINK_STATE;
17985181Sgd78059 	}
17995181Sgd78059 
18005181Sgd78059 	/*
18015181Sgd78059 	 * Has the link status changed?  If so, we might want to wake
18025181Sgd78059 	 * the factotum to deal with it.
18035181Sgd78059 	 */
18045181Sgd78059 	phy_state = (gpsr & GPS_LINK_STATUS) ? LINK_STATE_UP : LINK_STATE_DOWN;
18055181Sgd78059 	utp_state = (gpsr & GPS_UTP_SIG) ? LINK_STATE_UP : LINK_STATE_DOWN;
18065181Sgd78059 	if (phy_state != utp_state)
18075181Sgd78059 		why = "tick (phy <> utp)";
18085181Sgd78059 	else if ((dmfep->link_state == LINK_STATE_UP) &&
18095181Sgd78059 	    (phy_state == LINK_STATE_DOWN))
18105181Sgd78059 		why = "tick (UP -> DOWN)";
18115181Sgd78059 	else if (phy_state != dmfep->link_state) {
18125181Sgd78059 		if (dmfep->link_poll_tix > factotum_fast_tix)
18135181Sgd78059 			dmfep->link_poll_tix = factotum_fast_tix;
18145181Sgd78059 	}
18155181Sgd78059 
18165181Sgd78059 	if (why != NULL) {
18175181Sgd78059 		DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d",
18185181Sgd78059 		    why, dmfep->link_state, phy_state, utp_state));
18195181Sgd78059 		dmfe_wake_factotum(dmfep, ks_id, why);
18205181Sgd78059 	}
18215181Sgd78059 }
18225181Sgd78059 
18235181Sgd78059 /*
18245181Sgd78059  * Periodic tick tasks, run from the cyclic handler
18255181Sgd78059  *
18265181Sgd78059  * Check for TX stall; flag an error and wake the factotum if so.
18275181Sgd78059  */
18285181Sgd78059 static void
18295181Sgd78059 dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
18305181Sgd78059 {
18315181Sgd78059 	boolean_t tx_stall;
18325181Sgd78059 	uint32_t tx_state;
18335181Sgd78059 	uint32_t limit;
18345181Sgd78059 
18355181Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
18365181Sgd78059 
18375181Sgd78059 	/*
18385181Sgd78059 	 * Check for transmit stall ...
18395181Sgd78059 	 *
18405181Sgd78059 	 * IF there's at least one packet in the ring, AND the timeout
18415181Sgd78059 	 * has elapsed, AND we can't reclaim any descriptors, THEN we've
18425181Sgd78059 	 * stalled; we return B_TRUE to trigger a reset-and-recover cycle.
18435181Sgd78059 	 *
18445181Sgd78059 	 * Note that the timeout limit is based on the transmit engine
18455181Sgd78059 	 * state; we allow the transmitter longer to make progress in
18465181Sgd78059 	 * some states than in others, based on observations of this
18475181Sgd78059 	 * chip's actual behaviour in the lab.
18485181Sgd78059 	 *
18495181Sgd78059 	 * By observation, we find that on about 1 in 10000 passes through
18505181Sgd78059 	 * here, the TX lock is already held.  In that case, we'll skip
18515181Sgd78059 	 * the check on this pass rather than wait.  Most likely, the send
18525181Sgd78059 	 * routine was holding the lock when the interrupt happened, and
18535181Sgd78059 	 * we'll succeed next time through.  In the event of a real stall,
18545181Sgd78059 	 * the TX ring will fill up, after which the send routine won't be
18555181Sgd78059 	 * called any more and then we're sure to get in.
18565181Sgd78059 	 */
18575181Sgd78059 	tx_stall = B_FALSE;
18585181Sgd78059 	if (mutex_tryenter(dmfep->txlock)) {
18595181Sgd78059 		if (dmfep->tx.n_free < dmfep->tx.n_desc) {
18605181Sgd78059 			tx_state = TX_PROCESS_STATE(istat);
18615181Sgd78059 			if (gpsr & GPS_LINK_100)
18625181Sgd78059 				limit = stall_100_tix[tx_state];
18635181Sgd78059 			else
18645181Sgd78059 				limit = stall_10_tix[tx_state];
18655181Sgd78059 			if (++dmfep->tx_pending_tix >= limit &&
18665181Sgd78059 			    dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
18675181Sgd78059 				dmfe_log(dmfep, "TX stall detected "
18685181Sgd78059 				    "after %d ticks in state %d; "
18695181Sgd78059 				    "automatic recovery initiated",
18705181Sgd78059 				    dmfep->tx_pending_tix, tx_state);
18715181Sgd78059 				tx_stall = B_TRUE;
18725181Sgd78059 			}
18735181Sgd78059 		}
18745181Sgd78059 		mutex_exit(dmfep->txlock);
18755181Sgd78059 	}
18765181Sgd78059 
18775181Sgd78059 	if (tx_stall) {
18785181Sgd78059 		dmfe_stop_chip(dmfep, CHIP_ERROR);
18795181Sgd78059 		dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
18805181Sgd78059 	}
18815181Sgd78059 }
18825181Sgd78059 
18835181Sgd78059 /*
18845181Sgd78059  * Cyclic callback handler
18855181Sgd78059  */
18865181Sgd78059 static void
18875181Sgd78059 dmfe_cyclic(void *arg)
18885181Sgd78059 {
18895181Sgd78059 	dmfe_t *dmfep = arg;			/* private device info */
18905181Sgd78059 	uint32_t istat;
18915181Sgd78059 	uint32_t gpsr;
18925181Sgd78059 
18935181Sgd78059 	/*
18945181Sgd78059 	 * If the chip's not RUNNING, there's nothing to do.
18955181Sgd78059 	 * If we can't get the mutex straight away, we'll just
18965181Sgd78059 	 * skip this pass; we'll back back soon enough anyway.
18975181Sgd78059 	 */
18985181Sgd78059 	if (dmfep->chip_state != CHIP_RUNNING)
18995181Sgd78059 		return;
19005181Sgd78059 	if (mutex_tryenter(dmfep->oplock) == 0)
19015181Sgd78059 		return;
19025181Sgd78059 
19035181Sgd78059 	/*
19045181Sgd78059 	 * Recheck chip state (it might have been stopped since we
19055181Sgd78059 	 * checked above).  If still running, call each of the *tick*
19065181Sgd78059 	 * tasks.  They will check for link change, TX stall, etc ...
19075181Sgd78059 	 */
19085181Sgd78059 	if (dmfep->chip_state == CHIP_RUNNING) {
19095181Sgd78059 		istat = dmfe_chip_get32(dmfep, STATUS_REG);
19105181Sgd78059 		gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
19115181Sgd78059 		dmfe_tick_link_check(dmfep, gpsr, istat);
19125181Sgd78059 		dmfe_tick_stall_check(dmfep, gpsr, istat);
19135181Sgd78059 	}
19145181Sgd78059 
19155181Sgd78059 	DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
19165181Sgd78059 	mutex_exit(dmfep->oplock);
19175181Sgd78059 }
19185181Sgd78059 
19195181Sgd78059 #undef	DMFE_DBG
19205181Sgd78059 
19215181Sgd78059 
19225181Sgd78059 /*
19235181Sgd78059  * ========== Hardware interrupt handler ==========
19245181Sgd78059  */
19255181Sgd78059 
19265181Sgd78059 #define	DMFE_DBG	DMFE_DBG_INT	/* debug flag for this code	*/
19275181Sgd78059 
19285181Sgd78059 /*
19295181Sgd78059  *	dmfe_interrupt() -- handle chip interrupts
19305181Sgd78059  */
19315181Sgd78059 static uint_t
19325181Sgd78059 dmfe_interrupt(caddr_t arg)
19335181Sgd78059 {
19345181Sgd78059 	dmfe_t *dmfep;			/* private device info */
19355181Sgd78059 	uint32_t interrupts;
19365181Sgd78059 	uint32_t istat;
19375181Sgd78059 	const char *msg;
19385181Sgd78059 	mblk_t *mp;
19395181Sgd78059 	boolean_t warning_msg = B_TRUE;
19405181Sgd78059 
19416990Sgd78059 	dmfep = (void *)arg;
19425181Sgd78059 
19435181Sgd78059 	/*
19445181Sgd78059 	 * A quick check as to whether the interrupt was from this
19455181Sgd78059 	 * device, before we even finish setting up all our local
19465181Sgd78059 	 * variables.  Note that reading the interrupt status register
19475181Sgd78059 	 * doesn't have any unpleasant side effects such as clearing
19485181Sgd78059 	 * the bits read, so it's quite OK to re-read it once we have
19495181Sgd78059 	 * determined that we are going to service this interrupt and
19505181Sgd78059 	 * grabbed the mutexen.
19515181Sgd78059 	 */
19525181Sgd78059 	istat = dmfe_chip_get32(dmfep, STATUS_REG);
19535181Sgd78059 	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0)
19545181Sgd78059 		return (DDI_INTR_UNCLAIMED);
19555181Sgd78059 
19565181Sgd78059 	/*
19575181Sgd78059 	 * Unfortunately, there can be a race condition between attach()
19585181Sgd78059 	 * adding the interrupt handler and initialising the mutexen,
19595181Sgd78059 	 * and the handler itself being called because of a pending
19605181Sgd78059 	 * interrupt.  So, we check <imask>; if it shows that interrupts
19615181Sgd78059 	 * haven't yet been enabled (and therefore we shouldn't really
19625181Sgd78059 	 * be here at all), we will just write back the value read from
19635181Sgd78059 	 * the status register, thus acknowledging (and clearing) *all*
19645181Sgd78059 	 * pending conditions without really servicing them, and claim
19655181Sgd78059 	 * the interrupt.
19665181Sgd78059 	 */
19675181Sgd78059 	if (dmfep->imask == 0) {
19685181Sgd78059 		DMFE_DEBUG(("dmfe_interrupt: early interrupt 0x%x", istat));
19695181Sgd78059 		dmfe_chip_put32(dmfep, STATUS_REG, istat);
19705181Sgd78059 		return (DDI_INTR_CLAIMED);
19715181Sgd78059 	}
19725181Sgd78059 
19735181Sgd78059 	/*
19745181Sgd78059 	 * We're committed to servicing this interrupt, but we
19755181Sgd78059 	 * need to get the lock before going any further ...
19765181Sgd78059 	 */
19775181Sgd78059 	mutex_enter(dmfep->oplock);
19785181Sgd78059 	DRV_KS_INC(dmfep, KS_INTERRUPT);
19795181Sgd78059 
19805181Sgd78059 	/*
19815181Sgd78059 	 * Identify bits that represent enabled interrupts ...
19825181Sgd78059 	 */
19835181Sgd78059 	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
19845181Sgd78059 	interrupts = istat & dmfep->imask;
19855181Sgd78059 	ASSERT(interrupts != 0);
19865181Sgd78059 
19875181Sgd78059 	DMFE_DEBUG(("dmfe_interrupt: istat 0x%x -> 0x%x", istat, interrupts));
19885181Sgd78059 
19895181Sgd78059 	/*
19905181Sgd78059 	 * Check for any interrupts other than TX/RX done.
19915181Sgd78059 	 * If there are any, they are considered Abnormal
19925181Sgd78059 	 * and will cause the chip to be reset.
19935181Sgd78059 	 */
19945181Sgd78059 	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
19955181Sgd78059 		if (istat & ABNORMAL_SUMMARY_INT) {
19965181Sgd78059 			/*
19975181Sgd78059 			 * Any Abnormal interrupts will lead to us
19985181Sgd78059 			 * resetting the chip, so we don't bother
19995181Sgd78059 			 * to clear each interrupt individually.
20005181Sgd78059 			 *
20015181Sgd78059 			 * Our main task here is to identify the problem,
20025181Sgd78059 			 * by pointing out the most significant unexpected
20035181Sgd78059 			 * bit.  Additional bits may well be consequences
20045181Sgd78059 			 * of the first problem, so we consider the possible
20055181Sgd78059 			 * causes in order of severity.
20065181Sgd78059 			 */
20075181Sgd78059 			if (interrupts & SYSTEM_ERR_INT) {
20085181Sgd78059 				switch (istat & SYSTEM_ERR_BITS) {
20095181Sgd78059 				case SYSTEM_ERR_M_ABORT:
20105181Sgd78059 					msg = "Bus Master Abort";
20115181Sgd78059 					break;
20125181Sgd78059 
20135181Sgd78059 				case SYSTEM_ERR_T_ABORT:
20145181Sgd78059 					msg = "Bus Target Abort";
20155181Sgd78059 					break;
20165181Sgd78059 
20175181Sgd78059 				case SYSTEM_ERR_PARITY:
20185181Sgd78059 					msg = "Parity Error";
20195181Sgd78059 					break;
20205181Sgd78059 
20215181Sgd78059 				default:
20225181Sgd78059 					msg = "Unknown System Bus Error";
20235181Sgd78059 					break;
20245181Sgd78059 				}
20255181Sgd78059 			} else if (interrupts & RX_STOPPED_INT) {
20265181Sgd78059 				msg = "RX process stopped";
20275181Sgd78059 			} else if (interrupts & RX_UNAVAIL_INT) {
20285181Sgd78059 				msg = "RX buffer unavailable";
20295181Sgd78059 				warning_msg = B_FALSE;
20305181Sgd78059 			} else if (interrupts & RX_WATCHDOG_INT) {
20315181Sgd78059 				msg = "RX watchdog timeout?";
20325181Sgd78059 			} else if (interrupts & RX_EARLY_INT) {
20335181Sgd78059 				msg = "RX early interrupt?";
20345181Sgd78059 			} else if (interrupts & TX_STOPPED_INT) {
20355181Sgd78059 				msg = "TX process stopped";
20365181Sgd78059 			} else if (interrupts & TX_JABBER_INT) {
20375181Sgd78059 				msg = "TX jabber timeout";
20385181Sgd78059 			} else if (interrupts & TX_UNDERFLOW_INT) {
20395181Sgd78059 				msg = "TX underflow?";
20405181Sgd78059 			} else if (interrupts & TX_EARLY_INT) {
20415181Sgd78059 				msg = "TX early interrupt?";
20425181Sgd78059 
20435181Sgd78059 			} else if (interrupts & LINK_STATUS_INT) {
20445181Sgd78059 				msg = "Link status change?";
20455181Sgd78059 			} else if (interrupts & GP_TIMER_INT) {
20465181Sgd78059 				msg = "Timer expired?";
20475181Sgd78059 			}
20485181Sgd78059 
20495181Sgd78059 			if (warning_msg)
20505181Sgd78059 				dmfe_warning(dmfep, "abnormal interrupt, "
20515181Sgd78059 				    "status 0x%x: %s", istat, msg);
20525181Sgd78059 
20535181Sgd78059 			/*
20545181Sgd78059 			 * We don't want to run the entire reinitialisation
20555181Sgd78059 			 * code out of this (high-level?) interrupt, so we
20565181Sgd78059 			 * simply STOP the chip, and wake up the factotum
20575181Sgd78059 			 * to reinitalise it ...
20585181Sgd78059 			 */
20595181Sgd78059 			dmfe_stop_chip(dmfep, CHIP_ERROR);
20605181Sgd78059 			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
20615181Sgd78059 			    "interrupt (error)");
20625181Sgd78059 		} else {
20635181Sgd78059 			/*
20645181Sgd78059 			 * We shouldn't really get here (it would mean
20655181Sgd78059 			 * there were some unprocessed enabled bits but
20665181Sgd78059 			 * they weren't Abnormal?), but we'll check just
20675181Sgd78059 			 * in case ...
20685181Sgd78059 			 */
20695181Sgd78059 			DMFE_DEBUG(("unexpected interrupt bits: 0x%x", istat));
20705181Sgd78059 		}
20715181Sgd78059 	}
20725181Sgd78059 
20735181Sgd78059 	/*
20745181Sgd78059 	 * Acknowledge all the original bits - except in the case of an
20755181Sgd78059 	 * error, when we leave them unacknowledged so that the recovery
20765181Sgd78059 	 * code can see what was going on when the problem occurred ...
20775181Sgd78059 	 */
20785181Sgd78059 	if (dmfep->chip_state != CHIP_ERROR) {
20795181Sgd78059 		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
20805181Sgd78059 		/*
20815181Sgd78059 		 * Read-after-write forces completion on PCI bus.
20825181Sgd78059 		 *
20835181Sgd78059 		 */
20845181Sgd78059 		(void) dmfe_chip_get32(dmfep, STATUS_REG);
20855181Sgd78059 	}
20865181Sgd78059 
20875181Sgd78059 
20885181Sgd78059 	/*
20895181Sgd78059 	 * We've finished talking to the chip, so we can drop <oplock>
20905181Sgd78059 	 * before handling the normal interrupts, which only involve
20915181Sgd78059 	 * manipulation of descriptors ...
20925181Sgd78059 	 */
20935181Sgd78059 	mutex_exit(dmfep->oplock);
20945181Sgd78059 
20955181Sgd78059 	if (interrupts & RX_PKTDONE_INT)
20965181Sgd78059 		if ((mp = dmfe_getp(dmfep)) != NULL)
20975181Sgd78059 			mac_rx(dmfep->mh, NULL, mp);
20985181Sgd78059 
20995181Sgd78059 	if (interrupts & TX_PKTDONE_INT) {
21005181Sgd78059 		/*
21015181Sgd78059 		 * The only reason for taking this interrupt is to give
21025181Sgd78059 		 * MAC a chance to schedule queued packets after a
21035181Sgd78059 		 * ring-full condition.  To minimise the number of
21045181Sgd78059 		 * redundant TX-Done interrupts, we only mark two of the
21055181Sgd78059 		 * ring descriptors as 'interrupt-on-complete' - all the
21065181Sgd78059 		 * others are simply handed back without an interrupt.
21075181Sgd78059 		 */
21085181Sgd78059 		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
21095181Sgd78059 			(void) dmfe_reclaim_tx_desc(dmfep);
21105181Sgd78059 			mutex_exit(dmfep->txlock);
21115181Sgd78059 		}
21125181Sgd78059 		mac_tx_update(dmfep->mh);
21135181Sgd78059 	}
21145181Sgd78059 
21155181Sgd78059 	return (DDI_INTR_CLAIMED);
21165181Sgd78059 }
21175181Sgd78059 
21185181Sgd78059 #undef	DMFE_DBG
21195181Sgd78059 
21205181Sgd78059 
21215181Sgd78059 /*
21225181Sgd78059  * ========== Statistics update handler ==========
21235181Sgd78059  */
21245181Sgd78059 
21255181Sgd78059 #define	DMFE_DBG	DMFE_DBG_STATS	/* debug flag for this code	*/
21265181Sgd78059 
21275181Sgd78059 static int
21285181Sgd78059 dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
21295181Sgd78059 {
21305181Sgd78059 	dmfe_t *dmfep = arg;
21315181Sgd78059 	int rv = 0;
21325181Sgd78059 
21335181Sgd78059 	mutex_enter(dmfep->milock);
21345181Sgd78059 	mutex_enter(dmfep->oplock);
21355181Sgd78059 	mutex_enter(dmfep->rxlock);
21365181Sgd78059 	mutex_enter(dmfep->txlock);
21375181Sgd78059 
21385181Sgd78059 	/* make sure we have all the stats collected */
21395181Sgd78059 	(void) dmfe_reclaim_tx_desc(dmfep);
21405181Sgd78059 
21415181Sgd78059 	switch (stat) {
21425181Sgd78059 	case MAC_STAT_IFSPEED:
21435181Sgd78059 		*val = dmfep->op_stats_speed;
21445181Sgd78059 		break;
21455181Sgd78059 
21465181Sgd78059 	case MAC_STAT_IPACKETS:
21475181Sgd78059 		*val = dmfep->rx_stats_ipackets;
21485181Sgd78059 		break;
21495181Sgd78059 
21505181Sgd78059 	case MAC_STAT_MULTIRCV:
21515181Sgd78059 		*val = dmfep->rx_stats_multi;
21525181Sgd78059 		break;
21535181Sgd78059 
21545181Sgd78059 	case MAC_STAT_BRDCSTRCV:
21555181Sgd78059 		*val = dmfep->rx_stats_bcast;
21565181Sgd78059 		break;
21575181Sgd78059 
21585181Sgd78059 	case MAC_STAT_RBYTES:
21595181Sgd78059 		*val = dmfep->rx_stats_rbytes;
21605181Sgd78059 		break;
21615181Sgd78059 
21625181Sgd78059 	case MAC_STAT_IERRORS:
21635181Sgd78059 		*val = dmfep->rx_stats_ierrors;
21645181Sgd78059 		break;
21655181Sgd78059 
21665181Sgd78059 	case MAC_STAT_NORCVBUF:
21675181Sgd78059 		*val = dmfep->rx_stats_norcvbuf;
21685181Sgd78059 		break;
21695181Sgd78059 
21705181Sgd78059 	case MAC_STAT_COLLISIONS:
21715181Sgd78059 		*val = dmfep->tx_stats_collisions;
21725181Sgd78059 		break;
21735181Sgd78059 
21745181Sgd78059 	case MAC_STAT_OERRORS:
21755181Sgd78059 		*val = dmfep->tx_stats_oerrors;
21765181Sgd78059 		break;
21775181Sgd78059 
21785181Sgd78059 	case MAC_STAT_OPACKETS:
21795181Sgd78059 		*val = dmfep->tx_stats_opackets;
21805181Sgd78059 		break;
21815181Sgd78059 
21825181Sgd78059 	case MAC_STAT_MULTIXMT:
21835181Sgd78059 		*val = dmfep->tx_stats_multi;
21845181Sgd78059 		break;
21855181Sgd78059 
21865181Sgd78059 	case MAC_STAT_BRDCSTXMT:
21875181Sgd78059 		*val = dmfep->tx_stats_bcast;
21885181Sgd78059 		break;
21895181Sgd78059 
21905181Sgd78059 	case MAC_STAT_OBYTES:
21915181Sgd78059 		*val = dmfep->tx_stats_obytes;
21925181Sgd78059 		break;
21935181Sgd78059 
21945181Sgd78059 	case MAC_STAT_OVERFLOWS:
21955181Sgd78059 		*val = dmfep->rx_stats_overflow;
21965181Sgd78059 		break;
21975181Sgd78059 
21985181Sgd78059 	case MAC_STAT_UNDERFLOWS:
21995181Sgd78059 		*val = dmfep->tx_stats_underflow;
22005181Sgd78059 		break;
22015181Sgd78059 
22025181Sgd78059 	case ETHER_STAT_ALIGN_ERRORS:
22035181Sgd78059 		*val = dmfep->rx_stats_align;
22045181Sgd78059 		break;
22055181Sgd78059 
22065181Sgd78059 	case ETHER_STAT_FCS_ERRORS:
22075181Sgd78059 		*val = dmfep->rx_stats_fcs;
22085181Sgd78059 		break;
22095181Sgd78059 
22105181Sgd78059 	case ETHER_STAT_TOOLONG_ERRORS:
22115181Sgd78059 		*val = dmfep->rx_stats_toolong;
22125181Sgd78059 		break;
22135181Sgd78059 
22145181Sgd78059 	case ETHER_STAT_TOOSHORT_ERRORS:
22155181Sgd78059 		*val = dmfep->rx_stats_short;
22165181Sgd78059 		break;
22175181Sgd78059 
22185181Sgd78059 	case ETHER_STAT_MACRCV_ERRORS:
22195181Sgd78059 		*val = dmfep->rx_stats_macrcv_errors;
22205181Sgd78059 		break;
22215181Sgd78059 
22225181Sgd78059 	case ETHER_STAT_MACXMT_ERRORS:
22235181Sgd78059 		*val = dmfep->tx_stats_macxmt_errors;
22245181Sgd78059 		break;
22255181Sgd78059 
22265181Sgd78059 	case ETHER_STAT_JABBER_ERRORS:
22275181Sgd78059 		*val = dmfep->tx_stats_jabber;
22285181Sgd78059 		break;
22295181Sgd78059 
22305181Sgd78059 	case ETHER_STAT_CARRIER_ERRORS:
22315181Sgd78059 		*val = dmfep->tx_stats_nocarrier;
22325181Sgd78059 		break;
22335181Sgd78059 
22345181Sgd78059 	case ETHER_STAT_TX_LATE_COLLISIONS:
22355181Sgd78059 		*val = dmfep->tx_stats_xmtlatecoll;
22365181Sgd78059 		break;
22375181Sgd78059 
22385181Sgd78059 	case ETHER_STAT_EX_COLLISIONS:
22395181Sgd78059 		*val = dmfep->tx_stats_excoll;
22405181Sgd78059 		break;
22415181Sgd78059 
22425181Sgd78059 	case ETHER_STAT_DEFER_XMTS:
22435181Sgd78059 		*val = dmfep->tx_stats_defer;
22445181Sgd78059 		break;
22455181Sgd78059 
22465181Sgd78059 	case ETHER_STAT_FIRST_COLLISIONS:
22475181Sgd78059 		*val = dmfep->tx_stats_first_coll;
22485181Sgd78059 		break;
22495181Sgd78059 
22505181Sgd78059 	case ETHER_STAT_MULTI_COLLISIONS:
22515181Sgd78059 		*val = dmfep->tx_stats_multi_coll;
22525181Sgd78059 		break;
22535181Sgd78059 
22545181Sgd78059 	case ETHER_STAT_XCVR_INUSE:
22555181Sgd78059 		*val = dmfep->phy_inuse;
22565181Sgd78059 		break;
22575181Sgd78059 
22585181Sgd78059 	case ETHER_STAT_XCVR_ID:
22595181Sgd78059 		*val = dmfep->phy_id;
22605181Sgd78059 		break;
22615181Sgd78059 
22625181Sgd78059 	case ETHER_STAT_XCVR_ADDR:
22635181Sgd78059 		*val = dmfep->phy_addr;
22645181Sgd78059 		break;
22655181Sgd78059 
22665181Sgd78059 	case ETHER_STAT_LINK_DUPLEX:
22675181Sgd78059 		*val = dmfep->op_stats_duplex;
22685181Sgd78059 		break;
22695181Sgd78059 
22705181Sgd78059 	case ETHER_STAT_CAP_100T4:
22715181Sgd78059 		*val = dmfep->param_bmsr_100T4;
22725181Sgd78059 		break;
22735181Sgd78059 
22745181Sgd78059 	case ETHER_STAT_CAP_100FDX:
22755181Sgd78059 		*val = dmfep->param_bmsr_100fdx;
22765181Sgd78059 		break;
22775181Sgd78059 
22785181Sgd78059 	case ETHER_STAT_CAP_100HDX:
22795181Sgd78059 		*val = dmfep->param_bmsr_100hdx;
22805181Sgd78059 		break;
22815181Sgd78059 
22825181Sgd78059 	case ETHER_STAT_CAP_10FDX:
22835181Sgd78059 		*val = dmfep->param_bmsr_10fdx;
22845181Sgd78059 		break;
22855181Sgd78059 
22865181Sgd78059 	case ETHER_STAT_CAP_10HDX:
22875181Sgd78059 		*val = dmfep->param_bmsr_10hdx;
22885181Sgd78059 		break;
22895181Sgd78059 
22905181Sgd78059 	case ETHER_STAT_CAP_AUTONEG:
22915181Sgd78059 		*val = dmfep->param_bmsr_autoneg;
22925181Sgd78059 		break;
22935181Sgd78059 
22945181Sgd78059 	case ETHER_STAT_CAP_REMFAULT:
22955181Sgd78059 		*val = dmfep->param_bmsr_remfault;
22965181Sgd78059 		break;
22975181Sgd78059 
22985181Sgd78059 	case ETHER_STAT_ADV_CAP_AUTONEG:
22995181Sgd78059 		*val = dmfep->param_autoneg;
23005181Sgd78059 		break;
23015181Sgd78059 
23025181Sgd78059 	case ETHER_STAT_ADV_CAP_100T4:
23035181Sgd78059 		*val = dmfep->param_anar_100T4;
23045181Sgd78059 		break;
23055181Sgd78059 
23065181Sgd78059 	case ETHER_STAT_ADV_CAP_100FDX:
23075181Sgd78059 		*val = dmfep->param_anar_100fdx;
23085181Sgd78059 		break;
23095181Sgd78059 
23105181Sgd78059 	case ETHER_STAT_ADV_CAP_100HDX:
23115181Sgd78059 		*val = dmfep->param_anar_100hdx;
23125181Sgd78059 		break;
23135181Sgd78059 
23145181Sgd78059 	case ETHER_STAT_ADV_CAP_10FDX:
23155181Sgd78059 		*val = dmfep->param_anar_10fdx;
23165181Sgd78059 		break;
23175181Sgd78059 
23185181Sgd78059 	case ETHER_STAT_ADV_CAP_10HDX:
23195181Sgd78059 		*val = dmfep->param_anar_10hdx;
23205181Sgd78059 		break;
23215181Sgd78059 
23225181Sgd78059 	case ETHER_STAT_ADV_REMFAULT:
23235181Sgd78059 		*val = dmfep->param_anar_remfault;
23245181Sgd78059 		break;
23255181Sgd78059 
23265181Sgd78059 	case ETHER_STAT_LP_CAP_AUTONEG:
23275181Sgd78059 		*val = dmfep->param_lp_autoneg;
23285181Sgd78059 		break;
23295181Sgd78059 
23305181Sgd78059 	case ETHER_STAT_LP_CAP_100T4:
23315181Sgd78059 		*val = dmfep->param_lp_100T4;
23325181Sgd78059 		break;
23335181Sgd78059 
23345181Sgd78059 	case ETHER_STAT_LP_CAP_100FDX:
23355181Sgd78059 		*val = dmfep->param_lp_100fdx;
23365181Sgd78059 		break;
23375181Sgd78059 
23385181Sgd78059 	case ETHER_STAT_LP_CAP_100HDX:
23395181Sgd78059 		*val = dmfep->param_lp_100hdx;
23405181Sgd78059 		break;
23415181Sgd78059 
23425181Sgd78059 	case ETHER_STAT_LP_CAP_10FDX:
23435181Sgd78059 		*val = dmfep->param_lp_10fdx;
23445181Sgd78059 		break;
23455181Sgd78059 
23465181Sgd78059 	case ETHER_STAT_LP_CAP_10HDX:
23475181Sgd78059 		*val = dmfep->param_lp_10hdx;
23485181Sgd78059 		break;
23495181Sgd78059 
23505181Sgd78059 	case ETHER_STAT_LP_REMFAULT:
23515181Sgd78059 		*val = dmfep->param_lp_remfault;
23525181Sgd78059 		break;
23535181Sgd78059 
23545181Sgd78059 	default:
23555181Sgd78059 		rv = ENOTSUP;
23565181Sgd78059 	}
23575181Sgd78059 
23585181Sgd78059 	mutex_exit(dmfep->txlock);
23595181Sgd78059 	mutex_exit(dmfep->rxlock);
23605181Sgd78059 	mutex_exit(dmfep->oplock);
23615181Sgd78059 	mutex_exit(dmfep->milock);
23625181Sgd78059 
23635181Sgd78059 	return (rv);
23645181Sgd78059 }
23655181Sgd78059 
23665181Sgd78059 #undef	DMFE_DBG
23675181Sgd78059 
23685181Sgd78059 
23695181Sgd78059 /*
23705181Sgd78059  * ========== Ioctl handler & subfunctions ==========
23715181Sgd78059  */
23725181Sgd78059 
23735181Sgd78059 #define	DMFE_DBG	DMFE_DBG_IOCTL	/* debug flag for this code	*/
23745181Sgd78059 
23755181Sgd78059 /*
23765181Sgd78059  * Loopback operation
23775181Sgd78059  *
23785181Sgd78059  * Support access to the internal loopback and external loopback
23795181Sgd78059  * functions selected via the Operation Mode Register (OPR).
23805181Sgd78059  * These will be used by netlbtest (see BugId 4370609)
23815181Sgd78059  *
23825181Sgd78059  * Note that changing the loopback mode causes a stop/restart cycle
23835181Sgd78059  *
23845181Sgd78059  * It would be nice to evolve this to support the ioctls in sys/netlb.h,
23855181Sgd78059  * but then it would be even better to use Brussels to configure this.
23865181Sgd78059  */
23875181Sgd78059 static enum ioc_reply
23885181Sgd78059 dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd)
23895181Sgd78059 {
23905181Sgd78059 	loopback_t *loop_req_p;
23915181Sgd78059 	uint32_t loopmode;
23925181Sgd78059 
23935181Sgd78059 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t))
23945181Sgd78059 		return (IOC_INVAL);
23955181Sgd78059 
23966990Sgd78059 	loop_req_p = (void *)mp->b_cont->b_rptr;
23975181Sgd78059 
23985181Sgd78059 	switch (cmd) {
23995181Sgd78059 	default:
24005181Sgd78059 		/*
24015181Sgd78059 		 * This should never happen ...
24025181Sgd78059 		 */
24035181Sgd78059 		dmfe_error(dmfep, "dmfe_loop_ioctl: invalid cmd 0x%x", cmd);
24045181Sgd78059 		return (IOC_INVAL);
24055181Sgd78059 
24065181Sgd78059 	case DMFE_GET_LOOP_MODE:
24075181Sgd78059 		/*
24085181Sgd78059 		 * This doesn't return the current loopback mode - it
24095181Sgd78059 		 * returns a bitmask :-( of all possible loopback modes
24105181Sgd78059 		 */
24115181Sgd78059 		DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE"));
24125181Sgd78059 		loop_req_p->loopback = DMFE_LOOPBACK_MODES;
24135181Sgd78059 		miocack(wq, mp, sizeof (loopback_t), 0);
24145181Sgd78059 		return (IOC_DONE);
24155181Sgd78059 
24165181Sgd78059 	case DMFE_SET_LOOP_MODE:
24175181Sgd78059 		/*
24185181Sgd78059 		 * Select any of the various loopback modes
24195181Sgd78059 		 */
24205181Sgd78059 		DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
24215181Sgd78059 		    loop_req_p->loopback));
24225181Sgd78059 		switch (loop_req_p->loopback) {
24235181Sgd78059 		default:
24245181Sgd78059 			return (IOC_INVAL);
24255181Sgd78059 
24265181Sgd78059 		case DMFE_LOOPBACK_OFF:
24275181Sgd78059 			loopmode = LOOPBACK_OFF;
24285181Sgd78059 			break;
24295181Sgd78059 
24305181Sgd78059 		case DMFE_PHY_A_LOOPBACK_ON:
24315181Sgd78059 			loopmode = LOOPBACK_PHY_A;
24325181Sgd78059 			break;
24335181Sgd78059 
24345181Sgd78059 		case DMFE_PHY_D_LOOPBACK_ON:
24355181Sgd78059 			loopmode = LOOPBACK_PHY_D;
24365181Sgd78059 			break;
24375181Sgd78059 
24385181Sgd78059 		case DMFE_INT_LOOPBACK_ON:
24395181Sgd78059 			loopmode = LOOPBACK_INTERNAL;
24405181Sgd78059 			break;
24415181Sgd78059 		}
24425181Sgd78059 
24435181Sgd78059 		if ((dmfep->opmode & LOOPBACK_MODE_MASK) != loopmode) {
24445181Sgd78059 			dmfep->opmode &= ~LOOPBACK_MODE_MASK;
24455181Sgd78059 			dmfep->opmode |= loopmode;
24465181Sgd78059 			return (IOC_RESTART_ACK);
24475181Sgd78059 		}
24485181Sgd78059 
24495181Sgd78059 		return (IOC_ACK);
24505181Sgd78059 	}
24515181Sgd78059 }
24525181Sgd78059 
24535181Sgd78059 /*
24545181Sgd78059  * Specific dmfe IOCTLs, the mac module handles the generic ones.
24555181Sgd78059  */
24565181Sgd78059 static void
24575181Sgd78059 dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
24585181Sgd78059 {
24595181Sgd78059 	dmfe_t *dmfep = arg;
24605181Sgd78059 	struct iocblk *iocp;
24615181Sgd78059 	enum ioc_reply status;
24625181Sgd78059 	int cmd;
24635181Sgd78059 
24645181Sgd78059 	/*
24655181Sgd78059 	 * Validate the command before bothering with the mutexen ...
24665181Sgd78059 	 */
24676990Sgd78059 	iocp = (void *)mp->b_rptr;
24685181Sgd78059 	cmd = iocp->ioc_cmd;
24695181Sgd78059 	switch (cmd) {
24705181Sgd78059 	default:
24715181Sgd78059 		DMFE_DEBUG(("dmfe_m_ioctl: unknown cmd 0x%x", cmd));
24725181Sgd78059 		miocnak(wq, mp, 0, EINVAL);
24735181Sgd78059 		return;
24745181Sgd78059 
24755181Sgd78059 	case DMFE_SET_LOOP_MODE:
24765181Sgd78059 	case DMFE_GET_LOOP_MODE:
24775181Sgd78059 	case ND_GET:
24785181Sgd78059 	case ND_SET:
24795181Sgd78059 		break;
24805181Sgd78059 	}
24815181Sgd78059 
24825181Sgd78059 	mutex_enter(dmfep->milock);
24835181Sgd78059 	mutex_enter(dmfep->oplock);
24845181Sgd78059 
24855181Sgd78059 	switch (cmd) {
24865181Sgd78059 	default:
24875181Sgd78059 		_NOTE(NOTREACHED)
24885181Sgd78059 		status = IOC_INVAL;
24895181Sgd78059 		break;
24905181Sgd78059 
24915181Sgd78059 	case DMFE_SET_LOOP_MODE:
24925181Sgd78059 	case DMFE_GET_LOOP_MODE:
24935181Sgd78059 		status = dmfe_loop_ioctl(dmfep, wq, mp, cmd);
24945181Sgd78059 		break;
24955181Sgd78059 
24965181Sgd78059 	case ND_GET:
24975181Sgd78059 	case ND_SET:
24985181Sgd78059 		status = dmfe_nd_ioctl(dmfep, wq, mp, cmd);
24995181Sgd78059 		break;
25005181Sgd78059 	}
25015181Sgd78059 
25025181Sgd78059 	/*
25035181Sgd78059 	 * Do we need to restart?
25045181Sgd78059 	 */
25055181Sgd78059 	switch (status) {
25065181Sgd78059 	default:
25075181Sgd78059 		break;
25085181Sgd78059 
25095181Sgd78059 	case IOC_RESTART_ACK:
25105181Sgd78059 	case IOC_RESTART:
25115181Sgd78059 		/*
25125181Sgd78059 		 * PHY parameters changed; we need to stop, update the
25135181Sgd78059 		 * PHY layer and restart before sending the reply or ACK
25145181Sgd78059 		 */
25155181Sgd78059 		dmfe_stop(dmfep);
25165181Sgd78059 		dmfe_update_phy(dmfep);
25175181Sgd78059 		dmfep->update_phy = B_FALSE;
25185181Sgd78059 
25195181Sgd78059 		/*
25205181Sgd78059 		 * The link will now most likely go DOWN and UP, because
25215181Sgd78059 		 * we've changed the loopback state or the link parameters
25225181Sgd78059 		 * or autonegotiation.  So we have to check that it's
25235181Sgd78059 		 * settled down before we restart the TX/RX processes.
25245181Sgd78059 		 * The ioctl code will have planted some reason strings
25255181Sgd78059 		 * to explain what's happening, so the link state change
25265181Sgd78059 		 * messages won't be printed on the console . We wake the
25275181Sgd78059 		 * factotum to deal with link notifications, if any ...
25285181Sgd78059 		 */
25295181Sgd78059 		if (dmfe_check_link(dmfep)) {
25305181Sgd78059 			dmfe_recheck_link(dmfep, B_TRUE);
25315181Sgd78059 			dmfe_wake_factotum(dmfep, KS_LINK_CHECK, "ioctl");
25325181Sgd78059 		}
25335181Sgd78059 
25345181Sgd78059 		if (dmfep->mac_state == DMFE_MAC_STARTED)
25355181Sgd78059 			dmfe_start(dmfep);
25365181Sgd78059 		break;
25375181Sgd78059 	}
25385181Sgd78059 
25395181Sgd78059 	/*
25405181Sgd78059 	 * The 'reasons-for-link-change', if any, don't apply any more
25415181Sgd78059 	 */
25425181Sgd78059 	mutex_exit(dmfep->oplock);
25435181Sgd78059 	mutex_exit(dmfep->milock);
25445181Sgd78059 
25455181Sgd78059 	/*
25465181Sgd78059 	 * Finally, decide how to reply
25475181Sgd78059 	 */
25485181Sgd78059 	switch (status) {
25495181Sgd78059 	default:
25505181Sgd78059 		/*
25515181Sgd78059 		 * Error, reply with a NAK and EINVAL
25525181Sgd78059 		 */
25535181Sgd78059 		miocnak(wq, mp, 0, EINVAL);
25545181Sgd78059 		break;
25555181Sgd78059 
25565181Sgd78059 	case IOC_RESTART_ACK:
25575181Sgd78059 	case IOC_ACK:
25585181Sgd78059 		/*
25595181Sgd78059 		 * OK, reply with an ACK
25605181Sgd78059 		 */
25615181Sgd78059 		miocack(wq, mp, 0, 0);
25625181Sgd78059 		break;
25635181Sgd78059 
25645181Sgd78059 	case IOC_RESTART:
25655181Sgd78059 	case IOC_REPLY:
25665181Sgd78059 		/*
25675181Sgd78059 		 * OK, send prepared reply
25685181Sgd78059 		 */
25695181Sgd78059 		qreply(wq, mp);
25705181Sgd78059 		break;
25715181Sgd78059 
25725181Sgd78059 	case IOC_DONE:
25735181Sgd78059 		/*
25745181Sgd78059 		 * OK, reply already sent
25755181Sgd78059 		 */
25765181Sgd78059 		break;
25775181Sgd78059 	}
25785181Sgd78059 }
25795181Sgd78059 
25805181Sgd78059 #undef	DMFE_DBG
25815181Sgd78059 
25825181Sgd78059 
25835181Sgd78059 /*
25845181Sgd78059  * ========== Per-instance setup/teardown code ==========
25855181Sgd78059  */
25865181Sgd78059 
25875181Sgd78059 #define	DMFE_DBG	DMFE_DBG_INIT	/* debug flag for this code	*/
25885181Sgd78059 
25895181Sgd78059 /*
25905181Sgd78059  * Determine local MAC address & broadcast address for this interface
25915181Sgd78059  */
25925181Sgd78059 static void
25935181Sgd78059 dmfe_find_mac_address(dmfe_t *dmfep)
25945181Sgd78059 {
25955181Sgd78059 	uchar_t *prop;
25965181Sgd78059 	uint_t propsize;
25975181Sgd78059 	int err;
25985181Sgd78059 
25995181Sgd78059 	/*
26005181Sgd78059 	 * We have to find the "vendor's factory-set address".  This is
26015181Sgd78059 	 * the value of the property "local-mac-address", as set by OBP
26025181Sgd78059 	 * (or a .conf file!)
26035181Sgd78059 	 *
26045181Sgd78059 	 * If the property is not there, then we try to find the factory
26055181Sgd78059 	 * mac address from the devices serial EEPROM.
26065181Sgd78059 	 */
26075181Sgd78059 	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
26085181Sgd78059 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
26095181Sgd78059 	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
26105181Sgd78059 	if (err == DDI_PROP_SUCCESS) {
26115181Sgd78059 		if (propsize == ETHERADDRL)
26125181Sgd78059 			ethaddr_copy(prop, dmfep->curr_addr);
26135181Sgd78059 		ddi_prop_free(prop);
26145181Sgd78059 	} else {
26155181Sgd78059 		/* no property set... check eeprom */
26165181Sgd78059 		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
26175181Sgd78059 		    ETHERADDRL);
26185181Sgd78059 	}
26195181Sgd78059 
26205181Sgd78059 	DMFE_DEBUG(("dmfe_setup_mac_address: factory %s",
26215181Sgd78059 	    ether_sprintf((void *)dmfep->curr_addr)));
26225181Sgd78059 }
26235181Sgd78059 
26245181Sgd78059 static int
26255181Sgd78059 dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
26265181Sgd78059 	size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
26275181Sgd78059 	uint_t dma_flags, dma_area_t *dma_p)
26285181Sgd78059 {
26295181Sgd78059 	ddi_dma_cookie_t dma_cookie;
26305181Sgd78059 	uint_t ncookies;
26315181Sgd78059 	int err;
26325181Sgd78059 
26335181Sgd78059 	/*
26345181Sgd78059 	 * Allocate handle
26355181Sgd78059 	 */
26365181Sgd78059 	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
26375181Sgd78059 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
26385181Sgd78059 	if (err != DDI_SUCCESS)
26395181Sgd78059 		return (DDI_FAILURE);
26405181Sgd78059 
26415181Sgd78059 	/*
26425181Sgd78059 	 * Allocate memory
26435181Sgd78059 	 */
26445181Sgd78059 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
26455181Sgd78059 	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
26465181Sgd78059 	    DDI_DMA_SLEEP, NULL,
26475181Sgd78059 	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
26485181Sgd78059 	if (err != DDI_SUCCESS)
26495181Sgd78059 		return (DDI_FAILURE);
26505181Sgd78059 
26515181Sgd78059 	/*
26525181Sgd78059 	 * Bind the two together
26535181Sgd78059 	 */
26545181Sgd78059 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
26555181Sgd78059 	    dma_p->mem_va, dma_p->alength, dma_flags,
26565181Sgd78059 	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
26575181Sgd78059 	if (err != DDI_DMA_MAPPED)
26585181Sgd78059 		return (DDI_FAILURE);
26595181Sgd78059 	if ((dma_p->ncookies = ncookies) != 1)
26605181Sgd78059 		return (DDI_FAILURE);
26615181Sgd78059 
26625181Sgd78059 	dma_p->mem_dvma = dma_cookie.dmac_address;
26635181Sgd78059 	if (setup > 0) {
26645181Sgd78059 		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
26655181Sgd78059 		dma_p->setup_va = dma_p->mem_va + memsize;
26665181Sgd78059 	} else {
26675181Sgd78059 		dma_p->setup_dvma = 0;
26685181Sgd78059 		dma_p->setup_va = NULL;
26695181Sgd78059 	}
26705181Sgd78059 
26715181Sgd78059 	return (DDI_SUCCESS);
26725181Sgd78059 }
26735181Sgd78059 
26745181Sgd78059 /*
26755181Sgd78059  * This function allocates the transmit and receive buffers and descriptors.
26765181Sgd78059  */
26775181Sgd78059 static int
26785181Sgd78059 dmfe_alloc_bufs(dmfe_t *dmfep)
26795181Sgd78059 {
26805181Sgd78059 	size_t memsize;
26815181Sgd78059 	int err;
26825181Sgd78059 
26835181Sgd78059 	/*
26845181Sgd78059 	 * Allocate memory & handles for TX descriptor ring
26855181Sgd78059 	 */
26865181Sgd78059 	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
26875181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
26885181Sgd78059 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
26895181Sgd78059 	    &dmfep->tx_desc);
26905181Sgd78059 	if (err != DDI_SUCCESS)
26915181Sgd78059 		return (DDI_FAILURE);
26925181Sgd78059 
26935181Sgd78059 	/*
26945181Sgd78059 	 * Allocate memory & handles for TX buffers
26955181Sgd78059 	 */
26965181Sgd78059 	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
26975181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
26985181Sgd78059 	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
26995181Sgd78059 	    &dmfep->tx_buff);
27005181Sgd78059 	if (err != DDI_SUCCESS)
27015181Sgd78059 		return (DDI_FAILURE);
27025181Sgd78059 
27035181Sgd78059 	/*
27045181Sgd78059 	 * Allocate memory & handles for RX descriptor ring
27055181Sgd78059 	 */
27065181Sgd78059 	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
27075181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
27085181Sgd78059 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
27095181Sgd78059 	    &dmfep->rx_desc);
27105181Sgd78059 	if (err != DDI_SUCCESS)
27115181Sgd78059 		return (DDI_FAILURE);
27125181Sgd78059 
27135181Sgd78059 	/*
27145181Sgd78059 	 * Allocate memory & handles for RX buffers
27155181Sgd78059 	 */
27165181Sgd78059 	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
27175181Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
27185181Sgd78059 	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
27195181Sgd78059 	if (err != DDI_SUCCESS)
27205181Sgd78059 		return (DDI_FAILURE);
27215181Sgd78059 
27225181Sgd78059 	/*
27235181Sgd78059 	 * Allocate bitmasks for tx packet type tracking
27245181Sgd78059 	 */
27255181Sgd78059 	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
27265181Sgd78059 	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
27275181Sgd78059 
27285181Sgd78059 	return (DDI_SUCCESS);
27295181Sgd78059 }
27305181Sgd78059 
27315181Sgd78059 static void
27325181Sgd78059 dmfe_free_dma_mem(dma_area_t *dma_p)
27335181Sgd78059 {
27345181Sgd78059 	if (dma_p->dma_hdl != NULL) {
27355181Sgd78059 		if (dma_p->ncookies) {
27365181Sgd78059 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
27375181Sgd78059 			dma_p->ncookies = 0;
27385181Sgd78059 		}
27395181Sgd78059 		ddi_dma_free_handle(&dma_p->dma_hdl);
27405181Sgd78059 		dma_p->dma_hdl = NULL;
27415181Sgd78059 		dma_p->mem_dvma = 0;
27425181Sgd78059 		dma_p->setup_dvma = 0;
27435181Sgd78059 	}
27445181Sgd78059 
27455181Sgd78059 	if (dma_p->acc_hdl != NULL) {
27465181Sgd78059 		ddi_dma_mem_free(&dma_p->acc_hdl);
27475181Sgd78059 		dma_p->acc_hdl = NULL;
27485181Sgd78059 		dma_p->mem_va = NULL;
27495181Sgd78059 		dma_p->setup_va = NULL;
27505181Sgd78059 	}
27515181Sgd78059 }
27525181Sgd78059 
27535181Sgd78059 /*
27545181Sgd78059  * This routine frees the transmit and receive buffers and descriptors.
27555181Sgd78059  * Make sure the chip is stopped before calling it!
27565181Sgd78059  */
27575181Sgd78059 static void
27585181Sgd78059 dmfe_free_bufs(dmfe_t *dmfep)
27595181Sgd78059 {
27605181Sgd78059 	dmfe_free_dma_mem(&dmfep->rx_buff);
27615181Sgd78059 	dmfe_free_dma_mem(&dmfep->rx_desc);
27625181Sgd78059 	dmfe_free_dma_mem(&dmfep->tx_buff);
27635181Sgd78059 	dmfe_free_dma_mem(&dmfep->tx_desc);
27645181Sgd78059 	kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
27655181Sgd78059 	kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
27665181Sgd78059 }
27675181Sgd78059 
27685181Sgd78059 static void
27695181Sgd78059 dmfe_unattach(dmfe_t *dmfep)
27705181Sgd78059 {
27715181Sgd78059 	/*
27725181Sgd78059 	 * Clean up and free all DMFE data structures
27735181Sgd78059 	 */
27745181Sgd78059 	if (dmfep->cycid != NULL) {
27755181Sgd78059 		ddi_periodic_delete(dmfep->cycid);
27765181Sgd78059 		dmfep->cycid = NULL;
27775181Sgd78059 	}
27785181Sgd78059 
27795181Sgd78059 	if (dmfep->ksp_drv != NULL)
27805181Sgd78059 		kstat_delete(dmfep->ksp_drv);
27815181Sgd78059 	if (dmfep->progress & PROGRESS_HWINT) {
27825181Sgd78059 		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
27835181Sgd78059 		mutex_destroy(dmfep->txlock);
27845181Sgd78059 		mutex_destroy(dmfep->rxlock);
27855181Sgd78059 		mutex_destroy(dmfep->oplock);
27865181Sgd78059 	}
27875181Sgd78059 	if (dmfep->progress & PROGRESS_SOFTINT)
27885181Sgd78059 		ddi_remove_softintr(dmfep->factotum_id);
27895181Sgd78059 	if (dmfep->progress & PROGRESS_BUFS)
27905181Sgd78059 		dmfe_free_bufs(dmfep);
27915181Sgd78059 	if (dmfep->progress & PROGRESS_REGS)
27925181Sgd78059 		ddi_regs_map_free(&dmfep->io_handle);
27935181Sgd78059 	if (dmfep->progress & PROGRESS_NDD)
27945181Sgd78059 		dmfe_nd_cleanup(dmfep);
27955181Sgd78059 
27965181Sgd78059 	kmem_free(dmfep, sizeof (*dmfep));
27975181Sgd78059 }
27985181Sgd78059 
27995181Sgd78059 static int
28005181Sgd78059 dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
28015181Sgd78059 {
28025181Sgd78059 	ddi_acc_handle_t handle;
28035181Sgd78059 	uint32_t regval;
28045181Sgd78059 
28055181Sgd78059 	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
28065181Sgd78059 		return (DDI_FAILURE);
28075181Sgd78059 
28085181Sgd78059 	/*
28095181Sgd78059 	 * Get vendor/device/revision.  We expect (but don't check) that
28105181Sgd78059 	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
28115181Sgd78059 	 */
28125181Sgd78059 	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
28135181Sgd78059 	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
28145181Sgd78059 	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);
28155181Sgd78059 
28165181Sgd78059 	/*
28175181Sgd78059 	 * Turn on Bus Master Enable bit and ensure the device is not asleep
28185181Sgd78059 	 */
28195181Sgd78059 	regval = pci_config_get32(handle, PCI_CONF_COMM);
28205181Sgd78059 	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));
28215181Sgd78059 
28225181Sgd78059 	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
28235181Sgd78059 	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
28245181Sgd78059 	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
28255181Sgd78059 
28265181Sgd78059 	pci_config_teardown(&handle);
28275181Sgd78059 	return (DDI_SUCCESS);
28285181Sgd78059 }
28295181Sgd78059 
28305181Sgd78059 struct ks_index {
28315181Sgd78059 	int index;
28325181Sgd78059 	char *name;
28335181Sgd78059 };
28345181Sgd78059 
28355181Sgd78059 static const struct ks_index ks_drv_names[] = {
28365181Sgd78059 	{	KS_INTERRUPT,			"intr"			},
28375181Sgd78059 	{	KS_CYCLIC_RUN,			"cyclic_run"		},
28385181Sgd78059 
28395181Sgd78059 	{	KS_TICK_LINK_STATE,		"link_state_change"	},
28405181Sgd78059 	{	KS_TICK_LINK_POLL,		"link_state_poll"	},
28415181Sgd78059 	{	KS_TX_STALL,			"tx_stall_detect"	},
28425181Sgd78059 	{	KS_CHIP_ERROR,			"chip_error_interrupt"	},
28435181Sgd78059 
28445181Sgd78059 	{	KS_FACTOTUM_RUN,		"factotum_run"		},
28455181Sgd78059 	{	KS_RECOVERY,			"factotum_recover"	},
28465181Sgd78059 	{	KS_LINK_CHECK,			"factotum_link_check"	},
28475181Sgd78059 
28485181Sgd78059 	{	KS_LINK_UP_CNT,			"link_up_cnt"		},
28495181Sgd78059 	{	KS_LINK_DROP_CNT,		"link_drop_cnt"		},
28505181Sgd78059 
28515181Sgd78059 	{	KS_MIIREG_BMSR,			"mii_status"		},
28525181Sgd78059 	{	KS_MIIREG_ANAR,			"mii_advert_cap"	},
28535181Sgd78059 	{	KS_MIIREG_ANLPAR,		"mii_partner_cap"	},
28545181Sgd78059 	{	KS_MIIREG_ANER,			"mii_expansion_cap"	},
28555181Sgd78059 	{	KS_MIIREG_DSCSR,		"mii_dscsr"		},
28565181Sgd78059 
28575181Sgd78059 	{	-1,				NULL			}
28585181Sgd78059 };
28595181Sgd78059 
28605181Sgd78059 static void
28615181Sgd78059 dmfe_init_kstats(dmfe_t *dmfep, int instance)
28625181Sgd78059 {
28635181Sgd78059 	kstat_t *ksp;
28645181Sgd78059 	kstat_named_t *knp;
28655181Sgd78059 	const struct ks_index *ksip;
28665181Sgd78059 
28675181Sgd78059 	/* no need to create MII stats, the mac module already does it */
28685181Sgd78059 
28695181Sgd78059 	/* Create and initialise driver-defined kstats */
28705181Sgd78059 	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
28715181Sgd78059 	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
28725181Sgd78059 	if (ksp != NULL) {
28735181Sgd78059 		for (knp = ksp->ks_data, ksip = ks_drv_names;
28745181Sgd78059 		    ksip->name != NULL; ++ksip) {
28755181Sgd78059 			kstat_named_init(&knp[ksip->index], ksip->name,
28765181Sgd78059 			    KSTAT_DATA_UINT64);
28775181Sgd78059 		}
28785181Sgd78059 		dmfep->ksp_drv = ksp;
28795181Sgd78059 		dmfep->knp_drv = knp;
28805181Sgd78059 		kstat_install(ksp);
28815181Sgd78059 	} else {
28825181Sgd78059 		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
28835181Sgd78059 	}
28845181Sgd78059 }
28855181Sgd78059 
28865181Sgd78059 static int
28875181Sgd78059 dmfe_resume(dev_info_t *devinfo)
28885181Sgd78059 {
28895181Sgd78059 	dmfe_t *dmfep;				/* Our private data	*/
28905181Sgd78059 	chip_id_t chipid;
28915181Sgd78059 
28925181Sgd78059 	dmfep = ddi_get_driver_private(devinfo);
28935181Sgd78059 	if (dmfep == NULL)
28945181Sgd78059 		return (DDI_FAILURE);
28955181Sgd78059 
28965181Sgd78059 	/*
28975181Sgd78059 	 * Refuse to resume if the data structures aren't consistent
28985181Sgd78059 	 */
28995181Sgd78059 	if (dmfep->devinfo != devinfo)
29005181Sgd78059 		return (DDI_FAILURE);
29015181Sgd78059 
29025181Sgd78059 	/*
29035181Sgd78059 	 * Refuse to resume if the chip's changed its identity (*boggle*)
29045181Sgd78059 	 */
29055181Sgd78059 	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
29065181Sgd78059 		return (DDI_FAILURE);
29075181Sgd78059 	if (chipid.vendor != dmfep->chipid.vendor)
29085181Sgd78059 		return (DDI_FAILURE);
29095181Sgd78059 	if (chipid.device != dmfep->chipid.device)
29105181Sgd78059 		return (DDI_FAILURE);
29115181Sgd78059 	if (chipid.revision != dmfep->chipid.revision)
29125181Sgd78059 		return (DDI_FAILURE);
29135181Sgd78059 
29145181Sgd78059 	/*
29155181Sgd78059 	 * All OK, reinitialise h/w & kick off MAC scheduling
29165181Sgd78059 	 */
29175181Sgd78059 	mutex_enter(dmfep->oplock);
29185181Sgd78059 	dmfe_restart(dmfep);
29195181Sgd78059 	mutex_exit(dmfep->oplock);
29205181Sgd78059 	mac_tx_update(dmfep->mh);
29215181Sgd78059 	return (DDI_SUCCESS);
29225181Sgd78059 }
29235181Sgd78059 
29245181Sgd78059 /*
29255181Sgd78059  * attach(9E) -- Attach a device to the system
29265181Sgd78059  *
29275181Sgd78059  * Called once for each board successfully probed.
29285181Sgd78059  */
29295181Sgd78059 static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	dmfe_t *dmfep;				/* Our private data	*/
	uint32_t csr6;
	int instance;
	int err;

	instance = ddi_get_instance(devinfo);

	/*
	 * Only DDI_ATTACH and DDI_RESUME are supported; resume is
	 * handled entirely by dmfe_resume() and returns directly.
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (dmfe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate zeroed per-instance soft state and link it to the
	 * devinfo node.  The guard word written here is re-checked
	 * just before we return success, to catch corruption of the
	 * structure during the initialisation below.
	 */
	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
	ddi_set_driver_private(devinfo, dmfep);
	dmfep->devinfo = devinfo;
	dmfep->dmfe_guard = DMFE_GUARD;

	/*
	 * Initialize more fields in DMFE private data
	 * Determine the local MAC address
	 */
#if	DMFEDEBUG
	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
	    debug_propname, dmfe_debug);
#endif	/* DMFEDEBUG */
	dmfep->cycid = NULL;
	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
	    instance);

	/*
	 * Check for custom "opmode-reg-value" property;
	 * if none, use the defaults below for CSR6 ...
	 */
	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, opmode_propname, csr6);

	/*
	 * Read chip ID & set up config space command register(s).
	 *
	 * From here on, each successful step sets a PROGRESS_* bit;
	 * on any failure we jump to attach_fail, where
	 * dmfe_unattach() consults those bits to undo exactly what
	 * has been done so far.
	 */
	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
		dmfe_error(dmfep, "dmfe_config_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_CONFIG;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (dmfe_nd_init(dmfep)) {
		dmfe_error(dmfep, "dmfe_nd_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_NDD;

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_REGS;

	/*
	 * Get our MAC address.
	 */
	dmfe_find_mac_address(dmfep);

	/*
	 * Allocate the TX and RX descriptors/buffers.
	 */
	dmfep->tx.n_desc = dmfe_tx_desc;
	dmfep->rx.n_desc = dmfe_rx_desc;
	err = dmfe_alloc_bufs(dmfep);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handler (the "factotum", which handles
	 * link-state work at soft interrupt level).  The first link
	 * poll is scheduled factotum_start_tix ticks out.
	 */
	dmfep->link_poll_tix = factotum_start_tix;
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_SOFTINT;

	/*
	 * Add the h/w interrupt handler & initialise mutexen.
	 * oplock/rxlock/txlock are initialised with the interrupt
	 * block cookie obtained from ddi_add_intr(), so they may be
	 * taken from the h/w interrupt handler; milock (MII access)
	 * takes no cookie and is used from base level only.
	 */
	if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	dmfep->progress |= PROGRESS_HWINT;

	/*
	 * Create & initialise named kstats
	 */
	dmfe_init_kstats(dmfep, instance);

	/*
	 * Reset & initialise the chip and the ring buffers
	 * Initialise the (internal) PHY.  All three locks are held
	 * across the reset so no interrupt or softint path can see
	 * the chip in a half-initialised state.
	 */
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	dmfe_reset(dmfep);

	/*
	 * Prepare the setup packet
	 */
	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
	dmfep->addr_set = B_FALSE;
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	dmfep->mac_state = DMFE_MAC_RESET;

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	dmfep->link_state = LINK_STATE_UNKNOWN;
	if (dmfe_init_phy(dmfep) != B_TRUE)
		goto attach_fail;
	dmfep->update_phy = B_TRUE;

	/*
	 * Send a reasonable setup frame.  This configures our starting
	 * address and the broadcast address.
	 */
	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dmfep;
	macp->m_dip = devinfo;
	macp->m_src_addr = dmfep->curr_addr;
	macp->m_callbacks = &dmfe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start().
	 * The registration structure is freed whether or not the
	 * registration succeeded; only the handle (mh) is kept.
	 */
	err = mac_register(macp, &dmfep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	/*
	 * Install the cyclic callback that we use to check for link
	 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
	 * is invoked in kernel context then.
	 */
	ASSERT(dmfep->cycid == NULL);
	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
	    dmfe_tick_us * 1000, DDI_IPL_0);
	return (DDI_SUCCESS);

attach_fail:
	/* Tear down whatever the PROGRESS_* bits say was completed */
	dmfe_unattach(dmfep);
	return (DDI_FAILURE);
}
31255181Sgd78059 
31265181Sgd78059 /*
31275181Sgd78059  *	dmfe_suspend() -- suspend transmit/receive for powerdown
31285181Sgd78059  */
static int
dmfe_suspend(dmfe_t *dmfep)
{
	/*
	 * Just stop processing: halt the chip under the operations
	 * lock.  Soft state is left intact for a later DDI_RESUME
	 * (handled by dmfe_resume() from dmfe_attach()).
	 */
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);
	mutex_exit(dmfep->oplock);

	return (DDI_SUCCESS);
}
31415181Sgd78059 
31425181Sgd78059 /*
31435181Sgd78059  * detach(9E) -- Detach a device from the system
31445181Sgd78059  */
31455181Sgd78059 static int
31465181Sgd78059 dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
31475181Sgd78059 {
31485181Sgd78059 	dmfe_t *dmfep;
31495181Sgd78059 
31505181Sgd78059 	dmfep = ddi_get_driver_private(devinfo);
31515181Sgd78059 
31525181Sgd78059 	switch (cmd) {
31535181Sgd78059 	default:
31545181Sgd78059 		return (DDI_FAILURE);
31555181Sgd78059 
31565181Sgd78059 	case DDI_SUSPEND:
31575181Sgd78059 		return (dmfe_suspend(dmfep));
31585181Sgd78059 
31595181Sgd78059 	case DDI_DETACH:
31605181Sgd78059 		break;
31615181Sgd78059 	}
31625181Sgd78059 
31635181Sgd78059 	/*
31645181Sgd78059 	 * Unregister from the MAC subsystem.  This can fail, in
31655181Sgd78059 	 * particular if there are DLPI style-2 streams still open -
31665181Sgd78059 	 * in which case we just return failure without shutting
31675181Sgd78059 	 * down chip operations.
31685181Sgd78059 	 */
31695181Sgd78059 	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
31705181Sgd78059 		return (DDI_FAILURE);
31715181Sgd78059 
31725181Sgd78059 	/*
31735181Sgd78059 	 * All activity stopped, so we can clean up & exit
31745181Sgd78059 	 */
31755181Sgd78059 	dmfe_unattach(dmfep);
31765181Sgd78059 	return (DDI_SUCCESS);
31775181Sgd78059 }
31785181Sgd78059 
31795181Sgd78059 
31805181Sgd78059 /*
31815181Sgd78059  * ========== Module Loading Data & Entry Points ==========
31825181Sgd78059  */
31835181Sgd78059 
/*
 * dev_ops for a STREAMS-style driver: no identify/probe (nulldev),
 * no reset (nodev), MT-safe (D_MP), and quiesce(9E) not supported.
 */
DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
	nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

static struct modldrv dmfe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	dmfe_ident,		/* short description */
	&dmfe_dev_ops		/* driver specific ops */
};

/* Single-linkage module: just the one driver, NULL-terminated */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&dmfe_modldrv, NULL
};
31965181Sgd78059 
int
_info(struct modinfo *modinfop)
{
	/* Standard module-info entry point: delegate to the framework */
	return (mod_info(&modlinkage, modinfop));
}
32025181Sgd78059 
32035181Sgd78059 int
32045181Sgd78059 _init(void)
32055181Sgd78059 {
32065181Sgd78059 	uint32_t tmp100;
32075181Sgd78059 	uint32_t tmp10;
32085181Sgd78059 	int i;
32095181Sgd78059 	int status;
32105181Sgd78059 
32115181Sgd78059 	/* Calculate global timing parameters */
32125181Sgd78059 	tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
32135181Sgd78059 	tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
32145181Sgd78059 
32155181Sgd78059 	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
32165181Sgd78059 		switch (i) {
32175181Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
32185181Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
32195181Sgd78059 			/*
32205181Sgd78059 			 * The chip doesn't spontaneously recover from
32215181Sgd78059 			 * a stall in these states, so we reset early
32225181Sgd78059 			 */
32235181Sgd78059 			stall_100_tix[i] = tmp100;
32245181Sgd78059 			stall_10_tix[i] = tmp10;
32255181Sgd78059 			break;
32265181Sgd78059 
32275181Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
32285181Sgd78059 		default:
32295181Sgd78059 			/*
32305181Sgd78059 			 * The chip has been seen to spontaneously recover
32315181Sgd78059 			 * after an apparent stall in the SUSPEND state,
32325181Sgd78059 			 * so we'll allow it rather longer to do so.  As
32335181Sgd78059 			 * stalls in other states have not been observed,
32345181Sgd78059 			 * we'll use long timeouts for them too ...
32355181Sgd78059 			 */
32365181Sgd78059 			stall_100_tix[i] = tmp100 * 20;
32375181Sgd78059 			stall_10_tix[i] = tmp10 * 20;
32385181Sgd78059 			break;
32395181Sgd78059 		}
32405181Sgd78059 	}
32415181Sgd78059 
32425181Sgd78059 	factotum_tix = (dmfe_link_poll_us+dmfe_tick_us-1)/dmfe_tick_us;
32435181Sgd78059 	factotum_fast_tix = 1+(factotum_tix/5);
32445181Sgd78059 	factotum_start_tix = 1+(factotum_tix*2);
32455181Sgd78059 
32465181Sgd78059 	mac_init_ops(&dmfe_dev_ops, "dmfe");
32475181Sgd78059 	status = mod_install(&modlinkage);
32485181Sgd78059 	if (status == DDI_SUCCESS)
32495181Sgd78059 		dmfe_log_init();
32505181Sgd78059 
32515181Sgd78059 	return (status);
32525181Sgd78059 }
32535181Sgd78059 
32545181Sgd78059 int
32555181Sgd78059 _fini(void)
32565181Sgd78059 {
32575181Sgd78059 	int status;
32585181Sgd78059 
32595181Sgd78059 	status = mod_remove(&modlinkage);
32605181Sgd78059 	if (status == DDI_SUCCESS) {
32615181Sgd78059 		mac_fini_ops(&dmfe_dev_ops);
32625181Sgd78059 		dmfe_log_fini();
32635181Sgd78059 	}
32645181Sgd78059 
32655181Sgd78059 	return (status);
32665181Sgd78059 }
32675181Sgd78059 
32685181Sgd78059 #undef	DMFE_DBG
3269