xref: /onnv-gate/usr/src/uts/intel/io/amd8111s/amd8111s_main.c (revision 7656:2621e50fdf4a)
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws.  Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List.  Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */

/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/vlan.h>
#include "amd8111s_main.h"

/* Global macro definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define	INTERFACE_NAME "amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64
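/*
 * ROUNDUP(x, a) rounds x up to the next multiple of a (a must be a power
 * of two), e.g. ROUNDUP(1500, 128) == 1536.  AMD8111S_SPLIT is the number
 * of DMA "trunks" each Tx/Rx ring buffer is carved into by
 * amd8111s_alloc_dma_ringbuf(), and AMD8111S_SEND_MAX bounds how many
 * packets amd8111s_send_serial() queues onto the Tx descriptor ring per
 * call.
 */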

static char ident[] = "AMD8111 10/100M Ethernet";

/*
 * Driver Entry Points
 */
static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * GLD entry point prototypes
 */
static int amd8111s_m_unicst(void *, const uint8_t *);
static int amd8111s_m_promisc(void *, boolean_t);
static int amd8111s_m_stat(void *, uint_t, uint64_t *);
static void amd8111s_m_resources(void *arg);
static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
static int amd8111s_m_start(void *);
static void amd8111s_m_stop(void *);
static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
static uint_t amd8111s_intr(caddr_t);

static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);

static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
static int amd8111s_odlInit(struct LayerPointers *);
static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
static void amd8111s_free_descriptors(struct LayerPointers *);
static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
		struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);


static void amd8111s_log(struct LayerPointers *adapter, int level,
    char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,
	nulldev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nochpoll,
	ddi_prop_op,
	NULL,
	D_NEW | D_MP,
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev,			/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,	/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};
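
/*
 * Both DMA attribute sets above restrict mappings to 32-bit addresses and
 * a single cookie (dma_attr_sgllen == 1); descriptors additionally require
 * 16-byte alignment (dma_attr_align == 0x10), while data buffers may be
 * byte-aligned.
 */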

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

#define	AMD8111S_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL)


static mac_callbacks_t amd8111s_m_callbacks = {
	AMD8111S_M_CALLBACK_FLAGS,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	amd8111s_m_resources,
	amd8111s_m_ioctl
};


/*
 * Standard driver load entry point.
 * It is called when the driver is loaded into the system.
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard driver entry point for query.
 * It can be called at any time to return driver information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}

/*
 *	Standard driver entry point for unload.
 *	It is called when the driver is unloaded.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/* Adjust Interrupt Coalescing Register to coalesce interrupts */
static void
amd8111s_m_blank(void *arg, time_t ticks, uint32_t count)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}
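/*
 * The blank routine above is currently an empty stub; the coalescing
 * parameters are only advertised to the MAC layer through the
 * mac_rx_fifo_t filled in below (mrf_normal_blank_time/_pkt_count).
 */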

static void
amd8111s_m_resources(void *arg)
{
	struct LayerPointers *adapter = arg;
	mac_rx_fifo_t mrf;

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = amd8111s_m_blank;
	mrf.mrf_arg = (void *)adapter;
	mrf.mrf_normal_blank_time = 128;
	mrf.mrf_normal_pkt_count = 8;

	adapter->pOdl->mrh = mac_resource_add(adapter->pOdl->mh,
	    (mac_resource_t *)&mrf);
}

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE		},
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
};
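
/*
 * The loopmodes[] table above is what gets copied out to user space by the
 * LB_GET_INFO ioctl below; LB_SET_MODE then selects one of these entries
 * by its AMD8111S_LB_* value.
 */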

static void
amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
{

	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == adapter->pOdl->loopback_mode)
		return;

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return;

	case AMD8111S_LB_NONE:
		mdlStopChip(adapter);
		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    INLOOP);
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    EXLOOP);
		}

		amd8111s_reset(adapter);
		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
		adapter->pOdl->rx_fcs_stripped = B_FALSE;
		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_100:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 100;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	adapter->pOdl->loopback_mode = mode;
}

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Copy one packet from DMA memory into an mblk and advance the DMA
 * descriptor pointer.
 */
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor,
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0. This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err ++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM ++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO ++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter ++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow ++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC ++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF ++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			statistics->rx_error_zerosize ++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail ++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets ++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}

/*
 * Get the received packets from the NIC and pass them up to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval --;

	while (numOfPkts < RX_RING_SIZE) {

		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, pOdl->mrh, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);

}

/*
 * Print message in release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	auto char name[32];
	auto char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}

/*
 * Allocate and initialize all resources.
 * Called by amd8111s_attach().
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

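	/*
	 * mem_req_array is a zero-terminated list of (type, size) pairs
	 * filled in by milRequestResources().  For each VIRTUAL request the
	 * loop below records (type, size, allocated address) in
	 * mem_set_array, which is later handed to the mil layer through
	 * milSetResources().
	 */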
	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == NULL)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on the lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}

	/*
	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
	 * routine to fill physical address of Rx buffer into Rx descriptor.
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free All memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (pmem_req_array == NULL)
				return (1);
			kmem_free((int *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_handle for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptors related variables */
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);
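	/*
	 * ALIGNMENT is used here as a mask (assumed to be 2^n - 1): adding
	 * it and clearing the low bits rounds the virtual and DMA addresses
	 * up to the next (ALIGNMENT + 1)-byte boundary, matching the
	 * 16-byte dma_attr_align of pcn_desc_dma_attr_t.
	 */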

	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;


	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_handle for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptors related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE -1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}

/*
 * Allocate Tx/Rx Ring buffer
 */
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
			struct amd8111s_dma_ringbuf *pRing,
			uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
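
	/*
	 * The ring buffer is carved into trunk_num equal-sized DMA trunks;
	 * each amd8111s_msgbuf entry below records its offset, virtual
	 * address and DMA (physical) address within one of those trunks.
	 */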

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++ msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx ++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->dma_hdl[idx] == 0) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffers.
 * Allocate an Rx buffer for each Rx descriptor, then call the mil
 * routine to fill the physical address of each Rx buffer into the
 * Rx descriptors.
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}

/*
 * Free all Rx/Tx buffers
 */

static void
amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
{
	/* Free Tx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);

	/* Free Rx Buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
}

/*
 * Try to recycle all the descriptors and Tx buffers
 * which are already freed by hardware.
 */
static int
amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count = 0;

	pNonphysical = pLayerPointers->pMil->pNonphysical;
	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
		pLayerPointers->pOdl->tx_buf.free =
		    NEXT(pLayerPointers->pOdl->tx_buf, free);
		pNonphysical->TxDescQRead++;
		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
		}
		count ++;
	}

	if (pLayerPointers->pMil->tx_reschedule)
		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);

	return (count);
}

/*
 * Get packets in the Tx buffer, then copy them to the send buffer.
 * Trigger hardware to send out packets.
 */
static void
amd8111s_send_serial(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count;

	pNonphysical = pLayerPointers->pMil->pNonphysical;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);

	for (count = 0; count < AMD8111S_SEND_MAX; count ++) {
		if (pLayerPointers->pOdl->tx_buf.curr ==
		    pLayerPointers->pOdl->tx_buf.next) {
			break;
		}
		/*
		 * Recycle used Tx descriptors if the next write slot
		 * would collide with the read pointer.
		 */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor ++;
				break;
			}

		/* Fill packet length */
		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
		    ->pOdl->tx_buf.curr->msg_size;

		/* Fill physical buffer address */
		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;

		pNonphysical->TxDescQWrite->Tx_SOP = 1;
		pNonphysical->TxDescQWrite->Tx_EOP = 1;
		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
		pNonphysical->TxDescQWrite->Tx_OWN = 1;

		pNonphysical->TxDescQWrite++;
		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
		}

		pLayerPointers->pOdl->tx_buf.curr =
		    NEXT(pLayerPointers->pOdl->tx_buf, curr);

	}

	pLayerPointers->pOdl->statistics.tx_ok_packets += count;

	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	/* Call mdlTransmit to send the pkt out on the network */
	mdlTransmit(pLayerPointers);

}

/*
 * Softintr entry point: try to send out the packets queued in the Tx
 * buffer.  If reschedule is set, call mac_tx_update() to re-enable
 * transmission.
 */
static uint_t
amd8111s_send_drain(caddr_t arg)
{
	struct LayerPointers *pLayerPointers = (void *)arg;

	amd8111s_send_serial(pLayerPointers);

	if (pLayerPointers->pMil->tx_reschedule &&
	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
	    pLayerPointers->pOdl->tx_buf.free) {
		mac_tx_update(pLayerPointers->pOdl->mh);
		pLayerPointers->pMil->tx_reschedule = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Get a Tx buffer
 */
static struct amd8111s_msgbuf *
amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_msgbuf *tmp, *next;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
	if (next == pLayerPointers->pOdl->tx_buf.free) {
		tmp = NULL;
	} else {
		tmp = pLayerPointers->pOdl->tx_buf.next;
		pLayerPointers->pOdl->tx_buf.next = next;
	}
	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	return (tmp);
}

static boolean_t
amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
{
	struct odl *pOdl;
	size_t frag_len;
	mblk_t *tmp;
	struct amd8111s_msgbuf *txBuf;
	uint8_t *pMsg;

	pOdl = pLayerPointers->pOdl;

	/* alloc send buffer */
	txBuf = amd8111s_getTxbuf(pLayerPointers);
	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer ++;
		pLayerPointers->pMil->tx_reschedule = B_TRUE;
		amd8111s_send_serial(pLayerPointers);
		return (B_FALSE);
	}

13165256Slh155975 	/* copy packet to send buffer */
13175256Slh155975 	txBuf->msg_size = 0;
13185256Slh155975 	pMsg = (uint8_t *)txBuf->vir_addr;
13195256Slh155975 	for (tmp = mp; tmp; tmp = tmp->b_cont) {
13205256Slh155975 		frag_len = MBLKL(tmp);
13215256Slh155975 		bcopy(tmp->b_rptr, pMsg, frag_len);
13225256Slh155975 		txBuf->msg_size += frag_len;
13235256Slh155975 		pMsg += frag_len;
13245256Slh155975 	}
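	/*
	 * The whole chain has been flattened into one contiguous DMA buffer,
	 * so the original mblk chain is no longer needed.
	 */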
13255256Slh155975 	freemsg(mp);
13265256Slh155975 
13275256Slh155975 	amd8111s_send_serial(pLayerPointers);
13285256Slh155975 
13295256Slh155975 	return (B_TRUE);
13305256Slh155975 }
13315256Slh155975 
13325256Slh155975 /*
13335256Slh155975  * (GLD Entry Point) Send the message block to lower layer
13345256Slh155975  */
13355256Slh155975 static mblk_t *
13365256Slh155975 amd8111s_m_tx(void *arg, mblk_t *mp)
13375256Slh155975 {
13385256Slh155975 	struct LayerPointers *pLayerPointers = arg;
13395256Slh155975 	mblk_t *next;
13405256Slh155975 
13415256Slh155975 	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
13425256Slh155975 	if (!pLayerPointers->run) {
13435256Slh155975 		pLayerPointers->pOdl->statistics.tx_afterunplumb ++;
13445256Slh155975 		freemsgchain(mp);
13455256Slh155975 		mp = NULL;
13465256Slh155975 	}
13475256Slh155975 
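	/*
	 * Send the chain one message at a time.  If a message cannot be
	 * queued, relink the remainder and return it so GLD can retry
	 * after mac_tx_update() is called.
	 */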
13485256Slh155975 	while (mp != NULL) {
13495256Slh155975 		next = mp->b_next;
13505256Slh155975 		mp->b_next = NULL;
13515256Slh155975 		if (!amd8111s_send(pLayerPointers, mp)) {
13525256Slh155975 			/* Send fail */
13535256Slh155975 			mp->b_next = next;
13545256Slh155975 			break;
13555256Slh155975 		}
13565256Slh155975 		mp = next;
13575256Slh155975 	}
13585256Slh155975 
13595256Slh155975 	rw_exit(&pLayerPointers->pOdl->chip_lock);
13605256Slh155975 	return (mp);
13615256Slh155975 }
13625256Slh155975 
13635256Slh155975 /*
13645256Slh155975  * (GLD Entry Point) Interrupt Service Routine
13655256Slh155975  */
13665256Slh155975 static uint_t
13675256Slh155975 amd8111s_intr(caddr_t arg)
13685256Slh155975 {
13695256Slh155975 	unsigned int intrCauses;
13706990Sgd78059 	struct LayerPointers *pLayerPointers = (void *)arg;
13715256Slh155975 
13725256Slh155975 	/* Read the interrupt status from mdl */
13735256Slh155975 	intrCauses = mdlReadInterrupt(pLayerPointers);
13745256Slh155975 
13755256Slh155975 	if (intrCauses == 0) {
13765256Slh155975 		pLayerPointers->pOdl->statistics.intr_OTHER ++;
13775256Slh155975 		return (DDI_INTR_UNCLAIMED);
13785256Slh155975 	}
13795256Slh155975 
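	/*
	 * LCINT: link change interrupt.  Re-read the link state and report
	 * any transition to the MAC layer via mac_link_update().
	 */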
13805256Slh155975 	if (intrCauses & LCINT) {
13815256Slh155975 		if (mdlReadLink(pLayerPointers) == LINK_UP) {
13825256Slh155975 			mdlGetActiveMediaInfo(pLayerPointers);
13835256Slh155975 			/* Link status changed */
13845256Slh155975 			if (pLayerPointers->pOdl->LinkStatus !=
13855256Slh155975 			    LINK_STATE_UP) {
13865256Slh155975 				pLayerPointers->pOdl->LinkStatus =
13875256Slh155975 				    LINK_STATE_UP;
13885256Slh155975 				mac_link_update(pLayerPointers->pOdl->mh,
13895256Slh155975 				    LINK_STATE_UP);
13905256Slh155975 			}
13915256Slh155975 		} else {
13925256Slh155975 			if (pLayerPointers->pOdl->LinkStatus !=
13935256Slh155975 			    LINK_STATE_DOWN) {
13945256Slh155975 				pLayerPointers->pOdl->LinkStatus =
13955256Slh155975 				    LINK_STATE_DOWN;
13965256Slh155975 				mac_link_update(pLayerPointers->pOdl->mh,
13975256Slh155975 				    LINK_STATE_DOWN);
13985256Slh155975 			}
13995256Slh155975 		}
14005256Slh155975 	}
14015256Slh155975 	/*
14025256Slh155975 	 * RINT0: Receive Interrupt is set by the controller after the last
14035256Slh155975 	 * descriptor of a receive frame for this ring has been updated by
14045256Slh155975 	 * writing a 0 to the OWNership bit.
14055256Slh155975 	 */
14065256Slh155975 	if (intrCauses & RINT0) {
14075256Slh155975 		pLayerPointers->pOdl->statistics.intr_RINT0 ++;
14085256Slh155975 		amd8111s_receive(pLayerPointers);
14095256Slh155975 	}
14105256Slh155975 
14115256Slh155975 	/*
14125256Slh155975 	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
14135256Slh155975 	 * in the last descriptor of a transmit frame in this particular ring
14145256Slh155975 	 * has been cleared to indicate the frame has been copied to the
14155256Slh155975 	 * transmit FIFO.
14165256Slh155975 	 */
14175256Slh155975 	if (intrCauses & TINT0) {
14185256Slh155975 		pLayerPointers->pOdl->statistics.intr_TINT0 ++;
14195256Slh155975 		/*
14205256Slh155975 		 * if desc ring is NULL and tx buf is not NULL, it should
14215256Slh155975 		 * drain tx buffer
14225256Slh155975 		 */
14235256Slh155975 		amd8111s_send_serial(pLayerPointers);
14245256Slh155975 	}
14255256Slh155975 
14265256Slh155975 	if (intrCauses & STINT) {
14275256Slh155975 		pLayerPointers->pOdl->statistics.intr_STINT ++;
14285256Slh155975 	}
14295256Slh155975 
14315256Slh155975 	return (DDI_INTR_CLAIMED);
14325256Slh155975 }
14335256Slh155975 
14345256Slh155975 /*
14355256Slh155975  * To re-initilize data structures.
14365256Slh155975  */
14375256Slh155975 static void
14385256Slh155975 amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
14395256Slh155975 {
14405256Slh155975 	/* Reset all Tx/Rx queues and descriptors */
14415256Slh155975 	milResetTxQ(pLayerPointers);
14425256Slh155975 	milInitRxQ(pLayerPointers);
14435256Slh155975 }
14445256Slh155975 
14455256Slh155975 /*
14465256Slh155975  * Send all pending tx packets
14475256Slh155975  */
14485256Slh155975 static void
14495256Slh155975 amd8111s_tx_drain(struct LayerPointers *adapter)
14505256Slh155975 {
14515256Slh155975 	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
14525256Slh155975 	int i, desc_count = 0;
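	/*
	 * Poll the Tx descriptor ring for up to 30 ms (30 passes of 1 ms),
	 * waiting for the controller to clear the OWN bit on every descriptor.
	 */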
14535256Slh155975 	for (i = 0; i < 30; i++) {
14545256Slh155975 		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
14555256Slh155975 			/* This packet has been transmitted */
14565256Slh155975 			pTx_desc ++;
14575256Slh155975 			desc_count ++;
14585256Slh155975 		}
14595256Slh155975 		if (desc_count == TX_RING_SIZE) {
14605256Slh155975 			break;
14615256Slh155975 		}
14625256Slh155975 		/* Wait 1 ms */
14635256Slh155975 		drv_usecwait(1000);
14645256Slh155975 	}
14655256Slh155975 	adapter->pOdl->statistics.tx_draintime = i;
14665256Slh155975 }
14675256Slh155975 
14685256Slh155975 /*
14695256Slh155975  * (GLD Entry Point) Start the card; called at
14705256Slh155975  * ifconfig plumb time.
14715256Slh155975  */
14725256Slh155975 static int
14735256Slh155975 amd8111s_m_start(void *arg)
14745256Slh155975 {
14755256Slh155975 	struct LayerPointers *pLayerPointers = arg;
14765256Slh155975 	struct odl *pOdl = pLayerPointers->pOdl;
14775256Slh155975 
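	/*
	 * Reset the software descriptor queues and the controller, then mark
	 * the interface as running under the chip lock so amd8111s_m_tx()
	 * starts accepting packets.
	 */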
14785256Slh155975 	amd8111s_sw_reset(pLayerPointers);
14795256Slh155975 	mdlHWReset(pLayerPointers);
14805256Slh155975 	rw_enter(&pOdl->chip_lock, RW_WRITER);
14815256Slh155975 	pLayerPointers->run = B_TRUE;
14825256Slh155975 	rw_exit(&pOdl->chip_lock);
14835256Slh155975 	return (0);
14845256Slh155975 }
14855256Slh155975 
14865256Slh155975 /*
14875256Slh155975  * (GLD Entry Point) Stop the card; called at
14885256Slh155975  * ifconfig unplumb time.
14895256Slh155975  */
14905256Slh155975 static void
14915256Slh155975 amd8111s_m_stop(void *arg)
14925256Slh155975 {
14935256Slh155975 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
14945256Slh155975 	struct odl *pOdl = pLayerPointers->pOdl;
14955256Slh155975 
14965256Slh155975 	/* Ensure all pending tx packets are sent */
14975256Slh155975 	amd8111s_tx_drain(pLayerPointers);
14985256Slh155975 	/*
14995256Slh155975 	 * Stop the controller and disable the controller interrupt
15005256Slh155975 	 */
15015256Slh155975 	rw_enter(&pOdl->chip_lock, RW_WRITER);
15025256Slh155975 	mdlStopChip(pLayerPointers);
15035256Slh155975 	pLayerPointers->run = B_FALSE;
15045256Slh155975 	rw_exit(&pOdl->chip_lock);
15055256Slh155975 }
15065256Slh155975 
15075256Slh155975 /*
15085256Slh155975  *	Clean up all allocated resources.
15095256Slh155975  */
15105256Slh155975 static void
15115256Slh155975 amd8111s_free_resource(struct LayerPointers *pLayerPointers)
15125256Slh155975 {
15135256Slh155975 	unsigned long mem_free_array[100];
15145256Slh155975 	unsigned long *pmem_free_array, size;
15155256Slh155975 
15165256Slh155975 	/* Free Rx/Tx descriptors */
15175256Slh155975 	amd8111s_free_descriptors(pLayerPointers);
15185256Slh155975 
15195256Slh155975 	/* Free memory on lower layers */
15205256Slh155975 	milFreeResources(pLayerPointers, mem_free_array);
15215256Slh155975 	pmem_free_array = mem_free_array;
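	/*
	 * mem_free_array is a zero-terminated list: a VIRTUAL tag is followed
	 * by the size and then the virtual address of a kmem-allocated region
	 * to release.
	 */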
15225256Slh155975 	while (*pmem_free_array) {
15235256Slh155975 		switch (*pmem_free_array) {
15245256Slh155975 		case VIRTUAL:
15255256Slh155975 			size = *(++pmem_free_array);
15265256Slh155975 			pmem_free_array++;
15275256Slh155975 			kmem_free((void *)*(pmem_free_array), size);
15285256Slh155975 			break;
15295256Slh155975 		}
15305256Slh155975 		pmem_free_array++;
15315256Slh155975 	}
15325256Slh155975 
15335256Slh155975 	amd8111s_free_buffers(pLayerPointers);
15345256Slh155975 }
15355256Slh155975 
15365256Slh155975 /*
15375256Slh155975  * (GLD Entry Point) Add/delete multicast addresses
15385256Slh155975  *
15395256Slh155975  */
15405256Slh155975 static int
15415256Slh155975 amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
15425256Slh155975 {
15435256Slh155975 	struct LayerPointers *pLayerPointers = arg;
15445256Slh155975 
15455256Slh155975 	if (add) {
15465256Slh155975 		/* Add a multicast entry */
15475256Slh155975 		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
15485256Slh155975 	} else {
15495256Slh155975 		/* Delete a multicast entry */
15505256Slh155975 		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
15515256Slh155975 	}
15525256Slh155975 
15535256Slh155975 	return (0);
15545256Slh155975 }
15555256Slh155975 
15565256Slh155975 #ifdef AMD8111S_DEBUG
15575256Slh155975 /*
15585256Slh155975  * The size of MIB registers is only 32 bits. Dump them before one
15595256Slh155975  * of them overflows.
15605256Slh155975  */
15615256Slh155975 static void
15625256Slh155975 amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
15635256Slh155975 {
15645256Slh155975 	struct amd8111s_statistics *adapterStat;
15655256Slh155975 
15665256Slh155975 	adapterStat = &pLayerPointers->pOdl->statistics;
15675256Slh155975 
15685256Slh155975 	adapterStat->mib_dump_counter ++;
15695256Slh155975 
15705256Slh155975 	/*
15715256Slh155975 	 * Rx Counters
15725256Slh155975 	 */
15735256Slh155975 	adapterStat->rx_mib_unicst_packets +=
15745256Slh155975 	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
15755256Slh155975 	adapterStat->rx_mib_multicst_packets +=
15765256Slh155975 	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
15775256Slh155975 	adapterStat->rx_mib_broadcst_packets +=
15785256Slh155975 	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
15795256Slh155975 	adapterStat->rx_mib_macctrl_packets +=
15805256Slh155975 	    mdlReadMib(pLayerPointers, RcvMACCtrl);
15815256Slh155975 	adapterStat->rx_mib_flowctrl_packets +=
15825256Slh155975 	    mdlReadMib(pLayerPointers, RcvFlowCtrl);
15835256Slh155975 
15845256Slh155975 	adapterStat->rx_mib_bytes +=
15855256Slh155975 	    mdlReadMib(pLayerPointers, RcvOctets);
15865256Slh155975 	adapterStat->rx_mib_good_bytes +=
15875256Slh155975 	    mdlReadMib(pLayerPointers, RcvGoodOctets);
15885256Slh155975 
15895256Slh155975 	adapterStat->rx_mib_undersize_packets +=
15905256Slh155975 	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
15915256Slh155975 	adapterStat->rx_mib_oversize_packets +=
15925256Slh155975 	    mdlReadMib(pLayerPointers, RcvOversizePkts);
15935256Slh155975 
15945256Slh155975 	adapterStat->rx_mib_drop_packets +=
15955256Slh155975 	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
15965256Slh155975 	adapterStat->rx_mib_align_err_packets +=
15975256Slh155975 	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
15985256Slh155975 	adapterStat->rx_mib_fcs_err_packets +=
15995256Slh155975 	    mdlReadMib(pLayerPointers, RcvFCSErrors);
16005256Slh155975 	adapterStat->rx_mib_symbol_err_packets +=
16015256Slh155975 	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
16025256Slh155975 	adapterStat->rx_mib_miss_packets +=
16035256Slh155975 	    mdlReadMib(pLayerPointers, RcvMissPkts);
16045256Slh155975 
16055256Slh155975 	/*
16065256Slh155975 	 * Tx Counters
16075256Slh155975 	 */
16085256Slh155975 	adapterStat->tx_mib_packets +=
16095256Slh155975 	    mdlReadMib(pLayerPointers, XmtPackets);
16105256Slh155975 	adapterStat->tx_mib_multicst_packets +=
16115256Slh155975 	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
16125256Slh155975 	adapterStat->tx_mib_broadcst_packets +=
16135256Slh155975 	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
16145256Slh155975 	adapterStat->tx_mib_flowctrl_packets +=
16155256Slh155975 	    mdlReadMib(pLayerPointers, XmtFlowCtrl);
16165256Slh155975 
16175256Slh155975 	adapterStat->tx_mib_bytes +=
16185256Slh155975 	    mdlReadMib(pLayerPointers, XmtOctets);
16195256Slh155975 
16205256Slh155975 	adapterStat->tx_mib_defer_trans_packets +=
16215256Slh155975 	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
16225256Slh155975 	adapterStat->tx_mib_collision_packets +=
16235256Slh155975 	    mdlReadMib(pLayerPointers, XmtCollisions);
16245256Slh155975 	adapterStat->tx_mib_one_coll_packets +=
16255256Slh155975 	    mdlReadMib(pLayerPointers, XmtOneCollision);
16265256Slh155975 	adapterStat->tx_mib_multi_coll_packets +=
16275256Slh155975 	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
16285256Slh155975 	adapterStat->tx_mib_late_coll_packets +=
16295256Slh155975 	    mdlReadMib(pLayerPointers, XmtLateCollision);
16305256Slh155975 	adapterStat->tx_mib_ex_coll_packets +=
16315256Slh155975 	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
16325256Slh155975 
16345256Slh155975 	/* Clear all MIB registers */
16355256Slh155975 	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
16365256Slh155975 	    + MIB_ADDR, MIB_CLEAR);
16375256Slh155975 }
16385256Slh155975 #endif
16395256Slh155975 
16405256Slh155975 /*
16415256Slh155975  * (GLD Entry Point) Set/unset promiscuous mode
16425256Slh155975  */
16435256Slh155975 static int
16445256Slh155975 amd8111s_m_promisc(void *arg, boolean_t on)
16455256Slh155975 {
16465256Slh155975 	struct LayerPointers *pLayerPointers = arg;
16475256Slh155975 
16485256Slh155975 	if (on) {
16495256Slh155975 		mdlSetPromiscuous(pLayerPointers);
16505256Slh155975 	} else {
16515256Slh155975 		mdlDisablePromiscuous(pLayerPointers);
16525256Slh155975 	}
16535256Slh155975 
16545256Slh155975 	return (0);
16555256Slh155975 }
16565256Slh155975 
16575256Slh155975 /*
16585256Slh155975  * (GLD Entry Point) Change the MAC address of the card
16595256Slh155975  */
16605256Slh155975 static int
16615256Slh155975 amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
16625256Slh155975 {
16635256Slh155975 	struct LayerPointers *pLayerPointers = arg;
16645256Slh155975 
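	/*
	 * Quiesce controller interrupts while the station address registers
	 * are rewritten.
	 */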
16655256Slh155975 	mdlDisableInterrupt(pLayerPointers);
16665256Slh155975 	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
16675256Slh155975 	mdlEnableInterrupt(pLayerPointers);
16685256Slh155975 
16695256Slh155975 	return (0);
16705256Slh155975 }
16715256Slh155975 
16725256Slh155975 /*
16735256Slh155975  * Reset the card
16745256Slh155975  */
16755256Slh155975 void
16765256Slh155975 amd8111s_reset(struct LayerPointers *pLayerPointers)
16775256Slh155975 {
16785256Slh155975 	amd8111s_sw_reset(pLayerPointers);
16795256Slh155975 	mdlHWReset(pLayerPointers);
16805256Slh155975 }
16815256Slh155975 
16825256Slh155975 /*
16835256Slh155975  * attach(9E) -- Attach a device to the system
16845256Slh155975  *
16855256Slh155975  * Called once for each board after it has been successfully probed.
16865256Slh155975  * It will:
16875256Slh155975  * 	a. create the minor device node for the instance.
16885256Slh155975  *	b. allocate & initialize the four layers (call odlInit).
16895256Slh155975  *	c. get the MAC address.
16905256Slh155975  *	d. set pLayerPointers as the GLD private pointer.
16915256Slh155975  *	e. register with GLD.
16925256Slh155975  * If any action fails, it cleans up and returns DDI_FAILURE;
16935256Slh155975  * otherwise it returns DDI_SUCCESS.
16945256Slh155975  */
16955256Slh155975 static int
16965256Slh155975 amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
16975256Slh155975 {
16985256Slh155975 	mac_register_t *macp;
16995256Slh155975 	struct LayerPointers *pLayerPointers;
17005256Slh155975 	struct odl *pOdl;
17015256Slh155975 	ddi_acc_handle_t *pci_handle;
17025256Slh155975 	ddi_device_acc_attr_t dev_attr;
17035256Slh155975 	caddr_t addrp = NULL;
17045256Slh155975 
17055256Slh155975 	switch (cmd) {
17065256Slh155975 	case DDI_ATTACH:
17075256Slh155975 		break;
17085256Slh155975 	default:
17095256Slh155975 		return (DDI_FAILURE);
17105256Slh155975 	}
17115256Slh155975 
17125256Slh155975 	pLayerPointers = (struct LayerPointers *)
17135256Slh155975 	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
17145256Slh155975 	amd8111sadapter = pLayerPointers;
17155256Slh155975 
17165256Slh155975 	/* Get device instance number */
17175256Slh155975 	pLayerPointers->instance = ddi_get_instance(devinfo);
17185256Slh155975 	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);
17195256Slh155975 
17205256Slh155975 	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
17215256Slh155975 	pLayerPointers->pOdl = pOdl;
17225256Slh155975 
17235256Slh155975 	pOdl->devinfo = devinfo;
17245256Slh155975 
17255256Slh155975 	/*
17265256Slh155975 	 * Here, we only allocate memory for struct odl and initialize it.
17275256Slh155975 	 * All other memory allocation & initialization will be done in odlInit
17285256Slh155975 	 * later in this routine.
17295256Slh155975 	 */
17305256Slh155975 	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
17315256Slh155975 	    != DDI_SUCCESS) {
17325256Slh155975 		amd8111s_log(pLayerPointers, CE_NOTE,
17335256Slh155975 		    "attach: get iblock cookies failed");
17345256Slh155975 		goto attach_failure;
17355256Slh155975 	}
17365256Slh155975 
17375256Slh155975 	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
17385256Slh155975 	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
17395256Slh155975 	    MUTEX_DRIVER, (void *)pOdl->iblock);
17405256Slh155975 	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
17415256Slh155975 	    MUTEX_DRIVER, (void *)pOdl->iblock);
17425256Slh155975 
17435256Slh155975 	/* Setup PCI space */
17445256Slh155975 	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
17455256Slh155975 		return (DDI_FAILURE);
17465256Slh155975 	}
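	/*
	 * attach_progress records each resource as it is acquired, so that
	 * amd8111s_unattach() can tear down only what was actually set up
	 * if the attach fails part way through.
	 */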
17475256Slh155975 	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
17485256Slh155975 	pci_handle = &pOdl->pci_handle;
17495256Slh155975 
17505256Slh155975 	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
17515256Slh155975 	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);
17525256Slh155975 
17535256Slh155975 	/*
17545256Slh155975 	 * Allocate and initialize all resource and map device registers.
17555256Slh155975 	 * If failed, it returns a non-zero value.
17565256Slh155975 	 */
17575256Slh155975 	if (amd8111s_odlInit(pLayerPointers) != 0) {
17585256Slh155975 		goto attach_failure;
17595256Slh155975 	}
17605256Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;
17615256Slh155975 
17625256Slh155975 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
17635256Slh155975 	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
17645256Slh155975 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
17655256Slh155975 
17665256Slh155975 	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0,  4096, &dev_attr,
17675256Slh155975 	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
17685256Slh155975 		amd8111s_log(pLayerPointers, CE_NOTE,
17695256Slh155975 		    "attach: ddi_regs_map_setup failed");
17705256Slh155975 		goto attach_failure;
17715256Slh155975 	}
17725256Slh155975 	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;
17735256Slh155975 
17745256Slh155975 	/* Initialize HW */
17755256Slh155975 	mdlOpen(pLayerPointers);
17765256Slh155975 	mdlGetActiveMediaInfo(pLayerPointers);
17775256Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;
17785256Slh155975 
17795256Slh155975 	/*
17805256Slh155975 	 * Setup the interrupt
17815256Slh155975 	 */
17825256Slh155975 	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
17835256Slh155975 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
17845256Slh155975 		goto attach_failure;
17855256Slh155975 	}
17865256Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;
17875256Slh155975 
17885256Slh155975 	/*
17895256Slh155975 	 * Setup soft intr
17905256Slh155975 	 */
17915256Slh155975 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
17925256Slh155975 	    NULL, NULL, amd8111s_send_drain,
17935256Slh155975 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
17945256Slh155975 		goto attach_failure;
17955256Slh155975 	}
17965256Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;
17975256Slh155975 
17985256Slh155975 	/*
17995256Slh155975 	 * Initialize the mac structure
18005256Slh155975 	 */
18015256Slh155975 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
18025256Slh155975 		goto attach_failure;
18035256Slh155975 
18045256Slh155975 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
18055256Slh155975 	macp->m_driver = pLayerPointers;
18065256Slh155975 	macp->m_dip = devinfo;
18075256Slh155975 	/* Get MAC address */
18085256Slh155975 	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
18095256Slh155975 	macp->m_src_addr = pOdl->MacAddress;
18105256Slh155975 	macp->m_callbacks = &amd8111s_m_callbacks;
18115256Slh155975 	macp->m_min_sdu = 0;
18125256Slh155975 	/* 1518 - 14 (ether header) - 4 (CRC) */
18135256Slh155975 	macp->m_max_sdu = ETHERMTU;
18145895Syz147064 	macp->m_margin = VLAN_TAGSZ;
18155256Slh155975 
18165256Slh155975 	/*
18175256Slh155975 	 * Finally, we're ready to register ourselves with the MAC layer
18185256Slh155975 	 * interface; if this succeeds, we're ready to start.
18195256Slh155975 	 */
18205256Slh155975 	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
18215256Slh155975 		mac_free(macp);
18225256Slh155975 		goto attach_failure;
18235256Slh155975 	}
18245256Slh155975 	mac_free(macp);
18255256Slh155975 
18265256Slh155975 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;
18275256Slh155975 
18285256Slh155975 	return (DDI_SUCCESS);
18295256Slh155975 
18305256Slh155975 attach_failure:
18315256Slh155975 	(void) amd8111s_unattach(devinfo, pLayerPointers);
18325256Slh155975 	return (DDI_FAILURE);
18335256Slh155975 
18345256Slh155975 }
18355256Slh155975 
18365256Slh155975 /*
18375256Slh155975  * detach(9E) -- Detach a device from the system
18385256Slh155975  *
18395256Slh155975  * It is called for each device instance when the system is preparing to
18405256Slh155975  * unload a dynamically unloadable driver.
18415256Slh155975  * It will:
18425256Slh155975  * 	a. check whether any driver buffers are still held by the OS.
18435256Slh155975  *	b. clean up all allocated memory if it is not in use by the OS.
18445256Slh155975  *	c. unregister with GLD.
18455256Slh155975  *	d. return DDI_SUCCESS on a successful free & unregister,
18465256Slh155975  *	else DDI_FAILURE.
18475256Slh155975  */
18485256Slh155975 static int
18495256Slh155975 amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
18505256Slh155975 {
18515256Slh155975 	struct LayerPointers *pLayerPointers;
18525256Slh155975 
18535256Slh155975 	switch (cmd) {
18545256Slh155975 	case DDI_DETACH:
18555256Slh155975 		break;
18565256Slh155975 	default:
18575256Slh155975 		return (DDI_FAILURE);
18585256Slh155975 	}
18595256Slh155975 
18605256Slh155975 	/*
18615256Slh155975 	 * Get the driver private (struct LayerPointers *) structure
18625256Slh155975 	 */
18635256Slh155975 	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
18645256Slh155975 	    (devinfo)) == NULL) {
18655256Slh155975 		return (DDI_FAILURE);
18665256Slh155975 	}
18675256Slh155975 
18685256Slh155975 	return (amd8111s_unattach(devinfo, pLayerPointers));
18695256Slh155975 }
18705256Slh155975 
18715256Slh155975 static int
18725256Slh155975 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
18735256Slh155975 {
18745256Slh155975 	struct odl *pOdl = pLayerPointers->pOdl;
18755256Slh155975 
18765256Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
18775256Slh155975 		/* Unregister driver from the GLD interface */
18785256Slh155975 		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
18795256Slh155975 			return (DDI_FAILURE);
18805256Slh155975 		}
18815256Slh155975 	}
18825256Slh155975 
18835256Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
18845256Slh155975 		ddi_remove_intr(devinfo, 0, pOdl->iblock);
18855256Slh155975 	}
18865256Slh155975 
18875256Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
18885256Slh155975 		ddi_remove_softintr(pOdl->drain_id);
18895256Slh155975 	}
18905256Slh155975 
18915256Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
18925256Slh155975 		/* Stop HW */
18935256Slh155975 		mdlStopChip(pLayerPointers);
18945256Slh155975 		ddi_regs_map_free(&(pOdl->MemBasehandle));
18955256Slh155975 	}
18965256Slh155975 
18975256Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
18985256Slh155975 		/* Free All memory allocated */
18995256Slh155975 		amd8111s_free_resource(pLayerPointers);
19005256Slh155975 	}
19015256Slh155975 
19025256Slh155975 	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
19035256Slh155975 		pci_config_teardown(&pOdl->pci_handle);
19045256Slh155975 		mutex_destroy(&pOdl->mdlSendLock);
19055256Slh155975 		mutex_destroy(&pOdl->mdlRcvLock);
19065256Slh155975 		rw_destroy(&pOdl->chip_lock);
19075256Slh155975 	}
19085256Slh155975 
19095256Slh155975 	kmem_free(pOdl, sizeof (struct odl));
19105256Slh155975 	kmem_free(pLayerPointers, sizeof (struct LayerPointers));
19115256Slh155975 
19125256Slh155975 	return (DDI_SUCCESS);
19135256Slh155975 }
19145256Slh155975 
19155256Slh155975 /*
19165256Slh155975  * (GLD Entry Point) GLD will call this entry point periodically to
19175256Slh155975  * get driver statistics.
19185256Slh155975  */
19195256Slh155975 static int
19205256Slh155975 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
19215256Slh155975 {
19225256Slh155975 	struct LayerPointers *pLayerPointers = arg;
19235256Slh155975 	struct amd8111s_statistics *adapterStat;
19245256Slh155975 
19255256Slh155975 	adapterStat = &pLayerPointers->pOdl->statistics;
19265256Slh155975 
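	/*
	 * Most counters are reported as the value accumulated in the soft
	 * statistics structure plus the controller's current 32-bit MIB
	 * value.
	 */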
19275256Slh155975 	switch (stat) {
19285256Slh155975 
19295256Slh155975 	/*
19305256Slh155975 	 * Current Status
19315256Slh155975 	 */
19325256Slh155975 	case MAC_STAT_IFSPEED:
19335256Slh155975 		*val = pLayerPointers->pMdl->Speed * 1000000;
19345256Slh155975 		break;
19355256Slh155975 
19365256Slh155975 	case ETHER_STAT_LINK_DUPLEX:
19375256Slh155975 		if (pLayerPointers->pMdl->FullDuplex) {
19385256Slh155975 			*val = LINK_DUPLEX_FULL;
19395256Slh155975 		} else {
19405256Slh155975 			*val = LINK_DUPLEX_HALF;
19415256Slh155975 		}
19425256Slh155975 		break;
19435256Slh155975 
19445256Slh155975 	/*
19455256Slh155975 	 * Capabilities
19465256Slh155975 	 */
19475256Slh155975 	case ETHER_STAT_CAP_1000FDX:
19485256Slh155975 		*val = 0;
19495256Slh155975 		break;
19505256Slh155975 
19515256Slh155975 	case ETHER_STAT_CAP_1000HDX:
19525256Slh155975 		*val = 0;
19535256Slh155975 		break;
19545256Slh155975 
19555256Slh155975 	case ETHER_STAT_CAP_100FDX:
19565256Slh155975 		*val = 1;
19575256Slh155975 		break;
19585256Slh155975 
19595256Slh155975 	case ETHER_STAT_CAP_100HDX:
19605256Slh155975 		*val = 1;
19615256Slh155975 		break;
19625256Slh155975 
19635256Slh155975 	case ETHER_STAT_CAP_10FDX:
19645256Slh155975 		*val = 1;
19655256Slh155975 		break;
19665256Slh155975 
19675256Slh155975 	case ETHER_STAT_CAP_10HDX:
19685256Slh155975 		*val = 1;
19695256Slh155975 		break;
19705256Slh155975 
19715256Slh155975 	case ETHER_STAT_CAP_ASMPAUSE:
19725256Slh155975 		*val = 1;
19735256Slh155975 		break;
19745256Slh155975 
19755256Slh155975 	case ETHER_STAT_CAP_PAUSE:
19765256Slh155975 		*val = 1;
19775256Slh155975 		break;
19785256Slh155975 
19795256Slh155975 	case ETHER_STAT_CAP_AUTONEG:
19805256Slh155975 		*val = 1;
19815256Slh155975 		break;
19825256Slh155975 
19835256Slh155975 	case ETHER_STAT_ADV_CAP_1000FDX:
19845256Slh155975 		*val = 0;
19855256Slh155975 		break;
19865256Slh155975 
19875256Slh155975 	case ETHER_STAT_ADV_CAP_1000HDX:
19885256Slh155975 		*val = 0;
19895256Slh155975 		break;
19905256Slh155975 
19915256Slh155975 	case ETHER_STAT_ADV_CAP_100FDX:
19925256Slh155975 		*val = 1;
19935256Slh155975 		break;
19945256Slh155975 
19955256Slh155975 	case ETHER_STAT_ADV_CAP_100HDX:
19965256Slh155975 		*val = 1;
19975256Slh155975 		break;
19985256Slh155975 
19995256Slh155975 	case ETHER_STAT_ADV_CAP_10FDX:
20005256Slh155975 		*val = 1;
20015256Slh155975 		break;
20025256Slh155975 
20035256Slh155975 	case ETHER_STAT_ADV_CAP_10HDX:
20045256Slh155975 		*val = 1;
20055256Slh155975 		break;
20065256Slh155975 
20075256Slh155975 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
20085256Slh155975 		*val = 1;
20095256Slh155975 		break;
20105256Slh155975 
20115256Slh155975 	case ETHER_STAT_ADV_CAP_PAUSE:
20125256Slh155975 		*val = 1;
20135256Slh155975 		break;
20145256Slh155975 
20155256Slh155975 	case ETHER_STAT_ADV_CAP_AUTONEG:
20165256Slh155975 		*val = 1;
20175256Slh155975 		break;
20185256Slh155975 
20195256Slh155975 	/*
20205256Slh155975 	 * Rx Counters
20215256Slh155975 	 */
20225256Slh155975 	case MAC_STAT_IPACKETS:
20235256Slh155975 		*val = adapterStat->rx_mib_unicst_packets +
20245256Slh155975 		    adapterStat->rx_mib_multicst_packets +
20255256Slh155975 		    adapterStat->rx_mib_broadcst_packets +
20265256Slh155975 		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
20275256Slh155975 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
20285256Slh155975 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
20295256Slh155975 		break;
20305256Slh155975 
20315256Slh155975 	case MAC_STAT_RBYTES:
20325256Slh155975 		*val = adapterStat->rx_mib_bytes +
20335256Slh155975 		    mdlReadMib(pLayerPointers, RcvOctets);
20345256Slh155975 		break;
20355256Slh155975 
20365256Slh155975 	case MAC_STAT_MULTIRCV:
20375256Slh155975 		*val = adapterStat->rx_mib_multicst_packets +
20385256Slh155975 		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
20395256Slh155975 		break;
20405256Slh155975 
20415256Slh155975 	case MAC_STAT_BRDCSTRCV:
20425256Slh155975 		*val = adapterStat->rx_mib_broadcst_packets +
20435256Slh155975 		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
20445256Slh155975 		break;
20455256Slh155975 
20465256Slh155975 	case MAC_STAT_NORCVBUF:
20475256Slh155975 		*val = adapterStat->rx_allocfail +
20485256Slh155975 		    adapterStat->rx_mib_drop_packets +
20495256Slh155975 		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
20505256Slh155975 		break;
20515256Slh155975 
20525256Slh155975 	case MAC_STAT_IERRORS:
20535256Slh155975 		*val = adapterStat->rx_mib_align_err_packets +
20545256Slh155975 		    adapterStat->rx_mib_fcs_err_packets +
20555256Slh155975 		    adapterStat->rx_mib_symbol_err_packets +
20565256Slh155975 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
20575256Slh155975 		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
20585256Slh155975 		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
20595256Slh155975 		break;
20605256Slh155975 
20615256Slh155975 	case ETHER_STAT_ALIGN_ERRORS:
20625256Slh155975 		*val = adapterStat->rx_mib_align_err_packets +
20635256Slh155975 		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
20645256Slh155975 		break;
20655256Slh155975 
20665256Slh155975 	case ETHER_STAT_FCS_ERRORS:
20675256Slh155975 		*val = adapterStat->rx_mib_fcs_err_packets +
20685256Slh155975 		    mdlReadMib(pLayerPointers, RcvFCSErrors);
20695256Slh155975 		break;
20705256Slh155975 
20715256Slh155975 	/*
20725256Slh155975 	 * Tx Counters
20735256Slh155975 	 */
20745256Slh155975 	case MAC_STAT_OPACKETS:
20755256Slh155975 		*val = adapterStat->tx_mib_packets +
20765256Slh155975 		    mdlReadMib(pLayerPointers, XmtPackets);
20775256Slh155975 		break;
20785256Slh155975 
20795256Slh155975 	case MAC_STAT_OBYTES:
20805256Slh155975 		*val = adapterStat->tx_mib_bytes +
20815256Slh155975 		    mdlReadMib(pLayerPointers, XmtOctets);
20825256Slh155975 		break;
20835256Slh155975 
20845256Slh155975 	case MAC_STAT_MULTIXMT:
20855256Slh155975 		*val = adapterStat->tx_mib_multicst_packets +
20865256Slh155975 		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
20875256Slh155975 		break;
20885256Slh155975 
20895256Slh155975 	case MAC_STAT_BRDCSTXMT:
20905256Slh155975 		*val = adapterStat->tx_mib_broadcst_packets +
20915256Slh155975 		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
20925256Slh155975 		break;
20935256Slh155975 
20945256Slh155975 	case MAC_STAT_NOXMTBUF:
20955256Slh155975 		*val = adapterStat->tx_no_descriptor;
20965256Slh155975 		break;
20975256Slh155975 
20985256Slh155975 	case MAC_STAT_OERRORS:
20995256Slh155975 		*val = adapterStat->tx_mib_ex_coll_packets +
21005256Slh155975 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
21015256Slh155975 		break;
21025256Slh155975 
21035256Slh155975 	case MAC_STAT_COLLISIONS:
21045256Slh155975 		*val = adapterStat->tx_mib_ex_coll_packets +
21055256Slh155975 		    mdlReadMib(pLayerPointers, XmtCollisions);
21065256Slh155975 		break;
21075256Slh155975 
21085256Slh155975 	case ETHER_STAT_FIRST_COLLISIONS:
21095256Slh155975 		*val = adapterStat->tx_mib_one_coll_packets +
21105256Slh155975 		    mdlReadMib(pLayerPointers, XmtOneCollision);
21115256Slh155975 		break;
21125256Slh155975 
21135256Slh155975 	case ETHER_STAT_MULTI_COLLISIONS:
21145256Slh155975 		*val = adapterStat->tx_mib_multi_coll_packets +
21155256Slh155975 		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
21165256Slh155975 		break;
21175256Slh155975 
21185256Slh155975 	case ETHER_STAT_EX_COLLISIONS:
21195256Slh155975 		*val = adapterStat->tx_mib_ex_coll_packets +
21205256Slh155975 		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
21215256Slh155975 		break;
21225256Slh155975 
21235256Slh155975 	case ETHER_STAT_TX_LATE_COLLISIONS:
21245256Slh155975 		*val = adapterStat->tx_mib_late_coll_packets +
21255256Slh155975 		    mdlReadMib(pLayerPointers, XmtLateCollision);
21265256Slh155975 		break;
21275256Slh155975 
21285256Slh155975 	case ETHER_STAT_DEFER_XMTS:
21295256Slh155975 		*val = adapterStat->tx_mib_defer_trans_packets +
21305256Slh155975 		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
21315256Slh155975 		break;
21325256Slh155975 
21335256Slh155975 	default:
21345256Slh155975 		return (ENOTSUP);
21355256Slh155975 	}
21365256Slh155975 	return (0);
21375256Slh155975 }
21385256Slh155975 
21395256Slh155975 /*
21405256Slh155975  *	Memory access functions used by MDL to read and set card registers.
21415256Slh155975  */
21425256Slh155975 unsigned char
21435256Slh155975 READ_REG8(struct LayerPointers *pLayerPointers, long x)
21445256Slh155975 {
21455256Slh155975 	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
21465256Slh155975 }
21475256Slh155975 
21485256Slh155975 int
21495256Slh155975 READ_REG16(struct LayerPointers *pLayerPointers, long x)
21505256Slh155975 {
21515256Slh155975 	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
21525256Slh155975 	    (uint16_t *)(x)));
21535256Slh155975 }
21545256Slh155975 
21555256Slh155975 long
21565256Slh155975 READ_REG32(struct LayerPointers *pLayerPointers, long x)
21575256Slh155975 {
21585256Slh155975 	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
21595256Slh155975 	    (uint32_t *)(x)));
21605256Slh155975 }
21615256Slh155975 
21625256Slh155975 void
21635256Slh155975 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
21645256Slh155975 {
21655256Slh155975 	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
21665256Slh155975 }
21675256Slh155975 
21685256Slh155975 void
21695256Slh155975 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
21705256Slh155975 {
21715256Slh155975 	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
21725256Slh155975 }
21735256Slh155975 
21745256Slh155975 void
21755256Slh155975 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
21765256Slh155975 {
21775256Slh155975 	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
21785256Slh155975 }
21795256Slh155975 
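/*
 * Write a 64-bit register as eight consecutive byte-wide accesses, starting
 * at the lowest address; y is assumed to point to at least 8 bytes.
 */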
21805256Slh155975 void
21815256Slh155975 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
21825256Slh155975 {
21835256Slh155975 	int i;
21845256Slh155975 	for (i = 0; i < 8; i++) {
21855256Slh155975 		WRITE_REG8(pLayerPointers, (x + i), y[i]);
21865256Slh155975 	}
21875256Slh155975 }
2188