15256Slh155975 /* 25895Syz147064 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 35256Slh155975 * Use is subject to license terms. 45256Slh155975 */ 55256Slh155975 65256Slh155975 #pragma ident "%Z%%M% %I% %E% SMI" 75256Slh155975 85256Slh155975 /* 95256Slh155975 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc. All rights reserved. 105256Slh155975 * 115256Slh155975 * Redistribution and use in source and binary forms, with or without 125256Slh155975 * modification, are permitted provided that the following conditions are met: 135256Slh155975 * 145256Slh155975 * + Redistributions of source code must retain the above copyright notice, 155256Slh155975 * + this list of conditions and the following disclaimer. 165256Slh155975 * 175256Slh155975 * + Redistributions in binary form must reproduce the above copyright 185256Slh155975 * + notice, this list of conditions and the following disclaimer in the 195256Slh155975 * + documentation and/or other materials provided with the distribution. 205256Slh155975 * 215256Slh155975 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its 225256Slh155975 * + contributors may be used to endorse or promote products derived from 235256Slh155975 * + this software without specific prior written permission. 245256Slh155975 * 255256Slh155975 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND 265256Slh155975 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 275256Slh155975 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 285256Slh155975 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 295256Slh155975 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR 305256Slh155975 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 315256Slh155975 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 325256Slh155975 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 335256Slh155975 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 345256Slh155975 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 355256Slh155975 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 365256Slh155975 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 375256Slh155975 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 385256Slh155975 * 395256Slh155975 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and 405256Slh155975 * Compliance with Applicable Laws. Notice is hereby given that 415256Slh155975 * the software may be subject to restrictions on use, release, 425256Slh155975 * transfer, importation, exportation and/or re-exportation under 435256Slh155975 * the laws and regulations of the United States or other 445256Slh155975 * countries ("Applicable Laws"), which include but are not 455256Slh155975 * limited to U.S. export control laws such as the Export 465256Slh155975 * Administration Regulations and national security controls as 475256Slh155975 * defined thereunder, as well as State Department controls under 485256Slh155975 * the U.S. Munitions List. Permission to use and/or 495256Slh155975 * redistribute the software is conditioned upon compliance with 505256Slh155975 * all Applicable Laws, including U.S. export control laws 515256Slh155975 * regarding specifically designated persons, countries and 525256Slh155975 * nationals of countries subject to national security controls. 535256Slh155975 */ 545256Slh155975 555256Slh155975 565256Slh155975 #pragma ident "@(#)$RCSfile: solaris_odl.c,v $ $Revision: 1.3 $ " \ 575256Slh155975 " $Date: 2004/04/22 15:22:54 $ AMD" 585256Slh155975 595256Slh155975 605256Slh155975 /* include files */ 615256Slh155975 #include <sys/disp.h> 625256Slh155975 #include <sys/atomic.h> 635895Syz147064 #include <sys/vlan.h> 645256Slh155975 #include "amd8111s_main.h" 655256Slh155975 665256Slh155975 /* Global macro Definations */ 675256Slh155975 #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1)) 685256Slh155975 #define INTERFACE_NAME "amd8111s" 695256Slh155975 #define AMD8111S_SPLIT 128 705256Slh155975 #define AMD8111S_SEND_MAX 64 715256Slh155975 725256Slh155975 static char ident[] = "AMD8111 10/100M Ethernet 1.0"; 735256Slh155975 745256Slh155975 /* 755256Slh155975 * Driver Entry Points 765256Slh155975 */ 775256Slh155975 static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t); 785256Slh155975 static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t); 795256Slh155975 805256Slh155975 /* 815256Slh155975 * GLD Entry points prototype 825256Slh155975 */ 835256Slh155975 static int amd8111s_m_unicst(void *, const uint8_t *); 845256Slh155975 static int amd8111s_m_promisc(void *, boolean_t); 855256Slh155975 static int amd8111s_m_stat(void *, uint_t, uint64_t *); 865256Slh155975 static void amd8111s_m_resources(void *arg); 875256Slh155975 static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *); 885256Slh155975 static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr); 895256Slh155975 static int amd8111s_m_start(void *); 905256Slh155975 static void amd8111s_m_stop(void *); 915256Slh155975 static mblk_t *amd8111s_m_tx(void *, mblk_t *mp); 925256Slh155975 static uint_t amd8111s_intr(caddr_t); 935256Slh155975 945256Slh155975 static int amd8111s_unattach(dev_info_t *, struct LayerPointers *); 955256Slh155975 965256Slh155975 static boolean_t amd8111s_allocate_buffers(struct LayerPointers *); 975256Slh155975 static int amd8111s_odlInit(struct LayerPointers *); 985256Slh155975 static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *); 995256Slh155975 static void amd8111s_free_descriptors(struct LayerPointers *); 1005256Slh155975 static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *, 1015256Slh155975 struct amd8111s_dma_ringbuf *, uint32_t, uint32_t); 1025256Slh155975 static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *); 1035256Slh155975 1045256Slh155975 1055256Slh155975 static void amd8111s_log(struct LayerPointers *adapter, int level, 1065256Slh155975 char *fmt, ...); 1075256Slh155975 1085256Slh155975 static struct cb_ops amd8111s_cb_ops = { 1095256Slh155975 nulldev, 1105256Slh155975 nulldev, 1115256Slh155975 nodev, 1125256Slh155975 nodev, 1135256Slh155975 nodev, 1145256Slh155975 nodev, 1155256Slh155975 nodev, 1165256Slh155975 nodev, 1175256Slh155975 nodev, 1185256Slh155975 nodev, 1195256Slh155975 nodev, 1205256Slh155975 nochpoll, 1215256Slh155975 ddi_prop_op, 1225256Slh155975 NULL, 1235256Slh155975 D_NEW | D_MP, 1245256Slh155975 CB_REV, /* cb_rev */ 1255256Slh155975 nodev, /* cb_aread */ 1265256Slh155975 nodev /* cb_awrite */ 1275256Slh155975 }; 1285256Slh155975 1295256Slh155975 static struct dev_ops amd8111s_dev_ops = { 1305256Slh155975 DEVO_REV, /* devo_rev */ 1315256Slh155975 0, /* devo_refcnt */ 1325256Slh155975 NULL, /* devo_getinfo */ 1335256Slh155975 nulldev, /* devo_identify */ 1345256Slh155975 nulldev, /* devo_probe */ 1355256Slh155975 amd8111s_attach, /* devo_attach */ 1365256Slh155975 amd8111s_detach, /* devo_detach */ 1375256Slh155975 nodev, /* devo_reset */ 1385256Slh155975 &amd8111s_cb_ops, /* devo_cb_ops */ 1395256Slh155975 NULL, /* devo_bus_ops */ 1405256Slh155975 nodev 1415256Slh155975 }; 1425256Slh155975 1435256Slh155975 struct modldrv amd8111s_modldrv = { 1445256Slh155975 &mod_driverops, /* Type of module. This one is a driver */ 1455256Slh155975 ident, /* short description */ 1465256Slh155975 &amd8111s_dev_ops /* driver specific ops */ 1475256Slh155975 }; 1485256Slh155975 1495256Slh155975 struct modlinkage amd8111s_modlinkage = { 1505256Slh155975 MODREV_1, (void *)&amd8111s_modldrv, NULL 1515256Slh155975 }; 1525256Slh155975 1535256Slh155975 /* 1545256Slh155975 * Global Variables 1555256Slh155975 */ 1565256Slh155975 struct LayerPointers *amd8111sadapter; 1575256Slh155975 1585256Slh155975 static ddi_dma_attr_t pcn_buff_dma_attr_t = { 1595256Slh155975 DMA_ATTR_V0, /* dma_attr_version */ 1605256Slh155975 (uint64_t)0, /* dma_attr_addr_lo */ 1615256Slh155975 (uint64_t)0xFFFFFFFF, /* dma_attr_addr_hi */ 1625256Slh155975 (uint64_t)0xFFFFFFFF, /* dma_attr_count_max */ 1635256Slh155975 (uint64_t)1, /* dma_attr_align */ 1645256Slh155975 (uint_t)0x7F, /* dma_attr_burstsizes */ 1655256Slh155975 (uint32_t)1, /* dma_attr_minxfer */ 1665256Slh155975 (uint64_t)0xFFFFFFFF, /* dma_attr_maxxfer */ 1675256Slh155975 (uint64_t)0xFFFFFFFF, /* dma_attr_seg */ 1685256Slh155975 (int)1, /* dma_attr_sgllen */ 1695256Slh155975 (uint32_t)1, /* granularity */ 1705256Slh155975 (uint_t)0 /* dma_attr_flags */ 1715256Slh155975 }; 1725256Slh155975 1735256Slh155975 static ddi_dma_attr_t pcn_desc_dma_attr_t = { 1745256Slh155975 DMA_ATTR_V0, /* dma_attr_version */ 1755256Slh155975 (uint64_t)0, /* dma_attr_addr_lo */ 1765256Slh155975 (uint64_t)0xFFFFFFFF, /* dma_attr_addr_hi */ 1775256Slh155975 (uint64_t)0x7FFFFFFF, /* dma_attr_count_max */ 1785256Slh155975 (uint64_t)0x10, /* dma_attr_align */ 1795256Slh155975 (uint_t)0xFFFFFFFFU, /* dma_attr_burstsizes */ 1805256Slh155975 (uint32_t)1, /* dma_attr_minxfer */ 1815256Slh155975 (uint64_t)0xFFFFFFFF, /* dma_attr_maxxfer */ 1825256Slh155975 (uint64_t)0xFFFFFFFF, /* dma_attr_seg */ 1835256Slh155975 (int)1, /* dma_attr_sgllen */ 1845256Slh155975 (uint32_t)1, /* granularity */ 1855256Slh155975 (uint_t)0 /* dma_attr_flags */ 1865256Slh155975 }; 1875256Slh155975 1885256Slh155975 /* PIO access attributes for registers */ 1895256Slh155975 static ddi_device_acc_attr_t pcn_acc_attr = { 1905256Slh155975 DDI_DEVICE_ATTR_V0, 1915256Slh155975 DDI_STRUCTURE_LE_ACC, 1925256Slh155975 DDI_STRICTORDER_ACC 1935256Slh155975 }; 1945256Slh155975 1955256Slh155975 #define AMD8111S_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL) 1965256Slh155975 1975256Slh155975 1985256Slh155975 static mac_callbacks_t amd8111s_m_callbacks = { 1995256Slh155975 AMD8111S_M_CALLBACK_FLAGS, 2005256Slh155975 amd8111s_m_stat, 2015256Slh155975 amd8111s_m_start, 2025256Slh155975 amd8111s_m_stop, 2035256Slh155975 amd8111s_m_promisc, 2045256Slh155975 amd8111s_m_multicst, 2055256Slh155975 amd8111s_m_unicst, 2065256Slh155975 amd8111s_m_tx, 2075256Slh155975 amd8111s_m_resources, 2085256Slh155975 amd8111s_m_ioctl 2095256Slh155975 }; 2105256Slh155975 2115256Slh155975 2125256Slh155975 /* 2135256Slh155975 * Standard Driver Load Entry Point 2145256Slh155975 * It will be called at load time of driver. 2155256Slh155975 */ 2165256Slh155975 int 2175256Slh155975 _init() 2185256Slh155975 { 2195256Slh155975 int status; 2205256Slh155975 mac_init_ops(&amd8111s_dev_ops, "amd8111s"); 2215256Slh155975 2225256Slh155975 status = mod_install(&amd8111s_modlinkage); 2235256Slh155975 if (status != DDI_SUCCESS) { 2245256Slh155975 mac_fini_ops(&amd8111s_dev_ops); 2255256Slh155975 } 2265256Slh155975 2275256Slh155975 return (status); 2285256Slh155975 } 2295256Slh155975 2305256Slh155975 /* 2315256Slh155975 * Standard Driver Entry Point for Query. 2325256Slh155975 * It will be called at any time to get Driver info. 2335256Slh155975 */ 2345256Slh155975 int 2355256Slh155975 _info(struct modinfo *modinfop) 2365256Slh155975 { 2375256Slh155975 return (mod_info(&amd8111s_modlinkage, modinfop)); 2385256Slh155975 } 2395256Slh155975 2405256Slh155975 /* 2415256Slh155975 * Standard Driver Entry Point for Unload. 2425256Slh155975 * It will be called at unload time of driver. 2435256Slh155975 */ 2445256Slh155975 int 2455256Slh155975 _fini() 2465256Slh155975 { 2475256Slh155975 int status; 2485256Slh155975 2495256Slh155975 status = mod_remove(&amd8111s_modlinkage); 2505256Slh155975 if (status == DDI_SUCCESS) { 2515256Slh155975 mac_fini_ops(&amd8111s_dev_ops); 2525256Slh155975 } 2535256Slh155975 2545256Slh155975 return (status); 2555256Slh155975 } 2565256Slh155975 2575256Slh155975 /* Adjust Interrupt Coalescing Register to coalesce interrupts */ 2585256Slh155975 static void 2595256Slh155975 amd8111s_m_blank(void *arg, time_t ticks, uint32_t count) 2605256Slh155975 { 2615256Slh155975 _NOTE(ARGUNUSED(arg, ticks, count)); 2625256Slh155975 } 2635256Slh155975 2645256Slh155975 static void 2655256Slh155975 amd8111s_m_resources(void *arg) 2665256Slh155975 { 2675256Slh155975 struct LayerPointers *adapter = arg; 2685256Slh155975 mac_rx_fifo_t mrf; 2695256Slh155975 2705256Slh155975 mrf.mrf_type = MAC_RX_FIFO; 2715256Slh155975 mrf.mrf_blank = amd8111s_m_blank; 2725256Slh155975 mrf.mrf_arg = (void *)adapter; 2735256Slh155975 mrf.mrf_normal_blank_time = 128; 2745256Slh155975 mrf.mrf_normal_pkt_count = 8; 2755256Slh155975 2765256Slh155975 adapter->pOdl->mrh = mac_resource_add(adapter->pOdl->mh, 2775256Slh155975 (mac_resource_t *)&mrf); 2785256Slh155975 } 2795256Slh155975 2805256Slh155975 /* 2815256Slh155975 * Loopback Support 2825256Slh155975 */ 2835256Slh155975 static lb_property_t loopmodes[] = { 2845256Slh155975 { normal, "normal", AMD8111S_LB_NONE }, 2855256Slh155975 { external, "100Mbps", AMD8111S_LB_EXTERNAL_100 }, 2865256Slh155975 { external, "10Mbps", AMD8111S_LB_EXTERNAL_10 }, 2875256Slh155975 { internal, "MAC", AMD8111S_LB_INTERNAL_MAC } 2885256Slh155975 }; 2895256Slh155975 2905256Slh155975 static void 2915256Slh155975 amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode) 2925256Slh155975 { 2935256Slh155975 2945256Slh155975 /* 2955256Slh155975 * If the mode isn't being changed, there's nothing to do ... 2965256Slh155975 */ 2975256Slh155975 if (mode == adapter->pOdl->loopback_mode) 2985256Slh155975 return; 2995256Slh155975 3005256Slh155975 /* 3015256Slh155975 * Validate the requested mode and prepare a suitable message 3025256Slh155975 * to explain the link down/up cycle that the change will 3035256Slh155975 * probably induce ... 3045256Slh155975 */ 3055256Slh155975 switch (mode) { 3065256Slh155975 default: 3075256Slh155975 return; 3085256Slh155975 3095256Slh155975 case AMD8111S_LB_NONE: 3105256Slh155975 mdlStopChip(adapter); 3115256Slh155975 if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) { 3125256Slh155975 cmn_err(CE_NOTE, "LB_NONE restored from Interanl LB"); 3135256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 3145256Slh155975 INLOOP); 3155256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3, 3165256Slh155975 FORCE_FULL_DUPLEX | FORCE_LINK_STATUS); 3175256Slh155975 } else { 3185256Slh155975 cmn_err(CE_NOTE, "LB_NONE restored from Exteranl LB"); 3195256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 3205256Slh155975 EXLOOP); 3215256Slh155975 } 3225256Slh155975 3235256Slh155975 amd8111s_reset(adapter); 3245256Slh155975 adapter->pOdl->LinkStatus = LINK_STATE_DOWN; 3255256Slh155975 adapter->pOdl->rx_fcs_stripped = B_FALSE; 3265256Slh155975 mdlStartChip(adapter); 3275256Slh155975 break; 3285256Slh155975 3295256Slh155975 case AMD8111S_LB_EXTERNAL_100: 3305256Slh155975 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100"); 3315256Slh155975 mdlStopChip(adapter); 3325256Slh155975 amd8111s_reset(adapter); 3335256Slh155975 SetIntrCoalesc(adapter, B_FALSE); 3345256Slh155975 mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100); 3355256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 3365256Slh155975 VAL0 | EXLOOP); 3375256Slh155975 adapter->pOdl->LinkStatus = LINK_STATE_UP; 3385256Slh155975 adapter->pMdl->Speed = 100; 3395256Slh155975 adapter->pMdl->FullDuplex = B_TRUE; 3405256Slh155975 /* Tell GLD the state of the physical link. */ 3415256Slh155975 mac_link_update(adapter->pOdl->mh, LINK_STATE_UP); 3425256Slh155975 3435256Slh155975 adapter->pOdl->rx_fcs_stripped = B_TRUE; 3445256Slh155975 3455256Slh155975 mdlStartChip(adapter); 3465256Slh155975 break; 3475256Slh155975 3485256Slh155975 case AMD8111S_LB_EXTERNAL_10: 3495256Slh155975 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10"); 3505256Slh155975 mdlStopChip(adapter); 3515256Slh155975 amd8111s_reset(adapter); 3525256Slh155975 SetIntrCoalesc(adapter, B_FALSE); 3535256Slh155975 mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10); 3545256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 3555256Slh155975 VAL0 | EXLOOP); 3565256Slh155975 adapter->pOdl->LinkStatus = LINK_STATE_UP; 3575256Slh155975 adapter->pMdl->Speed = 10; 3585256Slh155975 adapter->pMdl->FullDuplex = B_TRUE; 3595256Slh155975 /* Tell GLD the state of the physical link. */ 3605256Slh155975 mac_link_update(adapter->pOdl->mh, LINK_STATE_UP); 3615256Slh155975 3625256Slh155975 adapter->pOdl->rx_fcs_stripped = B_TRUE; 3635256Slh155975 3645256Slh155975 mdlStartChip(adapter); 3655256Slh155975 break; 3665256Slh155975 3675256Slh155975 case AMD8111S_LB_INTERNAL_MAC: 3685256Slh155975 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC"); 3695256Slh155975 mdlStopChip(adapter); 3705256Slh155975 amd8111s_reset(adapter); 3715256Slh155975 SetIntrCoalesc(adapter, B_FALSE); 3725256Slh155975 /* Disable Port Manager */ 3735256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3, 3745256Slh155975 EN_PMGR); 3755256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 3765256Slh155975 VAL0 | INLOOP); 3775256Slh155975 3785256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3, 3795256Slh155975 VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS); 3805256Slh155975 3815256Slh155975 adapter->pOdl->LinkStatus = LINK_STATE_UP; 3825256Slh155975 adapter->pMdl->FullDuplex = B_TRUE; 3835256Slh155975 /* Tell GLD the state of the physical link. */ 3845256Slh155975 mac_link_update(adapter->pOdl->mh, LINK_STATE_UP); 3855256Slh155975 3865256Slh155975 adapter->pOdl->rx_fcs_stripped = B_TRUE; 3875256Slh155975 3885256Slh155975 mdlStartChip(adapter); 3895256Slh155975 break; 3905256Slh155975 } 3915256Slh155975 3925256Slh155975 /* 3935256Slh155975 * All OK; tell the caller to reprogram 3945256Slh155975 * the PHY and/or MAC for the new mode ... 3955256Slh155975 */ 3965256Slh155975 adapter->pOdl->loopback_mode = mode; 3975256Slh155975 } 3985256Slh155975 3995256Slh155975 static enum ioc_reply 4005256Slh155975 amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp, 4015256Slh155975 mblk_t *mp) 4025256Slh155975 { 4035256Slh155975 lb_info_sz_t *lbsp; 4045256Slh155975 lb_property_t *lbpp; 4055256Slh155975 uint32_t *lbmp; 4065256Slh155975 int cmd; 4075256Slh155975 4085256Slh155975 /* 4095256Slh155975 * Validate format of ioctl 4105256Slh155975 */ 4115256Slh155975 if (mp->b_cont == NULL) 4125256Slh155975 return (IOC_INVAL); 4135256Slh155975 4145256Slh155975 cmd = iocp->ioc_cmd; 4155256Slh155975 switch (cmd) { 4165256Slh155975 default: 4175256Slh155975 /* NOTREACHED */ 4185256Slh155975 amd8111s_log(adapter, CE_NOTE, 4195256Slh155975 "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd); 4205256Slh155975 return (IOC_INVAL); 4215256Slh155975 4225256Slh155975 case LB_GET_INFO_SIZE: 4235256Slh155975 if (iocp->ioc_count != sizeof (lb_info_sz_t)) { 4245256Slh155975 amd8111s_log(adapter, CE_NOTE, 4255256Slh155975 "wrong LB_GET_INFO_SIZE size"); 4265256Slh155975 return (IOC_INVAL); 4275256Slh155975 } 428*6990Sgd78059 lbsp = (void *)mp->b_cont->b_rptr; 4295256Slh155975 *lbsp = sizeof (loopmodes); 4305256Slh155975 break; 4315256Slh155975 4325256Slh155975 case LB_GET_INFO: 4335256Slh155975 if (iocp->ioc_count != sizeof (loopmodes)) { 4345256Slh155975 amd8111s_log(adapter, CE_NOTE, 4355256Slh155975 "Wrong LB_GET_INFO size"); 4365256Slh155975 return (IOC_INVAL); 4375256Slh155975 } 438*6990Sgd78059 lbpp = (void *)mp->b_cont->b_rptr; 4395256Slh155975 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 4405256Slh155975 break; 4415256Slh155975 4425256Slh155975 case LB_GET_MODE: 4435256Slh155975 if (iocp->ioc_count != sizeof (uint32_t)) { 4445256Slh155975 amd8111s_log(adapter, CE_NOTE, 4455256Slh155975 "Wrong LB_GET_MODE size"); 4465256Slh155975 return (IOC_INVAL); 4475256Slh155975 } 448*6990Sgd78059 lbmp = (void *)mp->b_cont->b_rptr; 4495256Slh155975 *lbmp = adapter->pOdl->loopback_mode; 4505256Slh155975 break; 4515256Slh155975 4525256Slh155975 case LB_SET_MODE: 4535256Slh155975 if (iocp->ioc_count != sizeof (uint32_t)) { 4545256Slh155975 amd8111s_log(adapter, CE_NOTE, 4555256Slh155975 "Wrong LB_SET_MODE size"); 4565256Slh155975 return (IOC_INVAL); 4575256Slh155975 } 458*6990Sgd78059 lbmp = (void *)mp->b_cont->b_rptr; 4595256Slh155975 amd8111s_set_loop_mode(adapter, *lbmp); 4605256Slh155975 break; 4615256Slh155975 } 4625256Slh155975 return (IOC_REPLY); 4635256Slh155975 } 4645256Slh155975 4655256Slh155975 static void 4665256Slh155975 amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 4675256Slh155975 { 4685256Slh155975 struct iocblk *iocp; 4695256Slh155975 struct LayerPointers *adapter; 4705256Slh155975 enum ioc_reply status; 4715256Slh155975 472*6990Sgd78059 iocp = (void *)mp->b_rptr; 4735256Slh155975 iocp->ioc_error = 0; 474*6990Sgd78059 adapter = arg; 4755256Slh155975 4765256Slh155975 ASSERT(adapter); 4775256Slh155975 if (adapter == NULL) { 4785256Slh155975 miocnak(q, mp, 0, EINVAL); 4795256Slh155975 return; 4805256Slh155975 } 4815256Slh155975 4825256Slh155975 switch (iocp->ioc_cmd) { 4835256Slh155975 4845256Slh155975 case LB_GET_INFO_SIZE: 4855256Slh155975 case LB_GET_INFO: 4865256Slh155975 case LB_GET_MODE: 4875256Slh155975 case LB_SET_MODE: 4885256Slh155975 status = amd8111s_loopback_ioctl(adapter, iocp, mp); 4895256Slh155975 break; 4905256Slh155975 4915256Slh155975 default: 4925256Slh155975 status = IOC_INVAL; 4935256Slh155975 break; 4945256Slh155975 } 4955256Slh155975 4965256Slh155975 /* 4975256Slh155975 * Decide how to reply 4985256Slh155975 */ 4995256Slh155975 switch (status) { 5005256Slh155975 default: 5015256Slh155975 case IOC_INVAL: 5025256Slh155975 /* 5035256Slh155975 * Error, reply with a NAK and EINVAL or the specified error 5045256Slh155975 */ 5055256Slh155975 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 5065256Slh155975 EINVAL : iocp->ioc_error); 5075256Slh155975 break; 5085256Slh155975 5095256Slh155975 case IOC_DONE: 5105256Slh155975 /* 5115256Slh155975 * OK, reply already sent 5125256Slh155975 */ 5135256Slh155975 break; 5145256Slh155975 5155256Slh155975 case IOC_ACK: 5165256Slh155975 /* 5175256Slh155975 * OK, reply with an ACK 5185256Slh155975 */ 5195256Slh155975 miocack(q, mp, 0, 0); 5205256Slh155975 break; 5215256Slh155975 5225256Slh155975 case IOC_REPLY: 5235256Slh155975 /* 5245256Slh155975 * OK, send prepared reply as ACK or NAK 5255256Slh155975 */ 5265256Slh155975 mp->b_datap->db_type = iocp->ioc_error == 0 ? 5275256Slh155975 M_IOCACK : M_IOCNAK; 5285256Slh155975 qreply(q, mp); 5295256Slh155975 break; 5305256Slh155975 } 5315256Slh155975 } 5325256Slh155975 5335256Slh155975 /* 5345256Slh155975 * Copy one packet from dma memory to mblk. Inc dma descriptor pointer. 5355256Slh155975 */ 5365256Slh155975 static boolean_t 5375256Slh155975 amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp) 5385256Slh155975 { 5395256Slh155975 int length = 0; 5405256Slh155975 mblk_t *mp; 5415256Slh155975 struct rx_desc *descriptor; 5425256Slh155975 struct odl *pOdl = pLayerPointers->pOdl; 5435256Slh155975 struct amd8111s_statistics *statistics = &pOdl->statistics; 5445256Slh155975 struct nonphysical *pNonphysical = pLayerPointers->pMil 5455256Slh155975 ->pNonphysical; 5465256Slh155975 5475256Slh155975 mutex_enter(&pOdl->mdlRcvLock); 5485256Slh155975 descriptor = pNonphysical->RxBufDescQRead->descriptor; 5495256Slh155975 (void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 5505256Slh155975 pNonphysical->RxBufDescQRead->descriptor - 5515256Slh155975 pNonphysical->RxBufDescQStart->descriptor, 5525256Slh155975 sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU); 5535256Slh155975 if ((descriptor->Rx_OWN) == 0) { 5545256Slh155975 /* 5555256Slh155975 * If the frame is received with errors, then set MCNT 5565256Slh155975 * of that pkt in ReceiveArray to 0. This packet would 5575256Slh155975 * be discarded later and not indicated to OS. 5585256Slh155975 */ 5595256Slh155975 if (descriptor->Rx_ERR) { 5605256Slh155975 statistics->rx_desc_err ++; 5615256Slh155975 descriptor->Rx_ERR = 0; 5625256Slh155975 if (descriptor->Rx_FRAM == 1) { 5635256Slh155975 statistics->rx_desc_err_FRAM ++; 5645256Slh155975 descriptor->Rx_FRAM = 0; 5655256Slh155975 } 5665256Slh155975 if (descriptor->Rx_OFLO == 1) { 5675256Slh155975 statistics->rx_desc_err_OFLO ++; 5685256Slh155975 descriptor->Rx_OFLO = 0; 5695256Slh155975 pOdl->rx_overflow_counter ++; 5705256Slh155975 if ((pOdl->rx_overflow_counter > 5) && 5715256Slh155975 (pOdl->pause_interval == 0)) { 5725256Slh155975 statistics->rx_double_overflow ++; 5735256Slh155975 mdlSendPause(pLayerPointers); 5745256Slh155975 pOdl->rx_overflow_counter = 0; 5755256Slh155975 pOdl->pause_interval = 25; 5765256Slh155975 } 5775256Slh155975 } 5785256Slh155975 if (descriptor->Rx_CRC == 1) { 5795256Slh155975 statistics->rx_desc_err_CRC ++; 5805256Slh155975 descriptor->Rx_CRC = 0; 5815256Slh155975 } 5825256Slh155975 if (descriptor->Rx_BUFF == 1) { 5835256Slh155975 statistics->rx_desc_err_BUFF ++; 5845256Slh155975 descriptor->Rx_BUFF = 0; 5855256Slh155975 } 5865256Slh155975 goto Next_Descriptor; 5875256Slh155975 } 5885256Slh155975 5895256Slh155975 /* Length of incoming packet */ 5905256Slh155975 if (pOdl->rx_fcs_stripped) { 5915256Slh155975 length = descriptor->Rx_MCNT -4; 5925256Slh155975 } else { 5935256Slh155975 length = descriptor->Rx_MCNT; 5945256Slh155975 } 5955256Slh155975 if (length < 62) { 5965256Slh155975 statistics->rx_error_zerosize ++; 5975256Slh155975 } 5985256Slh155975 5995256Slh155975 if ((mp = allocb(length, BPRI_MED)) == NULL) { 6005256Slh155975 statistics->rx_allocfail ++; 6015256Slh155975 goto failed; 6025256Slh155975 } 6035256Slh155975 /* Copy from virtual address of incoming packet */ 6045256Slh155975 bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap), 6055256Slh155975 mp->b_rptr, length); 6065256Slh155975 mp->b_wptr = mp->b_rptr + length; 6075256Slh155975 statistics->rx_ok_packets ++; 6085256Slh155975 if (*last_mp == NULL) { 6095256Slh155975 *last_mp = mp; 6105256Slh155975 } else { 6115256Slh155975 (*last_mp)->b_next = mp; 6125256Slh155975 *last_mp = mp; 6135256Slh155975 } 6145256Slh155975 6155256Slh155975 Next_Descriptor: 6165256Slh155975 descriptor->Rx_MCNT = 0; 6175256Slh155975 descriptor->Rx_SOP = 0; 6185256Slh155975 descriptor->Rx_EOP = 0; 6195256Slh155975 descriptor->Rx_PAM = 0; 6205256Slh155975 descriptor->Rx_BAM = 0; 6215256Slh155975 descriptor->TT = 0; 6225256Slh155975 descriptor->Rx_OWN = 1; 6235256Slh155975 pNonphysical->RxBufDescQRead->descriptor++; 6245256Slh155975 pNonphysical->RxBufDescQRead->USpaceMap++; 6255256Slh155975 if (pNonphysical->RxBufDescQRead->descriptor > 6265256Slh155975 pNonphysical->RxBufDescQEnd->descriptor) { 6275256Slh155975 pNonphysical->RxBufDescQRead->descriptor = 6285256Slh155975 pNonphysical->RxBufDescQStart->descriptor; 6295256Slh155975 pNonphysical->RxBufDescQRead->USpaceMap = 6305256Slh155975 pNonphysical->RxBufDescQStart->USpaceMap; 6315256Slh155975 } 6325256Slh155975 mutex_exit(&pOdl->mdlRcvLock); 6335256Slh155975 6345256Slh155975 return (B_TRUE); 6355256Slh155975 } 6365256Slh155975 6375256Slh155975 failed: 6385256Slh155975 mutex_exit(&pOdl->mdlRcvLock); 6395256Slh155975 return (B_FALSE); 6405256Slh155975 } 6415256Slh155975 6425256Slh155975 /* 6435256Slh155975 * Get the received packets from NIC card and send them to GLD. 6445256Slh155975 */ 6455256Slh155975 static void 6465256Slh155975 amd8111s_receive(struct LayerPointers *pLayerPointers) 6475256Slh155975 { 6485256Slh155975 int numOfPkts = 0; 6495256Slh155975 struct odl *pOdl; 6505256Slh155975 mblk_t *ret_mp = NULL, *last_mp = NULL; 6515256Slh155975 6525256Slh155975 pOdl = pLayerPointers->pOdl; 6535256Slh155975 6545256Slh155975 rw_enter(&pOdl->chip_lock, RW_READER); 6555256Slh155975 if (!pLayerPointers->run) { 6565256Slh155975 rw_exit(&pOdl->chip_lock); 6575256Slh155975 return; 6585256Slh155975 } 6595256Slh155975 6605256Slh155975 if (pOdl->pause_interval > 0) 6615256Slh155975 pOdl->pause_interval --; 6625256Slh155975 6635256Slh155975 while (numOfPkts < RX_RING_SIZE) { 6645256Slh155975 6655256Slh155975 if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) { 6665256Slh155975 break; 6675256Slh155975 } 6685256Slh155975 if (ret_mp == NULL) 6695256Slh155975 ret_mp = last_mp; 6705256Slh155975 numOfPkts++; 6715256Slh155975 } 6725256Slh155975 6735256Slh155975 if (ret_mp) { 6745256Slh155975 mac_rx(pOdl->mh, pOdl->mrh, ret_mp); 6755256Slh155975 } 6765256Slh155975 6775256Slh155975 (void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0, 6785256Slh155975 DDI_DMA_SYNC_FORDEV); 6795256Slh155975 6805256Slh155975 mdlReceive(pLayerPointers); 6815256Slh155975 6825256Slh155975 rw_exit(&pOdl->chip_lock); 6835256Slh155975 6845256Slh155975 } 6855256Slh155975 6865256Slh155975 /* 6875256Slh155975 * Print message in release-version driver. 6885256Slh155975 */ 6895256Slh155975 static void 6905256Slh155975 amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...) 6915256Slh155975 { 6925256Slh155975 auto char name[32]; 6935256Slh155975 auto char buf[256]; 6945256Slh155975 va_list ap; 6955256Slh155975 6965256Slh155975 if (adapter != NULL) { 6975256Slh155975 (void) sprintf(name, "amd8111s%d", 6985256Slh155975 ddi_get_instance(adapter->pOdl->devinfo)); 6995256Slh155975 } else { 7005256Slh155975 (void) sprintf(name, "amd8111s"); 7015256Slh155975 } 7025256Slh155975 va_start(ap, fmt); 7035256Slh155975 (void) vsprintf(buf, fmt, ap); 7045256Slh155975 va_end(ap); 7055256Slh155975 cmn_err(level, "%s: %s", name, buf); 7065256Slh155975 } 7075256Slh155975 7085256Slh155975 /* 7095256Slh155975 * To allocate & initilize all resources. 7105256Slh155975 * Called by amd8111s_attach(). 7115256Slh155975 */ 7125256Slh155975 static int 7135256Slh155975 amd8111s_odlInit(struct LayerPointers *pLayerPointers) 7145256Slh155975 { 7155256Slh155975 unsigned long mem_req_array[MEM_REQ_MAX]; 7165256Slh155975 unsigned long mem_set_array[MEM_REQ_MAX]; 7175256Slh155975 unsigned long *pmem_req_array; 7185256Slh155975 unsigned long *pmem_set_array; 7195256Slh155975 int i, size; 7205256Slh155975 7215256Slh155975 for (i = 0; i < MEM_REQ_MAX; i++) { 7225256Slh155975 mem_req_array[i] = 0; 7235256Slh155975 mem_set_array[i] = 0; 7245256Slh155975 } 7255256Slh155975 7265256Slh155975 milRequestResources(mem_req_array); 7275256Slh155975 7285256Slh155975 pmem_req_array = mem_req_array; 7295256Slh155975 pmem_set_array = mem_set_array; 7305256Slh155975 while (*pmem_req_array) { 7315256Slh155975 switch (*pmem_req_array) { 7325256Slh155975 case VIRTUAL: 7335256Slh155975 *pmem_set_array = VIRTUAL; 7345256Slh155975 pmem_req_array++; 7355256Slh155975 pmem_set_array++; 7365256Slh155975 *(pmem_set_array) = *(pmem_req_array); 7375256Slh155975 pmem_set_array++; 7385256Slh155975 *(pmem_set_array) = (unsigned long) kmem_zalloc( 7395256Slh155975 *(pmem_req_array), KM_NOSLEEP); 7405256Slh155975 if (*pmem_set_array == NULL) 7415256Slh155975 goto odl_init_failure; 7425256Slh155975 break; 7435256Slh155975 } 7445256Slh155975 pmem_req_array++; 7455256Slh155975 pmem_set_array++; 7465256Slh155975 } 7475256Slh155975 7485256Slh155975 /* 7495256Slh155975 * Initilize memory on lower layers 7505256Slh155975 */ 7515256Slh155975 milSetResources(pLayerPointers, mem_set_array); 7525256Slh155975 7535256Slh155975 /* Allocate Rx/Tx descriptors */ 7545256Slh155975 if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) { 7555256Slh155975 *pmem_set_array = NULL; 7565256Slh155975 goto odl_init_failure; 7575256Slh155975 } 7585256Slh155975 7595256Slh155975 /* 7605256Slh155975 * Allocate Rx buffer for each Rx descriptor. Then call mil layer 7615256Slh155975 * routine to fill physical address of Rx buffer into Rx descriptor. 7625256Slh155975 */ 7635256Slh155975 if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) { 7645256Slh155975 amd8111s_free_descriptors(pLayerPointers); 7655256Slh155975 *pmem_set_array = NULL; 7665256Slh155975 goto odl_init_failure; 7675256Slh155975 } 7685256Slh155975 milInitGlbds(pLayerPointers); 7695256Slh155975 7705256Slh155975 return (0); 7715256Slh155975 7725256Slh155975 odl_init_failure: 7735256Slh155975 /* 7745256Slh155975 * Free All memory allocated so far 7755256Slh155975 */ 7765256Slh155975 pmem_req_array = mem_set_array; 7775256Slh155975 while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) { 7785256Slh155975 switch (*pmem_req_array) { 7795256Slh155975 case VIRTUAL: 7805256Slh155975 pmem_req_array++; /* Size */ 7815256Slh155975 size = *(pmem_req_array); 7825256Slh155975 pmem_req_array++; /* Virtual Address */ 7835256Slh155975 if (pmem_req_array == NULL) 7845256Slh155975 return (1); 7855256Slh155975 kmem_free((int *)*pmem_req_array, size); 7865256Slh155975 break; 7875256Slh155975 } 7885256Slh155975 pmem_req_array++; 7895256Slh155975 } 7905256Slh155975 return (1); 7915256Slh155975 } 7925256Slh155975 7935256Slh155975 /* 7945256Slh155975 * Allocate and initialize Tx/Rx descriptors 7955256Slh155975 */ 7965256Slh155975 static boolean_t 7975256Slh155975 amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers) 7985256Slh155975 { 7995256Slh155975 struct odl *pOdl = pLayerPointers->pOdl; 8005256Slh155975 struct mil *pMil = pLayerPointers->pMil; 8015256Slh155975 dev_info_t *devinfo = pOdl->devinfo; 8025256Slh155975 uint_t length, count, i; 8035256Slh155975 size_t real_length; 8045256Slh155975 8055256Slh155975 /* 8065256Slh155975 * Allocate Rx descriptors 8075256Slh155975 */ 8085256Slh155975 if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP, 8095256Slh155975 NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) { 8105256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 8115256Slh155975 "ddi_dma_alloc_handle for Rx desc failed"); 8125256Slh155975 pOdl->rx_desc_dma_handle = NULL; 8135256Slh155975 return (B_FALSE); 8145256Slh155975 } 8155256Slh155975 8165256Slh155975 length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT; 8175256Slh155975 if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length, 8185256Slh155975 &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 8195256Slh155975 NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length, 8205256Slh155975 &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) { 8215256Slh155975 8225256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 8235256Slh155975 "ddi_dma_mem_handle for Rx desc failed"); 8245256Slh155975 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle); 8255256Slh155975 pOdl->rx_desc_dma_handle = NULL; 8265256Slh155975 return (B_FALSE); 8275256Slh155975 } 8285256Slh155975 8295256Slh155975 if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle, 8305256Slh155975 NULL, (caddr_t)pMil->Rx_desc_original, real_length, 8315256Slh155975 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 8325256Slh155975 NULL, &pOdl->rx_desc_dma_cookie, 8335256Slh155975 &count) != DDI_SUCCESS) { 8345256Slh155975 8355256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 8365256Slh155975 "ddi_dma_addr_bind_handle for Rx desc failed"); 8375256Slh155975 ddi_dma_mem_free(&pOdl->rx_desc_acc_handle); 8385256Slh155975 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle); 8395256Slh155975 pOdl->rx_desc_dma_handle = NULL; 8405256Slh155975 return (B_FALSE); 8415256Slh155975 } 8425256Slh155975 ASSERT(count == 1); 8435256Slh155975 8445256Slh155975 /* Initialize Rx descriptors related variables */ 8455256Slh155975 pMil->Rx_desc = (struct rx_desc *) 8465256Slh155975 ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT); 8475256Slh155975 pMil->Rx_desc_pa = (unsigned int) 8485256Slh155975 ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT); 8495256Slh155975 8505256Slh155975 pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa; 8515256Slh155975 8525256Slh155975 8535256Slh155975 /* 8545256Slh155975 * Allocate Tx descriptors 8555256Slh155975 */ 8565256Slh155975 if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP, 8575256Slh155975 NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) { 8585256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 8595256Slh155975 "ddi_dma_alloc_handle for Tx desc failed"); 8605256Slh155975 goto allocate_desc_fail; 8615256Slh155975 } 8625256Slh155975 8635256Slh155975 length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT; 8645256Slh155975 if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length, 8655256Slh155975 &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 8665256Slh155975 NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length, 8675256Slh155975 &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) { 8685256Slh155975 8695256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 8705256Slh155975 "ddi_dma_mem_handle for Tx desc failed"); 8715256Slh155975 ddi_dma_free_handle(&pOdl->tx_desc_dma_handle); 8725256Slh155975 goto allocate_desc_fail; 8735256Slh155975 } 8745256Slh155975 8755256Slh155975 if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle, 8765256Slh155975 NULL, (caddr_t)pMil->Tx_desc_original, real_length, 8775256Slh155975 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 8785256Slh155975 NULL, &pOdl->tx_desc_dma_cookie, 8795256Slh155975 &count) != DDI_SUCCESS) { 8805256Slh155975 8815256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 8825256Slh155975 "ddi_dma_addr_bind_handle for Tx desc failed"); 8835256Slh155975 ddi_dma_mem_free(&pOdl->tx_desc_acc_handle); 8845256Slh155975 ddi_dma_free_handle(&pOdl->tx_desc_dma_handle); 8855256Slh155975 goto allocate_desc_fail; 8865256Slh155975 } 8875256Slh155975 ASSERT(count == 1); 8885256Slh155975 /* Set the DMA area to all zeros */ 8895256Slh155975 bzero((caddr_t)pMil->Tx_desc_original, length); 8905256Slh155975 8915256Slh155975 /* Initialize Tx descriptors related variables */ 8925256Slh155975 pMil->Tx_desc = (struct tx_desc *) 8935256Slh155975 ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT); 8945256Slh155975 pMil->pNonphysical->TxDescQRead = pMil->Tx_desc; 8955256Slh155975 pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc; 8965256Slh155975 pMil->pNonphysical->TxDescQStart = pMil->Tx_desc; 8975256Slh155975 pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE -1]); 8985256Slh155975 8995256Slh155975 /* Physical Addr of Tx_desc_original & Tx_desc */ 9005256Slh155975 pLayerPointers->pMil->Tx_desc_pa = 9015256Slh155975 ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & 9025256Slh155975 ~ALIGNMENT); 9035256Slh155975 9045256Slh155975 /* Setting the reserved bits in the tx descriptors */ 9055256Slh155975 for (i = 0; i < TX_RING_SIZE; i++) { 9065256Slh155975 pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f; 9075256Slh155975 pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0; 9085256Slh155975 pMil->pNonphysical->TxDescQWrite++; 9095256Slh155975 } 9105256Slh155975 pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart; 9115256Slh155975 9125256Slh155975 pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa; 9135256Slh155975 9145256Slh155975 return (B_TRUE); 9155256Slh155975 9165256Slh155975 allocate_desc_fail: 9175256Slh155975 pOdl->tx_desc_dma_handle = NULL; 9185256Slh155975 (void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle); 9195256Slh155975 ddi_dma_mem_free(&pOdl->rx_desc_acc_handle); 9205256Slh155975 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle); 9215256Slh155975 pOdl->rx_desc_dma_handle = NULL; 9225256Slh155975 return (B_FALSE); 9235256Slh155975 } 9245256Slh155975 9255256Slh155975 /* 9265256Slh155975 * Free Tx/Rx descriptors 9275256Slh155975 */ 9285256Slh155975 static void 9295256Slh155975 amd8111s_free_descriptors(struct LayerPointers *pLayerPointers) 9305256Slh155975 { 9315256Slh155975 struct odl *pOdl = pLayerPointers->pOdl; 9325256Slh155975 9335256Slh155975 /* Free Rx descriptors */ 9345256Slh155975 if (pOdl->rx_desc_dma_handle) { 9355256Slh155975 (void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle); 9365256Slh155975 ddi_dma_mem_free(&pOdl->rx_desc_acc_handle); 9375256Slh155975 ddi_dma_free_handle(&pOdl->rx_desc_dma_handle); 9385256Slh155975 pOdl->rx_desc_dma_handle = NULL; 9395256Slh155975 } 9405256Slh155975 9415256Slh155975 /* Free Rx descriptors */ 9425256Slh155975 if (pOdl->tx_desc_dma_handle) { 9435256Slh155975 (void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle); 9445256Slh155975 ddi_dma_mem_free(&pOdl->tx_desc_acc_handle); 9455256Slh155975 ddi_dma_free_handle(&pOdl->tx_desc_dma_handle); 9465256Slh155975 pOdl->tx_desc_dma_handle = NULL; 9475256Slh155975 } 9485256Slh155975 } 9495256Slh155975 9505256Slh155975 /* 9515256Slh155975 * Allocate Tx/Rx Ring buffer 9525256Slh155975 */ 9535256Slh155975 static boolean_t 9545256Slh155975 amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers, 9555256Slh155975 struct amd8111s_dma_ringbuf *pRing, 9565256Slh155975 uint32_t ring_size, uint32_t msg_size) 9575256Slh155975 { 9585256Slh155975 uint32_t idx, msg_idx = 0, msg_acc; 9595256Slh155975 dev_info_t *devinfo = pLayerPointers->pOdl->devinfo; 9605256Slh155975 size_t real_length; 9615256Slh155975 uint_t count = 0; 9625256Slh155975 9635256Slh155975 ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1); 9645256Slh155975 pRing->dma_buf_sz = msg_size; 9655256Slh155975 pRing->ring_size = ring_size; 9665256Slh155975 pRing->trunk_num = AMD8111S_SPLIT; 9675256Slh155975 pRing->buf_sz = msg_size * ring_size; 9685256Slh155975 if (ring_size < pRing->trunk_num) 9695256Slh155975 pRing->trunk_num = ring_size; 9705256Slh155975 ASSERT((pRing->buf_sz % pRing->trunk_num) == 0); 9715256Slh155975 9725256Slh155975 pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num; 9735256Slh155975 ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0); 9745256Slh155975 9755256Slh155975 pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) * 9765256Slh155975 ring_size, KM_NOSLEEP); 9775256Slh155975 pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) * 9785256Slh155975 pRing->trunk_num, KM_NOSLEEP); 9795256Slh155975 pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) * 9805256Slh155975 pRing->trunk_num, KM_NOSLEEP); 9815256Slh155975 pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * 9825256Slh155975 pRing->trunk_num, KM_NOSLEEP); 9835256Slh155975 pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) * 9845256Slh155975 pRing->trunk_num, KM_NOSLEEP); 9855256Slh155975 if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL || 9865256Slh155975 pRing->acc_hdl == NULL || pRing->trunk_addr == NULL || 9875256Slh155975 pRing->dma_cookie == NULL) { 9885256Slh155975 amd8111s_log(pLayerPointers, CE_NOTE, 9895256Slh155975 "kmem_zalloc failed"); 9905256Slh155975 goto failed; 9915256Slh155975 } 9925256Slh155975 9935256Slh155975 for (idx = 0; idx < pRing->trunk_num; ++idx) { 9945256Slh155975 if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t, 9955256Slh155975 DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx])) 9965256Slh155975 != DDI_SUCCESS) { 9975256Slh155975 9985256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 9995256Slh155975 "ddi_dma_alloc_handle failed"); 10005256Slh155975 goto failed; 10015256Slh155975 } else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx], 10025256Slh155975 pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING, 10035256Slh155975 DDI_DMA_SLEEP, NULL, 10045256Slh155975 (caddr_t *)&(pRing->trunk_addr[idx]), 10055256Slh155975 (size_t *)(&real_length), &pRing->acc_hdl[idx]) 10065256Slh155975 != DDI_SUCCESS) { 10075256Slh155975 10085256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 10095256Slh155975 "ddi_dma_mem_alloc failed"); 10105256Slh155975 goto failed; 10115256Slh155975 } else if (real_length != pRing->trunk_sz) { 10125256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 10135256Slh155975 "ddi_dma_mem_alloc failed"); 10145256Slh155975 goto failed; 10155256Slh155975 } else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx], 10165256Slh155975 NULL, (caddr_t)pRing->trunk_addr[idx], real_length, 10175256Slh155975 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 10185256Slh155975 &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) { 10195256Slh155975 10205256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 10215256Slh155975 "ddi_dma_addr_bind_handle failed"); 10225256Slh155975 goto failed; 10235256Slh155975 } else { 10245256Slh155975 for (msg_acc = 0; 10255256Slh155975 msg_acc < pRing->trunk_sz / pRing->dma_buf_sz; 10265256Slh155975 ++ msg_acc) { 10275256Slh155975 pRing->msg_buf[msg_idx].offset = 10285256Slh155975 msg_acc * pRing->dma_buf_sz; 10295256Slh155975 pRing->msg_buf[msg_idx].vir_addr = 10305256Slh155975 pRing->trunk_addr[idx] + 10315256Slh155975 pRing->msg_buf[msg_idx].offset; 10325256Slh155975 pRing->msg_buf[msg_idx].phy_addr = 10335256Slh155975 pRing->dma_cookie[idx].dmac_laddress + 10345256Slh155975 pRing->msg_buf[msg_idx].offset; 10355256Slh155975 pRing->msg_buf[msg_idx].p_hdl = 10365256Slh155975 pRing->dma_hdl[idx]; 10375256Slh155975 msg_idx ++; 10385256Slh155975 } 10395256Slh155975 } 10405256Slh155975 } 10415256Slh155975 10425256Slh155975 pRing->free = pRing->msg_buf; 10435256Slh155975 pRing->next = pRing->msg_buf; 10445256Slh155975 pRing->curr = pRing->msg_buf; 10455256Slh155975 10465256Slh155975 return (B_TRUE); 10475256Slh155975 failed: 10485256Slh155975 amd8111s_free_dma_ringbuf(pRing); 10495256Slh155975 return (B_FALSE); 10505256Slh155975 } 10515256Slh155975 10525256Slh155975 /* 10535256Slh155975 * Free Tx/Rx ring buffer 10545256Slh155975 */ 10555256Slh155975 static void 10565256Slh155975 amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing) 10575256Slh155975 { 10585256Slh155975 int idx; 10595256Slh155975 10605256Slh155975 if (pRing->dma_cookie != NULL) { 10615256Slh155975 for (idx = 0; idx < pRing->trunk_num; idx ++) { 10625256Slh155975 if (pRing->dma_cookie[idx].dmac_laddress == 0) { 10635256Slh155975 break; 10645256Slh155975 } 10655256Slh155975 (void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]); 10665256Slh155975 } 10675256Slh155975 kmem_free(pRing->dma_cookie, 10685256Slh155975 sizeof (ddi_dma_cookie_t) * pRing->trunk_num); 10695256Slh155975 } 10705256Slh155975 10715256Slh155975 if (pRing->acc_hdl != NULL) { 10725256Slh155975 for (idx = 0; idx < pRing->trunk_num; idx ++) { 10735256Slh155975 if (pRing->acc_hdl[idx] == NULL) 10745256Slh155975 break; 10755256Slh155975 ddi_dma_mem_free(&pRing->acc_hdl[idx]); 10765256Slh155975 } 10775256Slh155975 kmem_free(pRing->acc_hdl, 10785256Slh155975 sizeof (ddi_acc_handle_t) * pRing->trunk_num); 10795256Slh155975 } 10805256Slh155975 10815256Slh155975 if (pRing->dma_hdl != NULL) { 10825256Slh155975 for (idx = 0; idx < pRing->trunk_num; idx ++) { 10835256Slh155975 if (pRing->dma_hdl[idx] == 0) { 10845256Slh155975 break; 10855256Slh155975 } 10865256Slh155975 ddi_dma_free_handle(&pRing->dma_hdl[idx]); 10875256Slh155975 } 10885256Slh155975 kmem_free(pRing->dma_hdl, 10895256Slh155975 sizeof (ddi_dma_handle_t) * pRing->trunk_num); 10905256Slh155975 } 10915256Slh155975 10925256Slh155975 if (pRing->msg_buf != NULL) { 10935256Slh155975 kmem_free(pRing->msg_buf, 10945256Slh155975 sizeof (struct amd8111s_msgbuf) * pRing->ring_size); 10955256Slh155975 } 10965256Slh155975 10975256Slh155975 if (pRing->trunk_addr != NULL) { 10985256Slh155975 kmem_free(pRing->trunk_addr, 10995256Slh155975 sizeof (caddr_t) * pRing->trunk_num); 11005256Slh155975 } 11015256Slh155975 11025256Slh155975 bzero(pRing, sizeof (*pRing)); 11035256Slh155975 } 11045256Slh155975 11055256Slh155975 11065256Slh155975 /* 11075256Slh155975 * Allocate all Tx buffer. 11085256Slh155975 * Allocate a Rx buffer for each Rx descriptor. Then 11095256Slh155975 * call mil routine to fill physical address of Rx 11105256Slh155975 * buffer into Rx descriptors 11115256Slh155975 */ 11125256Slh155975 static boolean_t 11135256Slh155975 amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers) 11145256Slh155975 { 11155256Slh155975 struct odl *pOdl = pLayerPointers->pOdl; 11165256Slh155975 11175256Slh155975 /* 11185256Slh155975 * Allocate rx Buffers 11195256Slh155975 */ 11205256Slh155975 if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf, 11215256Slh155975 RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) { 11225256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 11235256Slh155975 "amd8111s_alloc_dma_ringbuf for tx failed"); 11245256Slh155975 goto allocate_buf_fail; 11255256Slh155975 } 11265256Slh155975 11275256Slh155975 /* 11285256Slh155975 * Allocate Tx buffers 11295256Slh155975 */ 11305256Slh155975 if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf, 11315256Slh155975 TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) { 11325256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 11335256Slh155975 "amd8111s_alloc_dma_ringbuf for tx failed"); 11345256Slh155975 goto allocate_buf_fail; 11355256Slh155975 } 11365256Slh155975 11375256Slh155975 /* 11385256Slh155975 * Initilize the mil Queues 11395256Slh155975 */ 11405256Slh155975 milInitGlbds(pLayerPointers); 11415256Slh155975 11425256Slh155975 milInitRxQ(pLayerPointers); 11435256Slh155975 11445256Slh155975 return (B_TRUE); 11455256Slh155975 11465256Slh155975 allocate_buf_fail: 11475256Slh155975 11485256Slh155975 amd8111s_log(pLayerPointers, CE_WARN, 11495256Slh155975 "amd8111s_allocate_buffers failed"); 11505256Slh155975 return (B_FALSE); 11515256Slh155975 } 11525256Slh155975 11535256Slh155975 /* 11545256Slh155975 * Free all Rx/Tx buffer 11555256Slh155975 */ 11565256Slh155975 11575256Slh155975 static void 11585256Slh155975 amd8111s_free_buffers(struct LayerPointers *pLayerPointers) 11595256Slh155975 { 11605256Slh155975 /* Free Tx buffers */ 11615256Slh155975 amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf); 11625256Slh155975 11635256Slh155975 /* Free Rx Buffers */ 11645256Slh155975 amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf); 11655256Slh155975 } 11665256Slh155975 11675256Slh155975 /* 11685256Slh155975 * Try to recycle all the descriptors and Tx buffers 11695256Slh155975 * which are already freed by hardware. 11705256Slh155975 */ 11715256Slh155975 static int 11725256Slh155975 amd8111s_recycle_tx(struct LayerPointers *pLayerPointers) 11735256Slh155975 { 11745256Slh155975 struct nonphysical *pNonphysical; 11755256Slh155975 uint32_t count = 0; 11765256Slh155975 11775256Slh155975 pNonphysical = pLayerPointers->pMil->pNonphysical; 11785256Slh155975 while (pNonphysical->TxDescQRead->Tx_OWN == 0 && 11795256Slh155975 pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) { 11805256Slh155975 pLayerPointers->pOdl->tx_buf.free = 11815256Slh155975 NEXT(pLayerPointers->pOdl->tx_buf, free); 11825256Slh155975 pNonphysical->TxDescQRead++; 11835256Slh155975 if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) { 11845256Slh155975 pNonphysical->TxDescQRead = pNonphysical->TxDescQStart; 11855256Slh155975 } 11865256Slh155975 count ++; 11875256Slh155975 } 11885256Slh155975 11895256Slh155975 if (pLayerPointers->pMil->tx_reschedule) 11905256Slh155975 ddi_trigger_softintr(pLayerPointers->pOdl->drain_id); 11915256Slh155975 11925256Slh155975 return (count); 11935256Slh155975 } 11945256Slh155975 11955256Slh155975 /* 11965256Slh155975 * Get packets in the Tx buffer, then copy them to the send buffer. 11975256Slh155975 * Trigger hardware to send out packets. 11985256Slh155975 */ 11995256Slh155975 static void 12005256Slh155975 amd8111s_send_serial(struct LayerPointers *pLayerPointers) 12015256Slh155975 { 12025256Slh155975 struct nonphysical *pNonphysical; 12035256Slh155975 uint32_t count; 12045256Slh155975 12055256Slh155975 pNonphysical = pLayerPointers->pMil->pNonphysical; 12065256Slh155975 12075256Slh155975 mutex_enter(&pLayerPointers->pOdl->mdlSendLock); 12085256Slh155975 12095256Slh155975 for (count = 0; count < AMD8111S_SEND_MAX; count ++) { 12105256Slh155975 if (pLayerPointers->pOdl->tx_buf.curr == 12115256Slh155975 pLayerPointers->pOdl->tx_buf.next) { 12125256Slh155975 break; 12135256Slh155975 } 12145256Slh155975 /* to verify if it needs to recycle the tx Buf */ 12155256Slh155975 if (((pNonphysical->TxDescQWrite + 1 > 12165256Slh155975 pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart : 12175256Slh155975 (pNonphysical->TxDescQWrite + 1)) == 12185256Slh155975 pNonphysical->TxDescQRead) 12195256Slh155975 if (amd8111s_recycle_tx(pLayerPointers) == 0) { 12205256Slh155975 pLayerPointers->pOdl 12215256Slh155975 ->statistics.tx_no_descriptor ++; 12225256Slh155975 break; 12235256Slh155975 } 12245256Slh155975 12255256Slh155975 /* Fill packet length */ 12265256Slh155975 pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers 12275256Slh155975 ->pOdl->tx_buf.curr->msg_size; 12285256Slh155975 12295256Slh155975 /* Fill physical buffer address */ 12305256Slh155975 pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int) 12315256Slh155975 pLayerPointers->pOdl->tx_buf.curr->phy_addr; 12325256Slh155975 12335256Slh155975 pNonphysical->TxDescQWrite->Tx_SOP = 1; 12345256Slh155975 pNonphysical->TxDescQWrite->Tx_EOP = 1; 12355256Slh155975 pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1; 12365256Slh155975 pNonphysical->TxDescQWrite->Tx_LTINT = 1; 12375256Slh155975 pNonphysical->TxDescQWrite->Tx_USPACE = 0; 12385256Slh155975 pNonphysical->TxDescQWrite->Tx_OWN = 1; 12395256Slh155975 12405256Slh155975 pNonphysical->TxDescQWrite++; 12415256Slh155975 if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) { 12425256Slh155975 pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart; 12435256Slh155975 } 12445256Slh155975 12455256Slh155975 pLayerPointers->pOdl->tx_buf.curr = 12465256Slh155975 NEXT(pLayerPointers->pOdl->tx_buf, curr); 12475256Slh155975 12485256Slh155975 } 12495256Slh155975 12505256Slh155975 pLayerPointers->pOdl->statistics.tx_ok_packets += count; 12515256Slh155975 12525256Slh155975 mutex_exit(&pLayerPointers->pOdl->mdlSendLock); 12535256Slh155975 12545256Slh155975 /* Call mdlTransmit to send the pkt out on the network */ 12555256Slh155975 mdlTransmit(pLayerPointers); 12565256Slh155975 12575256Slh155975 } 12585256Slh155975 12595256Slh155975 /* 12605256Slh155975 * Softintr entrance. try to send out packets in the Tx buffer. 12615256Slh155975 * If reschedule is True, call mac_tx_update to re-enable the 12625256Slh155975 * transmit 12635256Slh155975 */ 12645256Slh155975 static uint_t 12655256Slh155975 amd8111s_send_drain(caddr_t arg) 12665256Slh155975 { 1267*6990Sgd78059 struct LayerPointers *pLayerPointers = (void *)arg; 12685256Slh155975 12695256Slh155975 amd8111s_send_serial(pLayerPointers); 12705256Slh155975 12715256Slh155975 if (pLayerPointers->pMil->tx_reschedule && 12725256Slh155975 NEXT(pLayerPointers->pOdl->tx_buf, next) != 12735256Slh155975 pLayerPointers->pOdl->tx_buf.free) { 12745256Slh155975 mac_tx_update(pLayerPointers->pOdl->mh); 12755256Slh155975 pLayerPointers->pMil->tx_reschedule = B_FALSE; 12765256Slh155975 } 12775256Slh155975 12785256Slh155975 return (DDI_INTR_CLAIMED); 12795256Slh155975 } 12805256Slh155975 12815256Slh155975 /* 12825256Slh155975 * Get a Tx buffer 12835256Slh155975 */ 12845256Slh155975 static struct amd8111s_msgbuf * 12855256Slh155975 amd8111s_getTxbuf(struct LayerPointers *pLayerPointers) 12865256Slh155975 { 12875256Slh155975 struct amd8111s_msgbuf *tmp, *next; 12885256Slh155975 12895256Slh155975 mutex_enter(&pLayerPointers->pOdl->mdlSendLock); 12905256Slh155975 next = NEXT(pLayerPointers->pOdl->tx_buf, next); 12915256Slh155975 if (next == pLayerPointers->pOdl->tx_buf.free) { 12925256Slh155975 tmp = NULL; 12935256Slh155975 } else { 12945256Slh155975 tmp = pLayerPointers->pOdl->tx_buf.next; 12955256Slh155975 pLayerPointers->pOdl->tx_buf.next = next; 12965256Slh155975 } 12975256Slh155975 mutex_exit(&pLayerPointers->pOdl->mdlSendLock); 12985256Slh155975 12995256Slh155975 return (tmp); 13005256Slh155975 } 13015256Slh155975 13025256Slh155975 static boolean_t 13035256Slh155975 amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp) 13045256Slh155975 { 13055256Slh155975 struct odl *pOdl; 13065256Slh155975 size_t frag_len; 13075256Slh155975 mblk_t *tmp; 13085256Slh155975 struct amd8111s_msgbuf *txBuf; 13095256Slh155975 uint8_t *pMsg; 13105256Slh155975 13115256Slh155975 pOdl = pLayerPointers->pOdl; 13125256Slh155975 13135256Slh155975 /* alloc send buffer */ 13145256Slh155975 txBuf = amd8111s_getTxbuf(pLayerPointers); 13155256Slh155975 if (txBuf == NULL) { 13165256Slh155975 pOdl->statistics.tx_no_buffer ++; 13175256Slh155975 pLayerPointers->pMil->tx_reschedule = B_TRUE; 13185256Slh155975 amd8111s_send_serial(pLayerPointers); 13195256Slh155975 return (B_FALSE); 13205256Slh155975 } 13215256Slh155975 13225256Slh155975 /* copy packet to send buffer */ 13235256Slh155975 txBuf->msg_size = 0; 13245256Slh155975 pMsg = (uint8_t *)txBuf->vir_addr; 13255256Slh155975 for (tmp = mp; tmp; tmp = tmp->b_cont) { 13265256Slh155975 frag_len = MBLKL(tmp); 13275256Slh155975 bcopy(tmp->b_rptr, pMsg, frag_len); 13285256Slh155975 txBuf->msg_size += frag_len; 13295256Slh155975 pMsg += frag_len; 13305256Slh155975 } 13315256Slh155975 freemsg(mp); 13325256Slh155975 13335256Slh155975 amd8111s_send_serial(pLayerPointers); 13345256Slh155975 13355256Slh155975 return (B_TRUE); 13365256Slh155975 } 13375256Slh155975 13385256Slh155975 /* 13395256Slh155975 * (GLD Entry Point) Send the message block to lower layer 13405256Slh155975 */ 13415256Slh155975 static mblk_t * 13425256Slh155975 amd8111s_m_tx(void *arg, mblk_t *mp) 13435256Slh155975 { 13445256Slh155975 struct LayerPointers *pLayerPointers = arg; 13455256Slh155975 mblk_t *next; 13465256Slh155975 13475256Slh155975 rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER); 13485256Slh155975 if (!pLayerPointers->run) { 13495256Slh155975 pLayerPointers->pOdl->statistics.tx_afterunplumb ++; 13505256Slh155975 freemsgchain(mp); 13515256Slh155975 mp = NULL; 13525256Slh155975 } 13535256Slh155975 13545256Slh155975 while (mp != NULL) { 13555256Slh155975 next = mp->b_next; 13565256Slh155975 mp->b_next = NULL; 13575256Slh155975 if (!amd8111s_send(pLayerPointers, mp)) { 13585256Slh155975 /* Send fail */ 13595256Slh155975 mp->b_next = next; 13605256Slh155975 break; 13615256Slh155975 } 13625256Slh155975 mp = next; 13635256Slh155975 } 13645256Slh155975 13655256Slh155975 rw_exit(&pLayerPointers->pOdl->chip_lock); 13665256Slh155975 return (mp); 13675256Slh155975 } 13685256Slh155975 13695256Slh155975 /* 13705256Slh155975 * (GLD Entry Point) Interrupt Service Routine 13715256Slh155975 */ 13725256Slh155975 static uint_t 13735256Slh155975 amd8111s_intr(caddr_t arg) 13745256Slh155975 { 13755256Slh155975 unsigned int intrCauses; 1376*6990Sgd78059 struct LayerPointers *pLayerPointers = (void *)arg; 13775256Slh155975 13785256Slh155975 /* Read the interrupt status from mdl */ 13795256Slh155975 intrCauses = mdlReadInterrupt(pLayerPointers); 13805256Slh155975 13815256Slh155975 if (intrCauses == 0) { 13825256Slh155975 pLayerPointers->pOdl->statistics.intr_OTHER ++; 13835256Slh155975 return (DDI_INTR_UNCLAIMED); 13845256Slh155975 } 13855256Slh155975 13865256Slh155975 if (intrCauses & LCINT) { 13875256Slh155975 if (mdlReadLink(pLayerPointers) == LINK_UP) { 13885256Slh155975 mdlGetActiveMediaInfo(pLayerPointers); 13895256Slh155975 /* Link status changed */ 13905256Slh155975 if (pLayerPointers->pOdl->LinkStatus != 13915256Slh155975 LINK_STATE_UP) { 13925256Slh155975 pLayerPointers->pOdl->LinkStatus = 13935256Slh155975 LINK_STATE_UP; 13945256Slh155975 mac_link_update(pLayerPointers->pOdl->mh, 13955256Slh155975 LINK_STATE_UP); 13965256Slh155975 } 13975256Slh155975 } else { 13985256Slh155975 if (pLayerPointers->pOdl->LinkStatus != 13995256Slh155975 LINK_STATE_DOWN) { 14005256Slh155975 pLayerPointers->pOdl->LinkStatus = 14015256Slh155975 LINK_STATE_DOWN; 14025256Slh155975 mac_link_update(pLayerPointers->pOdl->mh, 14035256Slh155975 LINK_STATE_DOWN); 14045256Slh155975 } 14055256Slh155975 } 14065256Slh155975 } 14075256Slh155975 /* 14085256Slh155975 * RINT0: Receive Interrupt is set by the controller after the last 14095256Slh155975 * descriptor of a receive frame for this ring has been updated by 14105256Slh155975 * writing a 0 to the OWNership bit. 14115256Slh155975 */ 14125256Slh155975 if (intrCauses & RINT0) { 14135256Slh155975 pLayerPointers->pOdl->statistics.intr_RINT0 ++; 14145256Slh155975 amd8111s_receive(pLayerPointers); 14155256Slh155975 } 14165256Slh155975 14175256Slh155975 /* 14185256Slh155975 * TINT0: Transmit Interrupt is set by the controller after the OWN bit 14195256Slh155975 * in the last descriptor of a transmit frame in this particular ring 14205256Slh155975 * has been cleared to indicate the frame has been copied to the 14215256Slh155975 * transmit FIFO. 14225256Slh155975 */ 14235256Slh155975 if (intrCauses & TINT0) { 14245256Slh155975 pLayerPointers->pOdl->statistics.intr_TINT0 ++; 14255256Slh155975 /* 14265256Slh155975 * if desc ring is NULL and tx buf is not NULL, it should 14275256Slh155975 * drain tx buffer 14285256Slh155975 */ 14295256Slh155975 amd8111s_send_serial(pLayerPointers); 14305256Slh155975 } 14315256Slh155975 14325256Slh155975 if (intrCauses & STINT) { 14335256Slh155975 pLayerPointers->pOdl->statistics.intr_STINT ++; 14345256Slh155975 } 14355256Slh155975 14365256Slh155975 14375256Slh155975 return (DDI_INTR_CLAIMED); 14385256Slh155975 } 14395256Slh155975 14405256Slh155975 /* 14415256Slh155975 * To re-initilize data structures. 14425256Slh155975 */ 14435256Slh155975 static void 14445256Slh155975 amd8111s_sw_reset(struct LayerPointers *pLayerPointers) 14455256Slh155975 { 14465256Slh155975 /* Reset all Tx/Rx queues and descriptors */ 14475256Slh155975 milResetTxQ(pLayerPointers); 14485256Slh155975 milInitRxQ(pLayerPointers); 14495256Slh155975 } 14505256Slh155975 14515256Slh155975 /* 14525256Slh155975 * Send all pending tx packets 14535256Slh155975 */ 14545256Slh155975 static void 14555256Slh155975 amd8111s_tx_drain(struct LayerPointers *adapter) 14565256Slh155975 { 14575256Slh155975 struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart; 14585256Slh155975 int i, desc_count = 0; 14595256Slh155975 for (i = 0; i < 30; i++) { 14605256Slh155975 while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) { 14615256Slh155975 /* This packet has been transmitted */ 14625256Slh155975 pTx_desc ++; 14635256Slh155975 desc_count ++; 14645256Slh155975 } 14655256Slh155975 if (desc_count == TX_RING_SIZE) { 14665256Slh155975 break; 14675256Slh155975 } 14685256Slh155975 /* Wait 1 ms */ 14695256Slh155975 drv_usecwait(1000); 14705256Slh155975 } 14715256Slh155975 adapter->pOdl->statistics.tx_draintime = i; 14725256Slh155975 } 14735256Slh155975 14745256Slh155975 /* 14755256Slh155975 * (GLD Entry Point) To start card will be called at 14765256Slh155975 * ifconfig plumb 14775256Slh155975 */ 14785256Slh155975 static int 14795256Slh155975 amd8111s_m_start(void *arg) 14805256Slh155975 { 14815256Slh155975 struct LayerPointers *pLayerPointers = arg; 14825256Slh155975 struct odl *pOdl = pLayerPointers->pOdl; 14835256Slh155975 14845256Slh155975 amd8111s_sw_reset(pLayerPointers); 14855256Slh155975 mdlHWReset(pLayerPointers); 14865256Slh155975 rw_enter(&pOdl->chip_lock, RW_WRITER); 14875256Slh155975 pLayerPointers->run = B_TRUE; 14885256Slh155975 rw_exit(&pOdl->chip_lock); 14895256Slh155975 return (0); 14905256Slh155975 } 14915256Slh155975 14925256Slh155975 /* 14935256Slh155975 * (GLD Entry Point) To stop card will be called at 14945256Slh155975 * ifconfig unplumb 14955256Slh155975 */ 14965256Slh155975 static void 14975256Slh155975 amd8111s_m_stop(void *arg) 14985256Slh155975 { 14995256Slh155975 struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg; 15005256Slh155975 struct odl *pOdl = pLayerPointers->pOdl; 15015256Slh155975 15025256Slh155975 /* Ensure send all pending tx packets */ 15035256Slh155975 amd8111s_tx_drain(pLayerPointers); 15045256Slh155975 /* 15055256Slh155975 * Stop the controller and disable the controller interrupt 15065256Slh155975 */ 15075256Slh155975 rw_enter(&pOdl->chip_lock, RW_WRITER); 15085256Slh155975 mdlStopChip(pLayerPointers); 15095256Slh155975 pLayerPointers->run = B_FALSE; 15105256Slh155975 rw_exit(&pOdl->chip_lock); 15115256Slh155975 } 15125256Slh155975 15135256Slh155975 /* 15145256Slh155975 * To clean up all 15155256Slh155975 */ 15165256Slh155975 static void 15175256Slh155975 amd8111s_free_resource(struct LayerPointers *pLayerPointers) 15185256Slh155975 { 15195256Slh155975 unsigned long mem_free_array[100]; 15205256Slh155975 unsigned long *pmem_free_array, size; 15215256Slh155975 15225256Slh155975 /* Free Rx/Tx descriptors */ 15235256Slh155975 amd8111s_free_descriptors(pLayerPointers); 15245256Slh155975 15255256Slh155975 /* Free memory on lower layers */ 15265256Slh155975 milFreeResources(pLayerPointers, mem_free_array); 15275256Slh155975 pmem_free_array = mem_free_array; 15285256Slh155975 while (*pmem_free_array) { 15295256Slh155975 switch (*pmem_free_array) { 15305256Slh155975 case VIRTUAL: 15315256Slh155975 size = *(++pmem_free_array); 15325256Slh155975 pmem_free_array++; 15335256Slh155975 kmem_free((void *)*(pmem_free_array), size); 15345256Slh155975 break; 15355256Slh155975 } 15365256Slh155975 pmem_free_array++; 15375256Slh155975 } 15385256Slh155975 15395256Slh155975 amd8111s_free_buffers(pLayerPointers); 15405256Slh155975 } 15415256Slh155975 15425256Slh155975 /* 15435256Slh155975 * (GLD Enty pointer) To add/delete multi cast addresses 15445256Slh155975 * 15455256Slh155975 */ 15465256Slh155975 static int 15475256Slh155975 amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 15485256Slh155975 { 15495256Slh155975 struct LayerPointers *pLayerPointers = arg; 15505256Slh155975 15515256Slh155975 if (add) { 15525256Slh155975 /* Add a multicast entry */ 15535256Slh155975 mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr); 15545256Slh155975 } else { 15555256Slh155975 /* Delete a multicast entry */ 15565256Slh155975 mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr); 15575256Slh155975 } 15585256Slh155975 15595256Slh155975 return (0); 15605256Slh155975 } 15615256Slh155975 15625256Slh155975 #ifdef AMD8111S_DEBUG 15635256Slh155975 /* 15645256Slh155975 * The size of MIB registers is only 32 bits. Dump them before one 15655256Slh155975 * of them overflows. 15665256Slh155975 */ 15675256Slh155975 static void 15685256Slh155975 amd8111s_dump_mib(struct LayerPointers *pLayerPointers) 15695256Slh155975 { 15705256Slh155975 struct amd8111s_statistics *adapterStat; 15715256Slh155975 15725256Slh155975 adapterStat = &pLayerPointers->pOdl->statistics; 15735256Slh155975 15745256Slh155975 adapterStat->mib_dump_counter ++; 15755256Slh155975 15765256Slh155975 /* 15775256Slh155975 * Rx Counters 15785256Slh155975 */ 15795256Slh155975 adapterStat->rx_mib_unicst_packets += 15805256Slh155975 mdlReadMib(pLayerPointers, RcvUniCastPkts); 15815256Slh155975 adapterStat->rx_mib_multicst_packets += 15825256Slh155975 mdlReadMib(pLayerPointers, RcvMultiCastPkts); 15835256Slh155975 adapterStat->rx_mib_broadcst_packets += 15845256Slh155975 mdlReadMib(pLayerPointers, RcvBroadCastPkts); 15855256Slh155975 adapterStat->rx_mib_macctrl_packets += 15865256Slh155975 mdlReadMib(pLayerPointers, RcvMACCtrl); 15875256Slh155975 adapterStat->rx_mib_flowctrl_packets += 15885256Slh155975 mdlReadMib(pLayerPointers, RcvFlowCtrl); 15895256Slh155975 15905256Slh155975 adapterStat->rx_mib_bytes += 15915256Slh155975 mdlReadMib(pLayerPointers, RcvOctets); 15925256Slh155975 adapterStat->rx_mib_good_bytes += 15935256Slh155975 mdlReadMib(pLayerPointers, RcvGoodOctets); 15945256Slh155975 15955256Slh155975 adapterStat->rx_mib_undersize_packets += 15965256Slh155975 mdlReadMib(pLayerPointers, RcvUndersizePkts); 15975256Slh155975 adapterStat->rx_mib_oversize_packets += 15985256Slh155975 mdlReadMib(pLayerPointers, RcvOversizePkts); 15995256Slh155975 16005256Slh155975 adapterStat->rx_mib_drop_packets += 16015256Slh155975 mdlReadMib(pLayerPointers, RcvDropPktsRing0); 16025256Slh155975 adapterStat->rx_mib_align_err_packets += 16035256Slh155975 mdlReadMib(pLayerPointers, RcvAlignmentErrors); 16045256Slh155975 adapterStat->rx_mib_fcs_err_packets += 16055256Slh155975 mdlReadMib(pLayerPointers, RcvFCSErrors); 16065256Slh155975 adapterStat->rx_mib_symbol_err_packets += 16075256Slh155975 mdlReadMib(pLayerPointers, RcvSymbolErrors); 16085256Slh155975 adapterStat->rx_mib_miss_packets += 16095256Slh155975 mdlReadMib(pLayerPointers, RcvMissPkts); 16105256Slh155975 16115256Slh155975 /* 16125256Slh155975 * Tx Counters 16135256Slh155975 */ 16145256Slh155975 adapterStat->tx_mib_packets += 16155256Slh155975 mdlReadMib(pLayerPointers, XmtPackets); 16165256Slh155975 adapterStat->tx_mib_multicst_packets += 16175256Slh155975 mdlReadMib(pLayerPointers, XmtMultiCastPkts); 16185256Slh155975 adapterStat->tx_mib_broadcst_packets += 16195256Slh155975 mdlReadMib(pLayerPointers, XmtBroadCastPkts); 16205256Slh155975 adapterStat->tx_mib_flowctrl_packets += 16215256Slh155975 mdlReadMib(pLayerPointers, XmtFlowCtrl); 16225256Slh155975 16235256Slh155975 adapterStat->tx_mib_bytes += 16245256Slh155975 mdlReadMib(pLayerPointers, XmtOctets); 16255256Slh155975 16265256Slh155975 adapterStat->tx_mib_defer_trans_packets += 16275256Slh155975 mdlReadMib(pLayerPointers, XmtDeferredTransmit); 16285256Slh155975 adapterStat->tx_mib_collision_packets += 16295256Slh155975 mdlReadMib(pLayerPointers, XmtCollisions); 16305256Slh155975 adapterStat->tx_mib_one_coll_packets += 16315256Slh155975 mdlReadMib(pLayerPointers, XmtOneCollision); 16325256Slh155975 adapterStat->tx_mib_multi_coll_packets += 16335256Slh155975 mdlReadMib(pLayerPointers, XmtMultipleCollision); 16345256Slh155975 adapterStat->tx_mib_late_coll_packets += 16355256Slh155975 mdlReadMib(pLayerPointers, XmtLateCollision); 16365256Slh155975 adapterStat->tx_mib_ex_coll_packets += 16375256Slh155975 mdlReadMib(pLayerPointers, XmtExcessiveCollision); 16385256Slh155975 16395256Slh155975 16405256Slh155975 /* Clear all MIB registers */ 16415256Slh155975 WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address 16425256Slh155975 + MIB_ADDR, MIB_CLEAR); 16435256Slh155975 } 16445256Slh155975 #endif 16455256Slh155975 16465256Slh155975 /* 16475256Slh155975 * (GLD Entry Point) set/unset promiscus mode 16485256Slh155975 */ 16495256Slh155975 static int 16505256Slh155975 amd8111s_m_promisc(void *arg, boolean_t on) 16515256Slh155975 { 16525256Slh155975 struct LayerPointers *pLayerPointers = arg; 16535256Slh155975 16545256Slh155975 if (on) { 16555256Slh155975 mdlSetPromiscuous(pLayerPointers); 16565256Slh155975 } else { 16575256Slh155975 mdlDisablePromiscuous(pLayerPointers); 16585256Slh155975 } 16595256Slh155975 16605256Slh155975 return (0); 16615256Slh155975 } 16625256Slh155975 16635256Slh155975 /* 16645256Slh155975 * (Gld Entry point) Changes the Mac address of card 16655256Slh155975 */ 16665256Slh155975 static int 16675256Slh155975 amd8111s_m_unicst(void *arg, const uint8_t *macaddr) 16685256Slh155975 { 16695256Slh155975 struct LayerPointers *pLayerPointers = arg; 16705256Slh155975 16715256Slh155975 mdlDisableInterrupt(pLayerPointers); 16725256Slh155975 mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr); 16735256Slh155975 mdlEnableInterrupt(pLayerPointers); 16745256Slh155975 16755256Slh155975 return (0); 16765256Slh155975 } 16775256Slh155975 16785256Slh155975 /* 16795256Slh155975 * Reset the card 16805256Slh155975 */ 16815256Slh155975 void 16825256Slh155975 amd8111s_reset(struct LayerPointers *pLayerPointers) 16835256Slh155975 { 16845256Slh155975 amd8111s_sw_reset(pLayerPointers); 16855256Slh155975 mdlHWReset(pLayerPointers); 16865256Slh155975 } 16875256Slh155975 16885256Slh155975 /* 16895256Slh155975 * attach(9E) -- Attach a device to the system 16905256Slh155975 * 16915256Slh155975 * Called once for each board after successfully probed. 16925256Slh155975 * will do 16935256Slh155975 * a. creating minor device node for the instance. 16945256Slh155975 * b. allocate & Initilize four layers (call odlInit) 16955256Slh155975 * c. get MAC address 16965256Slh155975 * d. initilize pLayerPointers to gld private pointer 16975256Slh155975 * e. register with GLD 16985256Slh155975 * if any action fails does clean up & returns DDI_FAILURE 16995256Slh155975 * else retursn DDI_SUCCESS 17005256Slh155975 */ 17015256Slh155975 static int 17025256Slh155975 amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 17035256Slh155975 { 17045256Slh155975 mac_register_t *macp; 17055256Slh155975 struct LayerPointers *pLayerPointers; 17065256Slh155975 struct odl *pOdl; 17075256Slh155975 ddi_acc_handle_t *pci_handle; 17085256Slh155975 ddi_device_acc_attr_t dev_attr; 17095256Slh155975 caddr_t addrp = NULL; 17105256Slh155975 17115256Slh155975 switch (cmd) { 17125256Slh155975 case DDI_ATTACH: 17135256Slh155975 break; 17145256Slh155975 default: 17155256Slh155975 return (DDI_FAILURE); 17165256Slh155975 } 17175256Slh155975 17185256Slh155975 pLayerPointers = (struct LayerPointers *) 17195256Slh155975 kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP); 17205256Slh155975 amd8111sadapter = pLayerPointers; 17215256Slh155975 17225256Slh155975 /* Get device instance number */ 17235256Slh155975 pLayerPointers->instance = ddi_get_instance(devinfo); 17245256Slh155975 ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers); 17255256Slh155975 17265256Slh155975 pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP); 17275256Slh155975 pLayerPointers->pOdl = pOdl; 17285256Slh155975 17295256Slh155975 pOdl->devinfo = devinfo; 17305256Slh155975 17315256Slh155975 /* 17325256Slh155975 * Here, we only allocate memory for struct odl and initilize it. 17335256Slh155975 * All other memory allocation & initilization will be done in odlInit 17345256Slh155975 * later on this routine. 17355256Slh155975 */ 17365256Slh155975 if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock) 17375256Slh155975 != DDI_SUCCESS) { 17385256Slh155975 amd8111s_log(pLayerPointers, CE_NOTE, 17395256Slh155975 "attach: get iblock cookies failed"); 17405256Slh155975 goto attach_failure; 17415256Slh155975 } 17425256Slh155975 17435256Slh155975 rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock); 17445256Slh155975 mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock", 17455256Slh155975 MUTEX_DRIVER, (void *)pOdl->iblock); 17465256Slh155975 mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock", 17475256Slh155975 MUTEX_DRIVER, (void *)pOdl->iblock); 17485256Slh155975 17495256Slh155975 /* Setup PCI space */ 17505256Slh155975 if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) { 17515256Slh155975 return (DDI_FAILURE); 17525256Slh155975 } 17535256Slh155975 pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI; 17545256Slh155975 pci_handle = &pOdl->pci_handle; 17555256Slh155975 17565256Slh155975 pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID); 17575256Slh155975 pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID); 17585256Slh155975 17595256Slh155975 /* 17605256Slh155975 * Allocate and initialize all resource and map device registers. 17615256Slh155975 * If failed, it returns a non-zero value. 17625256Slh155975 */ 17635256Slh155975 if (amd8111s_odlInit(pLayerPointers) != 0) { 17645256Slh155975 goto attach_failure; 17655256Slh155975 } 17665256Slh155975 pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE; 17675256Slh155975 17685256Slh155975 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 17695256Slh155975 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 17705256Slh155975 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 17715256Slh155975 17725256Slh155975 if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr, 17735256Slh155975 &(pLayerPointers->pOdl->MemBasehandle)) != 0) { 17745256Slh155975 amd8111s_log(pLayerPointers, CE_NOTE, 17755256Slh155975 "attach: ddi_regs_map_setup failed"); 17765256Slh155975 goto attach_failure; 17775256Slh155975 } 17785256Slh155975 pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp; 17795256Slh155975 17805256Slh155975 /* Initialize HW */ 17815256Slh155975 mdlOpen(pLayerPointers); 17825256Slh155975 mdlGetActiveMediaInfo(pLayerPointers); 17835256Slh155975 pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS; 17845256Slh155975 17855256Slh155975 /* 17865256Slh155975 * Setup the interrupt 17875256Slh155975 */ 17885256Slh155975 if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr, 17895256Slh155975 (caddr_t)pLayerPointers) != DDI_SUCCESS) { 17905256Slh155975 goto attach_failure; 17915256Slh155975 } 17925256Slh155975 pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED; 17935256Slh155975 17945256Slh155975 /* 17955256Slh155975 * Setup soft intr 17965256Slh155975 */ 17975256Slh155975 if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id, 17985256Slh155975 NULL, NULL, amd8111s_send_drain, 17995256Slh155975 (caddr_t)pLayerPointers) != DDI_SUCCESS) { 18005256Slh155975 goto attach_failure; 18015256Slh155975 } 18025256Slh155975 pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED; 18035256Slh155975 18045256Slh155975 /* 18055256Slh155975 * Initilize the mac structure 18065256Slh155975 */ 18075256Slh155975 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 18085256Slh155975 goto attach_failure; 18095256Slh155975 18105256Slh155975 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 18115256Slh155975 macp->m_driver = pLayerPointers; 18125256Slh155975 macp->m_dip = devinfo; 18135256Slh155975 /* Get MAC address */ 18145256Slh155975 mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress); 18155256Slh155975 macp->m_src_addr = pOdl->MacAddress; 18165256Slh155975 macp->m_callbacks = &amd8111s_m_callbacks; 18175256Slh155975 macp->m_min_sdu = 0; 18185256Slh155975 /* 1518 - 14 (ether header) - 4 (CRC) */ 18195256Slh155975 macp->m_max_sdu = ETHERMTU; 18205895Syz147064 macp->m_margin = VLAN_TAGSZ; 18215256Slh155975 18225256Slh155975 /* 18235256Slh155975 * Finally, we're ready to register ourselves with the MAC layer 18245256Slh155975 * interface; if this succeeds, we're ready to start. 18255256Slh155975 */ 18265256Slh155975 if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) { 18275256Slh155975 mac_free(macp); 18285256Slh155975 goto attach_failure; 18295256Slh155975 } 18305256Slh155975 mac_free(macp); 18315256Slh155975 18325256Slh155975 pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED; 18335256Slh155975 18345256Slh155975 return (DDI_SUCCESS); 18355256Slh155975 18365256Slh155975 attach_failure: 18375256Slh155975 (void) amd8111s_unattach(devinfo, pLayerPointers); 18385256Slh155975 return (DDI_FAILURE); 18395256Slh155975 18405256Slh155975 } 18415256Slh155975 18425256Slh155975 /* 18435256Slh155975 * detach(9E) -- Detach a device from the system 18445256Slh155975 * 18455256Slh155975 * It is called for each device instance when the system is preparing to 18465256Slh155975 * unload a dynamically unloadable driver. 18475256Slh155975 * will Do 18485256Slh155975 * a. check if any driver buffers are held by OS. 18495256Slh155975 * b. do clean up of all allocated memory if it is not in use by OS. 18505256Slh155975 * c. un register with GLD 18515256Slh155975 * d. return DDI_SUCCESS on succes full free & unregister 18525256Slh155975 * else GLD_FAILURE 18535256Slh155975 */ 18545256Slh155975 static int 18555256Slh155975 amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 18565256Slh155975 { 18575256Slh155975 struct LayerPointers *pLayerPointers; 18585256Slh155975 18595256Slh155975 switch (cmd) { 18605256Slh155975 case DDI_DETACH: 18615256Slh155975 break; 18625256Slh155975 default: 18635256Slh155975 return (DDI_FAILURE); 18645256Slh155975 } 18655256Slh155975 18665256Slh155975 /* 18675256Slh155975 * Get the driver private (struct LayerPointers *) structure 18685256Slh155975 */ 18695256Slh155975 if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private 18705256Slh155975 (devinfo)) == NULL) { 18715256Slh155975 return (DDI_FAILURE); 18725256Slh155975 } 18735256Slh155975 18745256Slh155975 return (amd8111s_unattach(devinfo, pLayerPointers)); 18755256Slh155975 } 18765256Slh155975 18775256Slh155975 static int 18785256Slh155975 amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers) 18795256Slh155975 { 18805256Slh155975 struct odl *pOdl = pLayerPointers->pOdl; 18815256Slh155975 18825256Slh155975 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) { 18835256Slh155975 /* Unregister driver from the GLD interface */ 18845256Slh155975 if (mac_unregister(pOdl->mh) != DDI_SUCCESS) { 18855256Slh155975 return (DDI_FAILURE); 18865256Slh155975 } 18875256Slh155975 } 18885256Slh155975 18895256Slh155975 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) { 18905256Slh155975 ddi_remove_intr(devinfo, 0, pOdl->iblock); 18915256Slh155975 } 18925256Slh155975 18935256Slh155975 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) { 18945256Slh155975 ddi_remove_softintr(pOdl->drain_id); 18955256Slh155975 } 18965256Slh155975 18975256Slh155975 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) { 18985256Slh155975 /* Stop HW */ 18995256Slh155975 mdlStopChip(pLayerPointers); 19005256Slh155975 ddi_regs_map_free(&(pOdl->MemBasehandle)); 19015256Slh155975 } 19025256Slh155975 19035256Slh155975 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) { 19045256Slh155975 /* Free All memory allocated */ 19055256Slh155975 amd8111s_free_resource(pLayerPointers); 19065256Slh155975 } 19075256Slh155975 19085256Slh155975 if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) { 19095256Slh155975 pci_config_teardown(&pOdl->pci_handle); 19105256Slh155975 mutex_destroy(&pOdl->mdlSendLock); 19115256Slh155975 mutex_destroy(&pOdl->mdlRcvLock); 19125256Slh155975 rw_destroy(&pOdl->chip_lock); 19135256Slh155975 } 19145256Slh155975 19155256Slh155975 kmem_free(pOdl, sizeof (struct odl)); 19165256Slh155975 kmem_free(pLayerPointers, sizeof (struct LayerPointers)); 19175256Slh155975 19185256Slh155975 return (DDI_SUCCESS); 19195256Slh155975 } 19205256Slh155975 19215256Slh155975 /* 19225256Slh155975 * (GLD Entry Point)GLD will call this entry point perodicaly to 19235256Slh155975 * get driver statistices. 19245256Slh155975 */ 19255256Slh155975 static int 19265256Slh155975 amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val) 19275256Slh155975 { 19285256Slh155975 struct LayerPointers *pLayerPointers = arg; 19295256Slh155975 struct amd8111s_statistics *adapterStat; 19305256Slh155975 19315256Slh155975 adapterStat = &pLayerPointers->pOdl->statistics; 19325256Slh155975 19335256Slh155975 switch (stat) { 19345256Slh155975 19355256Slh155975 /* 19365256Slh155975 * Current Status 19375256Slh155975 */ 19385256Slh155975 case MAC_STAT_IFSPEED: 19395256Slh155975 *val = pLayerPointers->pMdl->Speed * 1000000; 19405256Slh155975 break; 19415256Slh155975 19425256Slh155975 case ETHER_STAT_LINK_DUPLEX: 19435256Slh155975 if (pLayerPointers->pMdl->FullDuplex) { 19445256Slh155975 *val = LINK_DUPLEX_FULL; 19455256Slh155975 } else { 19465256Slh155975 *val = LINK_DUPLEX_HALF; 19475256Slh155975 } 19485256Slh155975 break; 19495256Slh155975 19505256Slh155975 /* 19515256Slh155975 * Capabilities 19525256Slh155975 */ 19535256Slh155975 case ETHER_STAT_CAP_1000FDX: 19545256Slh155975 *val = 0; 19555256Slh155975 break; 19565256Slh155975 19575256Slh155975 case ETHER_STAT_CAP_1000HDX: 19585256Slh155975 *val = 0; 19595256Slh155975 break; 19605256Slh155975 19615256Slh155975 case ETHER_STAT_CAP_100FDX: 19625256Slh155975 *val = 1; 19635256Slh155975 break; 19645256Slh155975 19655256Slh155975 case ETHER_STAT_CAP_100HDX: 19665256Slh155975 *val = 1; 19675256Slh155975 break; 19685256Slh155975 19695256Slh155975 case ETHER_STAT_CAP_10FDX: 19705256Slh155975 *val = 1; 19715256Slh155975 break; 19725256Slh155975 19735256Slh155975 case ETHER_STAT_CAP_10HDX: 19745256Slh155975 *val = 1; 19755256Slh155975 break; 19765256Slh155975 19775256Slh155975 case ETHER_STAT_CAP_ASMPAUSE: 19785256Slh155975 *val = 1; 19795256Slh155975 break; 19805256Slh155975 19815256Slh155975 case ETHER_STAT_CAP_PAUSE: 19825256Slh155975 *val = 1; 19835256Slh155975 break; 19845256Slh155975 19855256Slh155975 case ETHER_STAT_CAP_AUTONEG: 19865256Slh155975 *val = 1; 19875256Slh155975 break; 19885256Slh155975 19895256Slh155975 case ETHER_STAT_ADV_CAP_1000FDX: 19905256Slh155975 *val = 0; 19915256Slh155975 break; 19925256Slh155975 19935256Slh155975 case ETHER_STAT_ADV_CAP_1000HDX: 19945256Slh155975 *val = 0; 19955256Slh155975 break; 19965256Slh155975 19975256Slh155975 case ETHER_STAT_ADV_CAP_100FDX: 19985256Slh155975 *val = 1; 19995256Slh155975 break; 20005256Slh155975 20015256Slh155975 case ETHER_STAT_ADV_CAP_100HDX: 20025256Slh155975 *val = 1; 20035256Slh155975 break; 20045256Slh155975 20055256Slh155975 case ETHER_STAT_ADV_CAP_10FDX: 20065256Slh155975 *val = 1; 20075256Slh155975 break; 20085256Slh155975 20095256Slh155975 case ETHER_STAT_ADV_CAP_10HDX: 20105256Slh155975 *val = 1; 20115256Slh155975 break; 20125256Slh155975 20135256Slh155975 case ETHER_STAT_ADV_CAP_ASMPAUSE: 20145256Slh155975 *val = 1; 20155256Slh155975 break; 20165256Slh155975 20175256Slh155975 case ETHER_STAT_ADV_CAP_PAUSE: 20185256Slh155975 *val = 1; 20195256Slh155975 break; 20205256Slh155975 20215256Slh155975 case ETHER_STAT_ADV_CAP_AUTONEG: 20225256Slh155975 *val = 1; 20235256Slh155975 break; 20245256Slh155975 20255256Slh155975 /* 20265256Slh155975 * Rx Counters 20275256Slh155975 */ 20285256Slh155975 case MAC_STAT_IPACKETS: 20295256Slh155975 *val = adapterStat->rx_mib_unicst_packets + 20305256Slh155975 adapterStat->rx_mib_multicst_packets + 20315256Slh155975 adapterStat->rx_mib_broadcst_packets + 20325256Slh155975 mdlReadMib(pLayerPointers, RcvUniCastPkts) + 20335256Slh155975 mdlReadMib(pLayerPointers, RcvMultiCastPkts) + 20345256Slh155975 mdlReadMib(pLayerPointers, RcvBroadCastPkts); 20355256Slh155975 break; 20365256Slh155975 20375256Slh155975 case MAC_STAT_RBYTES: 20385256Slh155975 *val = adapterStat->rx_mib_bytes + 20395256Slh155975 mdlReadMib(pLayerPointers, RcvOctets); 20405256Slh155975 break; 20415256Slh155975 20425256Slh155975 case MAC_STAT_MULTIRCV: 20435256Slh155975 *val = adapterStat->rx_mib_multicst_packets + 20445256Slh155975 mdlReadMib(pLayerPointers, RcvMultiCastPkts); 20455256Slh155975 break; 20465256Slh155975 20475256Slh155975 case MAC_STAT_BRDCSTRCV: 20485256Slh155975 *val = adapterStat->rx_mib_broadcst_packets + 20495256Slh155975 mdlReadMib(pLayerPointers, RcvBroadCastPkts); 20505256Slh155975 break; 20515256Slh155975 20525256Slh155975 case MAC_STAT_NORCVBUF: 20535256Slh155975 *val = adapterStat->rx_allocfail + 20545256Slh155975 adapterStat->rx_mib_drop_packets + 20555256Slh155975 mdlReadMib(pLayerPointers, RcvDropPktsRing0); 20565256Slh155975 break; 20575256Slh155975 20585256Slh155975 case MAC_STAT_IERRORS: 20595256Slh155975 *val = adapterStat->rx_mib_align_err_packets + 20605256Slh155975 adapterStat->rx_mib_fcs_err_packets + 20615256Slh155975 adapterStat->rx_mib_symbol_err_packets + 20625256Slh155975 mdlReadMib(pLayerPointers, RcvAlignmentErrors) + 20635256Slh155975 mdlReadMib(pLayerPointers, RcvFCSErrors) + 20645256Slh155975 mdlReadMib(pLayerPointers, RcvSymbolErrors); 20655256Slh155975 break; 20665256Slh155975 20675256Slh155975 case ETHER_STAT_ALIGN_ERRORS: 20685256Slh155975 *val = adapterStat->rx_mib_align_err_packets + 20695256Slh155975 mdlReadMib(pLayerPointers, RcvAlignmentErrors); 20705256Slh155975 break; 20715256Slh155975 20725256Slh155975 case ETHER_STAT_FCS_ERRORS: 20735256Slh155975 *val = adapterStat->rx_mib_fcs_err_packets + 20745256Slh155975 mdlReadMib(pLayerPointers, RcvFCSErrors); 20755256Slh155975 break; 20765256Slh155975 20775256Slh155975 /* 20785256Slh155975 * Tx Counters 20795256Slh155975 */ 20805256Slh155975 case MAC_STAT_OPACKETS: 20815256Slh155975 *val = adapterStat->tx_mib_packets + 20825256Slh155975 mdlReadMib(pLayerPointers, XmtPackets); 20835256Slh155975 break; 20845256Slh155975 20855256Slh155975 case MAC_STAT_OBYTES: 20865256Slh155975 *val = adapterStat->tx_mib_bytes + 20875256Slh155975 mdlReadMib(pLayerPointers, XmtOctets); 20885256Slh155975 break; 20895256Slh155975 20905256Slh155975 case MAC_STAT_MULTIXMT: 20915256Slh155975 *val = adapterStat->tx_mib_multicst_packets + 20925256Slh155975 mdlReadMib(pLayerPointers, XmtMultiCastPkts); 20935256Slh155975 break; 20945256Slh155975 20955256Slh155975 case MAC_STAT_BRDCSTXMT: 20965256Slh155975 *val = adapterStat->tx_mib_broadcst_packets + 20975256Slh155975 mdlReadMib(pLayerPointers, XmtBroadCastPkts); 20985256Slh155975 break; 20995256Slh155975 21005256Slh155975 case MAC_STAT_NOXMTBUF: 21015256Slh155975 *val = adapterStat->tx_no_descriptor; 21025256Slh155975 break; 21035256Slh155975 21045256Slh155975 case MAC_STAT_OERRORS: 21055256Slh155975 *val = adapterStat->tx_mib_ex_coll_packets + 21065256Slh155975 mdlReadMib(pLayerPointers, XmtExcessiveCollision); 21075256Slh155975 break; 21085256Slh155975 21095256Slh155975 case MAC_STAT_COLLISIONS: 21105256Slh155975 *val = adapterStat->tx_mib_ex_coll_packets + 21115256Slh155975 mdlReadMib(pLayerPointers, XmtCollisions); 21125256Slh155975 break; 21135256Slh155975 21145256Slh155975 case ETHER_STAT_FIRST_COLLISIONS: 21155256Slh155975 *val = adapterStat->tx_mib_one_coll_packets + 21165256Slh155975 mdlReadMib(pLayerPointers, XmtOneCollision); 21175256Slh155975 break; 21185256Slh155975 21195256Slh155975 case ETHER_STAT_MULTI_COLLISIONS: 21205256Slh155975 *val = adapterStat->tx_mib_multi_coll_packets + 21215256Slh155975 mdlReadMib(pLayerPointers, XmtMultipleCollision); 21225256Slh155975 break; 21235256Slh155975 21245256Slh155975 case ETHER_STAT_EX_COLLISIONS: 21255256Slh155975 *val = adapterStat->tx_mib_ex_coll_packets + 21265256Slh155975 mdlReadMib(pLayerPointers, XmtExcessiveCollision); 21275256Slh155975 break; 21285256Slh155975 21295256Slh155975 case ETHER_STAT_TX_LATE_COLLISIONS: 21305256Slh155975 *val = adapterStat->tx_mib_late_coll_packets + 21315256Slh155975 mdlReadMib(pLayerPointers, XmtLateCollision); 21325256Slh155975 break; 21335256Slh155975 21345256Slh155975 case ETHER_STAT_DEFER_XMTS: 21355256Slh155975 *val = adapterStat->tx_mib_defer_trans_packets + 21365256Slh155975 mdlReadMib(pLayerPointers, XmtDeferredTransmit); 21375256Slh155975 break; 21385256Slh155975 21395256Slh155975 default: 21405256Slh155975 return (ENOTSUP); 21415256Slh155975 } 21425256Slh155975 return (0); 21435256Slh155975 } 21445256Slh155975 21455256Slh155975 /* 21465256Slh155975 * Memory Read Function Used by MDL to set card registers. 21475256Slh155975 */ 21485256Slh155975 unsigned char 21495256Slh155975 READ_REG8(struct LayerPointers *pLayerPointers, long x) 21505256Slh155975 { 21515256Slh155975 return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x)); 21525256Slh155975 } 21535256Slh155975 21545256Slh155975 int 21555256Slh155975 READ_REG16(struct LayerPointers *pLayerPointers, long x) 21565256Slh155975 { 21575256Slh155975 return (ddi_get16(pLayerPointers->pOdl->MemBasehandle, 21585256Slh155975 (uint16_t *)(x))); 21595256Slh155975 } 21605256Slh155975 21615256Slh155975 long 21625256Slh155975 READ_REG32(struct LayerPointers *pLayerPointers, long x) 21635256Slh155975 { 21645256Slh155975 return (ddi_get32(pLayerPointers->pOdl->MemBasehandle, 21655256Slh155975 (uint32_t *)(x))); 21665256Slh155975 } 21675256Slh155975 21685256Slh155975 void 21695256Slh155975 WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y) 21705256Slh155975 { 21715256Slh155975 ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y); 21725256Slh155975 } 21735256Slh155975 21745256Slh155975 void 21755256Slh155975 WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y) 21765256Slh155975 { 21775256Slh155975 ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y); 21785256Slh155975 } 21795256Slh155975 21805256Slh155975 void 21815256Slh155975 WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y) 21825256Slh155975 { 21835256Slh155975 ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y); 21845256Slh155975 } 21855256Slh155975 21865256Slh155975 void 21875256Slh155975 WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y) 21885256Slh155975 { 21895256Slh155975 int i; 21905256Slh155975 for (i = 0; i < 8; i++) { 21915256Slh155975 WRITE_REG8(pLayerPointers, (x + i), y[i]); 21925256Slh155975 } 21935256Slh155975 } 2194