/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws.  Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List.  Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */

/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/vlan.h>
#include "amd8111s_main.h"

/* Global macro Definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define	INTERFACE_NAME	"amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64

static char ident[] = "AMD8111 10/100M Ethernet";

/*
 * Driver Entry Points
 */
static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * GLD Entry points prototype
 */
static int amd8111s_m_unicst(void *, const uint8_t *);
static int amd8111s_m_promisc(void *, boolean_t);
static int amd8111s_m_stat(void *, uint_t, uint64_t *);
static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
static int amd8111s_m_start(void *);
static void amd8111s_m_stop(void *);
static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
static uint_t amd8111s_intr(caddr_t);

static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);

static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
static int amd8111s_odlInit(struct LayerPointers *);
static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
static void amd8111s_free_descriptors(struct LayerPointers *);
static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
    struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);


static void amd8111s_log(struct LayerPointers *adapter, int level,
    char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,
	nulldev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nochpoll,
	ddi_prop_op,
	NULL,
	D_NEW | D_MP,
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev,			/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};


static mac_callbacks_t amd8111s_m_callbacks = {
	MC_IOCTL,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	amd8111s_m_ioctl
};


/*
 * Standard Driver Load Entry Point
 * It will be called at load time of driver.
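 * It registers the MAC ops via mac_init_ops() and installs the module
 * linkage with mod_install(), backing out the MAC ops on failure.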
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard Driver Entry Point for Query.
 * It will be called at any time to get Driver info.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}

/*
 * Standard Driver Entry Point for Unload.
 * It will be called at unload time of driver.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE		},
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
};

static void
amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
{

	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == adapter->pOdl->loopback_mode)
		return;

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
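	 * (Each supported mode below stops the chip, reprograms CMD2/CMD3
	 * and the PHY as needed, and then restarts the chip.)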
	 */
	switch (mode) {
	default:
		return;

	case AMD8111S_LB_NONE:
		mdlStopChip(adapter);
		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    INLOOP);
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    EXLOOP);
		}

		amd8111s_reset(adapter);
		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
		adapter->pOdl->rx_fcs_stripped = B_FALSE;
		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_100:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 100;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	adapter->pOdl->loopback_mode = mode;
}

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Copy one packet from dma memory to mblk. Inc dma descriptor pointer.
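 * Returns B_TRUE when a descriptor was consumed (even if the frame had
 * errors and was discarded), B_FALSE when the hardware still owns the
 * next descriptor or no mblk could be allocated.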
 */
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor,
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0. This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			statistics->rx_error_zerosize++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}

/*
 * Get the received packets from NIC card and send them to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval--;

	while (numOfPkts < RX_RING_SIZE) {

		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, NULL, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);

}

/*
 * Print message in release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	auto char name[32];
	auto char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}

/*
 * To allocate & initialize all resources.
 * Called by amd8111s_attach().
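 * Returns 0 on success and 1 on failure; any memory already allocated
 * is freed again on the failure path.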
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == NULL)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}

	/*
	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
	 * routine to fill physical address of Rx buffer into Rx descriptor.
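	 * (The same routine also allocates the Tx coalesce buffers and
	 * initializes the mil queues.)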
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free All memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (pmem_req_array == NULL)
				return (1);
			kmem_free((int *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_handle for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptors related variables */
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;


	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_handle for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptors related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}

/*
 * Allocate Tx/Rx Ring buffer
 */
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
    struct amd8111s_dma_ringbuf *pRing,
    uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_hdl[idx] == 0) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffer.
 * Allocate a Rx buffer for each Rx descriptor. Then
 * call mil routine to fill physical address of Rx
 * buffer into Rx descriptors
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil Queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}

/*
 * Free all Rx/Tx buffer
 */

static void
amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
{
	/* Free Tx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);

	/* Free Rx Buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
}

/*
 * Try to recycle all the descriptors and Tx buffers
 * which are already freed by hardware.
 */
static int
amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count = 0;

	pNonphysical = pLayerPointers->pMil->pNonphysical;
	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
		pLayerPointers->pOdl->tx_buf.free =
		    NEXT(pLayerPointers->pOdl->tx_buf, free);
		pNonphysical->TxDescQRead++;
		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
		}
		count++;
	}

	if (pLayerPointers->pMil->tx_reschedule)
		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);

	return (count);
}

/*
 * Get packets in the Tx buffer, then copy them to the send buffer.
 * Trigger hardware to send out packets.
 */
static void
amd8111s_send_serial(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count;

	pNonphysical = pLayerPointers->pMil->pNonphysical;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);

	for (count = 0; count < AMD8111S_SEND_MAX; count++) {
		if (pLayerPointers->pOdl->tx_buf.curr ==
		    pLayerPointers->pOdl->tx_buf.next) {
			break;
		}
		/* to verify if it needs to recycle the tx Buf */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ?
		    pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor++;
				break;
			}

		/* Fill packet length */
		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
		    ->pOdl->tx_buf.curr->msg_size;

		/* Fill physical buffer address */
		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;

		pNonphysical->TxDescQWrite->Tx_SOP = 1;
		pNonphysical->TxDescQWrite->Tx_EOP = 1;
		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
		pNonphysical->TxDescQWrite->Tx_OWN = 1;

		pNonphysical->TxDescQWrite++;
		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
		}

		pLayerPointers->pOdl->tx_buf.curr =
		    NEXT(pLayerPointers->pOdl->tx_buf, curr);

	}

	pLayerPointers->pOdl->statistics.tx_ok_packets += count;

	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	/* Call mdlTransmit to send the pkt out on the network */
	mdlTransmit(pLayerPointers);

}

/*
 * Softintr entrance. Try to send out packets in the Tx buffer.
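 * (The soft interrupt is triggered from amd8111s_recycle_tx() via
 * ddi_trigger_softintr() when tx_reschedule is set.)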
 * If reschedule is True, call mac_tx_update to re-enable the
 * transmit
 */
static uint_t
amd8111s_send_drain(caddr_t arg)
{
	struct LayerPointers *pLayerPointers = (void *)arg;

	amd8111s_send_serial(pLayerPointers);

	if (pLayerPointers->pMil->tx_reschedule &&
	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
	    pLayerPointers->pOdl->tx_buf.free) {
		mac_tx_update(pLayerPointers->pOdl->mh);
		pLayerPointers->pMil->tx_reschedule = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Get a Tx buffer
 */
static struct amd8111s_msgbuf *
amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_msgbuf *tmp, *next;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
	if (next == pLayerPointers->pOdl->tx_buf.free) {
		tmp = NULL;
	} else {
		tmp = pLayerPointers->pOdl->tx_buf.next;
		pLayerPointers->pOdl->tx_buf.next = next;
	}
	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	return (tmp);
}

static boolean_t
amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
{
	struct odl *pOdl;
	size_t frag_len;
	mblk_t *tmp;
	struct amd8111s_msgbuf *txBuf;
	uint8_t *pMsg;

	pOdl = pLayerPointers->pOdl;

	/* alloc send buffer */
	txBuf = amd8111s_getTxbuf(pLayerPointers);
	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer++;
		pLayerPointers->pMil->tx_reschedule = B_TRUE;
		amd8111s_send_serial(pLayerPointers);
		return (B_FALSE);
	}

	/* copy packet to send buffer */
	txBuf->msg_size = 0;
	pMsg = (uint8_t *)txBuf->vir_addr;
	for (tmp = mp; tmp; tmp = tmp->b_cont) {
		frag_len = MBLKL(tmp);
		bcopy(tmp->b_rptr, pMsg, frag_len);
		txBuf->msg_size += frag_len;
		pMsg += frag_len;
	}
	freemsg(mp);

	amd8111s_send_serial(pLayerPointers);

	return (B_TRUE);
}

/*
 * (GLD Entry Point) Send the message block to lower layer
 */
static mblk_t *
amd8111s_m_tx(void *arg, mblk_t *mp)
{
	struct LayerPointers *pLayerPointers = arg;
	mblk_t *next;

	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
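		/* The interface is not plumbed; drop the whole chain. */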
		pLayerPointers->pOdl->statistics.tx_afterunplumb++;
		freemsgchain(mp);
		mp = NULL;
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!amd8111s_send(pLayerPointers, mp)) {
			/* Send fail */
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	rw_exit(&pLayerPointers->pOdl->chip_lock);
	return (mp);
}

/*
 * (GLD Entry Point) Interrupt Service Routine
 */
static uint_t
amd8111s_intr(caddr_t arg)
{
	unsigned int intrCauses;
	struct LayerPointers *pLayerPointers = (void *)arg;

	/* Read the interrupt status from mdl */
	intrCauses = mdlReadInterrupt(pLayerPointers);

	if (intrCauses == 0) {
		pLayerPointers->pOdl->statistics.intr_OTHER++;
		return (DDI_INTR_UNCLAIMED);
	}

	if (intrCauses & LCINT) {
		if (mdlReadLink(pLayerPointers) == LINK_UP) {
			mdlGetActiveMediaInfo(pLayerPointers);
			/* Link status changed */
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_UP) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_UP;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_UP);
			}
		} else {
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_DOWN) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_DOWN;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_DOWN);
			}
		}
	}
	/*
	 * RINT0: Receive Interrupt is set by the controller after the last
	 * descriptor of a receive frame for this ring has been updated by
	 * writing a 0 to the OWNership bit.
	 */
	if (intrCauses & RINT0) {
		pLayerPointers->pOdl->statistics.intr_RINT0++;
		amd8111s_receive(pLayerPointers);
	}

	/*
	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
	 * in the last descriptor of a transmit frame in this particular ring
	 * has been cleared to indicate the frame has been copied to the
	 * transmit FIFO.
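	 * We use it to push out any packets still waiting in the Tx
	 * coalesce buffer.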
	 */
	if (intrCauses & TINT0) {
		pLayerPointers->pOdl->statistics.intr_TINT0 ++;
		/*
		 * If the descriptor ring is empty but tx buffers are still
		 * queued, drain the tx buffers.
		 */
		amd8111s_send_serial(pLayerPointers);
	}

	if (intrCauses & STINT) {
		pLayerPointers->pOdl->statistics.intr_STINT ++;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Re-initialize the driver's data structures.
 */
static void
amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
{
	/* Reset all Tx/Rx queues and descriptors */
	milResetTxQ(pLayerPointers);
	milInitRxQ(pLayerPointers);
}

/*
 * Wait (up to 30 ms) for all pending tx packets to be sent
 */
static void
amd8111s_tx_drain(struct LayerPointers *adapter)
{
	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
	int i, desc_count = 0;
	for (i = 0; i < 30; i++) {
		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
			/* This packet has been transmitted */
			pTx_desc ++;
			desc_count ++;
		}
		if (desc_count == TX_RING_SIZE) {
			break;
		}
		/* Wait 1 ms */
		drv_usecwait(1000);
	}
	adapter->pOdl->statistics.tx_draintime = i;
}

/*
 * (GLD Entry Point) Start the card. Called when the interface is
 * plumbed (ifconfig plumb).
 */
static int
amd8111s_m_start(void *arg)
{
	struct LayerPointers *pLayerPointers = arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	pLayerPointers->run = B_TRUE;
	rw_exit(&pOdl->chip_lock);
	return (0);
}

/*
 * (GLD Entry Point) Stop the card. Called when the interface is
 * unplumbed (ifconfig unplumb).
 */
static void
amd8111s_m_stop(void *arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Make sure all pending tx packets have been sent */
	amd8111s_tx_drain(pLayerPointers);
	/*
	 * Stop the controller and disable the controller interrupt
	 */
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	mdlStopChip(pLayerPointers);
	pLayerPointers->run = B_FALSE;
	rw_exit(&pOdl->chip_lock);
}

/*
 * To clean up all allocated resources
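 * (Rx/Tx descriptors first, then whatever the lower layers report back
 * through milFreeResources(), and finally the data buffers).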
 */
static void
amd8111s_free_resource(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_free_array[100];
	unsigned long *pmem_free_array, size;

	/* Free Rx/Tx descriptors */
	amd8111s_free_descriptors(pLayerPointers);

	/* Free memory on lower layers */
	milFreeResources(pLayerPointers, mem_free_array);
	pmem_free_array = mem_free_array;
	while (*pmem_free_array) {
		switch (*pmem_free_array) {
		case VIRTUAL:
			size = *(++pmem_free_array);
			pmem_free_array++;
			kmem_free((void *)*(pmem_free_array), size);
			break;
		}
		pmem_free_array++;
	}

	amd8111s_free_buffers(pLayerPointers);
}

/*
 * (GLD Entry Point) To add/delete multicast addresses
 */
static int
amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
{
	struct LayerPointers *pLayerPointers = arg;

	if (add) {
		/* Add a multicast entry */
		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
	} else {
		/* Delete a multicast entry */
		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
	}

	return (0);
}

#ifdef AMD8111S_DEBUG
/*
 * The size of MIB registers is only 32 bits. Dump them before one
 * of them overflows.
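 * Each hardware counter is read, accumulated into the 64-bit soft
 * statistics below, and then every MIB register is cleared (MIB_CLEAR)
 * so the 32-bit hardware counters restart from zero.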
 */
static void
amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	adapterStat->mib_dump_counter ++;

	/*
	 * Rx Counters
	 */
	adapterStat->rx_mib_unicst_packets +=
	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
	adapterStat->rx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
	adapterStat->rx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
	adapterStat->rx_mib_macctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvMACCtrl);
	adapterStat->rx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvFlowCtrl);

	adapterStat->rx_mib_bytes +=
	    mdlReadMib(pLayerPointers, RcvOctets);
	adapterStat->rx_mib_good_bytes +=
	    mdlReadMib(pLayerPointers, RcvGoodOctets);

	adapterStat->rx_mib_undersize_packets +=
	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
	adapterStat->rx_mib_oversize_packets +=
	    mdlReadMib(pLayerPointers, RcvOversizePkts);

	adapterStat->rx_mib_drop_packets +=
	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
	adapterStat->rx_mib_align_err_packets +=
	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
	adapterStat->rx_mib_fcs_err_packets +=
	    mdlReadMib(pLayerPointers, RcvFCSErrors);
	adapterStat->rx_mib_symbol_err_packets +=
	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
	adapterStat->rx_mib_miss_packets +=
	    mdlReadMib(pLayerPointers, RcvMissPkts);

	/*
	 * Tx Counters
	 */
	adapterStat->tx_mib_packets +=
	    mdlReadMib(pLayerPointers, XmtPackets);
	adapterStat->tx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
	adapterStat->tx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
	adapterStat->tx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, XmtFlowCtrl);

	adapterStat->tx_mib_bytes +=
	    mdlReadMib(pLayerPointers, XmtOctets);

	adapterStat->tx_mib_defer_trans_packets +=
	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
	adapterStat->tx_mib_collision_packets +=
	    mdlReadMib(pLayerPointers, XmtCollisions);
	adapterStat->tx_mib_one_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtOneCollision);
	adapterStat->tx_mib_multi_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
	adapterStat->tx_mib_late_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtLateCollision);
	adapterStat->tx_mib_ex_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);

	/* Clear all MIB registers */
	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
	    + MIB_ADDR, MIB_CLEAR);
}
#endif

/*
 * (GLD Entry Point) Set/unset promiscuous mode
 */
static int
amd8111s_m_promisc(void *arg, boolean_t on)
{
	struct LayerPointers *pLayerPointers = arg;

	if (on) {
		mdlSetPromiscuous(pLayerPointers);
	} else {
		mdlDisablePromiscuous(pLayerPointers);
	}

	return (0);
}

/*
 * (GLD Entry Point) Change the MAC address of the card
 */
static int
amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct LayerPointers *pLayerPointers = arg;

	mdlDisableInterrupt(pLayerPointers);
	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
	mdlEnableInterrupt(pLayerPointers);

	return (0);
}

/*
 * Reset the card
 */
void
amd8111s_reset(struct LayerPointers *pLayerPointers)
{
	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board, after it has been successfully probed.
 * The routine will:
 * a. create the minor device node for the instance;
 * b. allocate and initialize the four driver layers (call odlInit);
 * c. get the MAC address;
 * d. initialize pLayerPointers as the GLD private pointer;
 * e. register with GLD.
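 *
 * Each step that completes sets a bit in attach_progress, so that
 * amd8111s_unattach() can undo exactly the work that was done.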
 * If any step fails, the routine cleans up and returns DDI_FAILURE;
 * otherwise it returns DDI_SUCCESS.
 */
static int
amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	struct LayerPointers *pLayerPointers;
	struct odl *pOdl;
	ddi_acc_handle_t *pci_handle;
	ddi_device_acc_attr_t dev_attr;
	caddr_t addrp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	pLayerPointers = (struct LayerPointers *)
	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
	amd8111sadapter = pLayerPointers;

	/* Get device instance number */
	pLayerPointers->instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);

	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
	pLayerPointers->pOdl = pOdl;

	pOdl->devinfo = devinfo;

	/*
	 * Here we only allocate memory for struct odl and initialize it.
	 * All other memory allocation and initialization is done in odlInit,
	 * later in this routine.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
	    != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: get iblock cookies failed");
		goto attach_failure;
	}

	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);

	/* Setup PCI space */
	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
	pci_handle = &pOdl->pci_handle;

	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);

	/*
	 * Allocate and initialize all resources and map device registers.
	 * If it fails, a non-zero value is returned.
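	 * (amd8111s_odlInit() is also where the descriptor rings and DMA
	 * buffers are presumed to be set up; amd8111s_free_resource() is
	 * the matching teardown path.)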
	 */
	if (amd8111s_odlInit(pLayerPointers) != 0) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: ddi_regs_map_setup failed");
		goto attach_failure;
	}
	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;

	/* Initialize HW */
	mdlOpen(pLayerPointers);
	mdlGetActiveMediaInfo(pLayerPointers);
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;

	/*
	 * Setup the interrupt
	 */
	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;

	/*
	 * Setup soft intr
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
	    NULL, NULL, amd8111s_send_drain,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;

	/*
	 * Initialize the mac structure
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_failure;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pLayerPointers;
	macp->m_dip = devinfo;
	/* Get MAC address */
	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
	macp->m_src_addr = pOdl->MacAddress;
	macp->m_callbacks = &amd8111s_m_callbacks;
	macp->m_min_sdu = 0;
	/* 1518 - 14 (ether header) - 4 (CRC) */
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're ready to start.
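	 * Note that macp is released with mac_free() on both the success
	 * and failure paths below; the MAC layer does not hold on to it.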
	 */
	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
		mac_free(macp);
		goto attach_failure;
	}
	mac_free(macp);

	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;

	return (DDI_SUCCESS);

attach_failure:
	(void) amd8111s_unattach(devinfo, pLayerPointers);
	return (DDI_FAILURE);

}

/*
 * detach(9E) -- Detach a device from the system
 *
 * It is called for each device instance when the system is preparing to
 * unload a dynamically unloadable driver.
 * The routine will:
 * a. check whether any driver buffers are still held by the OS;
 * b. clean up all allocated memory if it is not in use by the OS;
 * c. unregister from GLD;
 * d. return DDI_SUCCESS if the free and unregister succeed,
 *    DDI_FAILURE otherwise.
 */
static int
amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct LayerPointers *pLayerPointers;

	switch (cmd) {
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the driver private (struct LayerPointers *) structure
	 */
	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
	    (devinfo)) == NULL) {
		return (DDI_FAILURE);
	}

	return (amd8111s_unattach(devinfo, pLayerPointers));
}

static int
amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
		/* Unregister driver from the GLD interface */
		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
		ddi_remove_intr(devinfo, 0, pOdl->iblock);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
		ddi_remove_softintr(pOdl->drain_id);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
		/* Stop HW */
		mdlStopChip(pLayerPointers);
		ddi_regs_map_free(&(pOdl->MemBasehandle));
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
		/* Free All memory allocated */
		amd8111s_free_resource(pLayerPointers);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
		pci_config_teardown(&pOdl->pci_handle);
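		/*
		 * The mutexes and the rwlock were initialized early in
		 * attach, right after the iblock cookie was obtained, so
		 * they are torn down together with the PCI config handle.
		 */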
		mutex_destroy(&pOdl->mdlSendLock);
		mutex_destroy(&pOdl->mdlRcvLock);
		rw_destroy(&pOdl->chip_lock);
	}

	kmem_free(pOdl, sizeof (struct odl));
	kmem_free(pLayerPointers, sizeof (struct LayerPointers));

	return (DDI_SUCCESS);
}

/*
 * (GLD Entry Point) GLD calls this entry point periodically to
 * get driver statistics.
 */
static int
amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct LayerPointers *pLayerPointers = arg;
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	switch (stat) {

	/*
	 * Current Status
	 */
	case MAC_STAT_IFSPEED:
		*val = pLayerPointers->pMdl->Speed * 1000000;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		if (pLayerPointers->pMdl->FullDuplex) {
			*val = LINK_DUPLEX_FULL;
		} else {
			*val = LINK_DUPLEX_HALF;
		}
		break;

	/*
	 * Capabilities
	 */
	case ETHER_STAT_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = 1;
		break;

	/*
	 * Rx Counters
	 */
	case MAC_STAT_IPACKETS:
		*val = adapterStat->rx_mib_unicst_packets +
		    adapterStat->rx_mib_multicst_packets +
		    adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_RBYTES:
		*val = adapterStat->rx_mib_bytes +
		    mdlReadMib(pLayerPointers, RcvOctets);
		break;

	case MAC_STAT_MULTIRCV:
		*val = adapterStat->rx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_NORCVBUF:
		*val = adapterStat->rx_allocfail +
		    adapterStat->rx_mib_drop_packets +
		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
		break;

	case MAC_STAT_IERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    adapterStat->rx_mib_fcs_err_packets +
		    adapterStat->rx_mib_symbol_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = adapterStat->rx_mib_fcs_err_packets +
		    mdlReadMib(pLayerPointers, RcvFCSErrors);
		break;

	/*
	 * Tx Counters
	 */
	case MAC_STAT_OPACKETS:
		*val = adapterStat->tx_mib_packets +
		    mdlReadMib(pLayerPointers, XmtPackets);
		break;

	case MAC_STAT_OBYTES:
		*val = adapterStat->tx_mib_bytes +
		    mdlReadMib(pLayerPointers, XmtOctets);
		break;

	case MAC_STAT_MULTIXMT:
		*val = adapterStat->tx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = adapterStat->tx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
		break;

	case MAC_STAT_NOXMTBUF:
		*val = adapterStat->tx_no_descriptor;
		break;

	case MAC_STAT_OERRORS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case MAC_STAT_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtCollisions);
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = adapterStat->tx_mib_one_coll_packets +
		    mdlReadMib(pLayerPointers, XmtOneCollision);
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = adapterStat->tx_mib_multi_coll_packets +
		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = adapterStat->tx_mib_late_coll_packets +
		    mdlReadMib(pLayerPointers, XmtLateCollision);
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = adapterStat->tx_mib_defer_trans_packets +
		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}

/*
 * Memory access functions used by the MDL to read and write card registers.
 */
unsigned char
READ_REG8(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
}

int
READ_REG16(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
	    (uint16_t *)(x)));
}

long
READ_REG32(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
	    (uint32_t *)(x)));
}

void
WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
}

void
WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
}

void
WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
}

void
WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
{
	int i;
	for (i = 0; i < 8; i++) {
		WRITE_REG8(pLayerPointers, (x + i), y[i]);
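		/* the 64-bit pattern is written out one byte at a time */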
	}
}
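
/*
 * Usage sketch (illustrative only, not compiled as part of the driver):
 * the MDL layer drives the chip through the wrappers above, for example
 * when clearing the hardware MIB counters as amd8111s_dump_mib() does:
 *
 *	WRITE_REG16(pLayerPointers,
 *	    pLayerPointers->pMdl->Mem_Address + MIB_ADDR, MIB_CLEAR);
 */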