/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws.  Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List.  Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */


#pragma ident	"@(#)$RCSfile: solaris_odl.c,v $ $Revision: 1.3 $ " \
	" $Date: 2004/04/22 15:22:54 $ AMD"


/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include "amd8111s_main.h"

/* Global macro definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define	INTERFACE_NAME	"amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64

static char ident[] = "AMD8111 10/100M Ethernet 1.0";

/*
 * Driver Entry Points
 */
static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * GLD Entry point prototypes
 */
static int amd8111s_m_unicst(void *, const uint8_t *);
static int amd8111s_m_promisc(void *, boolean_t);
static int amd8111s_m_stat(void *, uint_t, uint64_t *);
static void amd8111s_m_resources(void *arg);
static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
static int amd8111s_m_start(void *);
static void amd8111s_m_stop(void *);
static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
static uint_t amd8111s_intr(caddr_t);

static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);

static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
static int amd8111s_odlInit(struct LayerPointers *);
static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
static void amd8111s_free_descriptors(struct LayerPointers *);
static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
    struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);
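
/*
 * Layering note: this file is the Solaris-specific (ODL) part of the
 * driver.  The mdl* routines it calls (mdlStartChip(), mdlStopChip(),
 * mdlReadInterrupt(), ...) program the controller hardware, and the
 * mil* routines (milInitRxQ(), milResetTxQ(), ...) manage the shared
 * descriptor queues; both live outside this file.
 */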

static void amd8111s_log(struct LayerPointers *adapter, int level,
    char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,
	nulldev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nochpoll,
	ddi_prop_op,
	NULL,
	D_NEW | D_MP,
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};
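
/*
 * Note on the two DMA attribute sets above: both restrict mappings to
 * the low 4 GB (dma_attr_addr_hi = 0xFFFFFFFF) and to a single cookie
 * (dma_attr_sgllen = 1), since the controller takes 32-bit physical
 * addresses.  Packet buffers only need byte alignment (dma_attr_align
 * = 1), while descriptor rings require 16-byte alignment
 * (dma_attr_align = 0x10).
 */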

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

#define	AMD8111S_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL)


static mac_callbacks_t amd8111s_m_callbacks = {
	AMD8111S_M_CALLBACK_FLAGS,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	amd8111s_m_resources,
	amd8111s_m_ioctl
};


/*
 * Standard driver load entry point.
 * Called when the driver is loaded.
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard driver query entry point.
 * May be called at any time to return driver information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}
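
/*
 * Note: mac_init_ops() in _init() and mac_fini_ops() in _fini() must
 * stay balanced: _init() undoes the registration itself when
 * mod_install() fails, and _fini() undoes it only after mod_remove()
 * has succeeded.
 */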

/*
 * Standard driver unload entry point.
 * Called when the driver is unloaded.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/* Adjust the Interrupt Coalescing Register to coalesce interrupts */
static void
amd8111s_m_blank(void *arg, time_t ticks, uint32_t count)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}

static void
amd8111s_m_resources(void *arg)
{
	struct LayerPointers *adapter = arg;
	mac_rx_fifo_t mrf;

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = amd8111s_m_blank;
	mrf.mrf_arg = (void *)adapter;
	mrf.mrf_normal_blank_time = 128;
	mrf.mrf_normal_pkt_count = 8;

	adapter->pOdl->mrh = mac_resource_add(adapter->pOdl->mh,
	    (mac_resource_t *)&mrf);
}

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE		},
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
};
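
/*
 * The loopmodes[] table above is what the LB_GET_INFO ioctl copies out
 * to userland: one lb_property_t per supported mode, pairing a
 * human-readable name with the driver-private mode value that
 * LB_SET_MODE later passes back to amd8111s_set_loop_mode().
 */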
324*5256Slh155975 */ 325*5256Slh155975 switch (mode) { 326*5256Slh155975 default: 327*5256Slh155975 return; 328*5256Slh155975 329*5256Slh155975 case AMD8111S_LB_NONE: 330*5256Slh155975 mdlStopChip(adapter); 331*5256Slh155975 if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) { 332*5256Slh155975 cmn_err(CE_NOTE, "LB_NONE restored from Interanl LB"); 333*5256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 334*5256Slh155975 INLOOP); 335*5256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3, 336*5256Slh155975 FORCE_FULL_DUPLEX | FORCE_LINK_STATUS); 337*5256Slh155975 } else { 338*5256Slh155975 cmn_err(CE_NOTE, "LB_NONE restored from Exteranl LB"); 339*5256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 340*5256Slh155975 EXLOOP); 341*5256Slh155975 } 342*5256Slh155975 343*5256Slh155975 amd8111s_reset(adapter); 344*5256Slh155975 adapter->pOdl->LinkStatus = LINK_STATE_DOWN; 345*5256Slh155975 adapter->pOdl->rx_fcs_stripped = B_FALSE; 346*5256Slh155975 mdlStartChip(adapter); 347*5256Slh155975 break; 348*5256Slh155975 349*5256Slh155975 case AMD8111S_LB_EXTERNAL_100: 350*5256Slh155975 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100"); 351*5256Slh155975 mdlStopChip(adapter); 352*5256Slh155975 amd8111s_reset(adapter); 353*5256Slh155975 SetIntrCoalesc(adapter, B_FALSE); 354*5256Slh155975 mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100); 355*5256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 356*5256Slh155975 VAL0 | EXLOOP); 357*5256Slh155975 adapter->pOdl->LinkStatus = LINK_STATE_UP; 358*5256Slh155975 adapter->pMdl->Speed = 100; 359*5256Slh155975 adapter->pMdl->FullDuplex = B_TRUE; 360*5256Slh155975 /* Tell GLD the state of the physical link. */ 361*5256Slh155975 mac_link_update(adapter->pOdl->mh, LINK_STATE_UP); 362*5256Slh155975 363*5256Slh155975 adapter->pOdl->rx_fcs_stripped = B_TRUE; 364*5256Slh155975 365*5256Slh155975 mdlStartChip(adapter); 366*5256Slh155975 break; 367*5256Slh155975 368*5256Slh155975 case AMD8111S_LB_EXTERNAL_10: 369*5256Slh155975 cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10"); 370*5256Slh155975 mdlStopChip(adapter); 371*5256Slh155975 amd8111s_reset(adapter); 372*5256Slh155975 SetIntrCoalesc(adapter, B_FALSE); 373*5256Slh155975 mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10); 374*5256Slh155975 WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2, 375*5256Slh155975 VAL0 | EXLOOP); 376*5256Slh155975 adapter->pOdl->LinkStatus = LINK_STATE_UP; 377*5256Slh155975 adapter->pMdl->Speed = 10; 378*5256Slh155975 adapter->pMdl->FullDuplex = B_TRUE; 379*5256Slh155975 /* Tell GLD the state of the physical link. 

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	adapter->pOdl->loopback_mode = mode;
}
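
/*
 * The loopback ioctl handler below implements the four LB_* commands.
 * Each command first checks that the payload size (ioc_count) matches
 * what it expects: an lb_info_sz_t for LB_GET_INFO_SIZE, the full
 * loopmodes[] array for LB_GET_INFO, and a uint32_t mode word for
 * LB_GET_MODE/LB_SET_MODE.  On success it returns IOC_REPLY so the
 * caller sends the (possibly updated) message back up the stream.
 */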

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = (struct LayerPointers *)arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}
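
/*
 * Receive-side ownership protocol: each rx descriptor is handed to the
 * controller by setting its Rx_OWN bit.  The controller clears Rx_OWN
 * once it has stored a frame (or an error) in the descriptor, so the
 * copy routine below may only touch descriptors whose Rx_OWN is 0, and
 * must set Rx_OWN back to 1 after recycling them.
 */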

/*
 * Copy one packet from dma memory to mblk.  Inc dma descriptor pointer.
 */
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor,
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0.  This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			statistics->rx_error_zerosize++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}
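
/*
 * Note that amd8111s_recv_copy() links completed packets through
 * b_next (a packet chain, one mblk per frame) rather than b_cont, so
 * the receive routine below can pass the whole batch, at most
 * RX_RING_SIZE frames per call, to GLD with a single mac_rx() call.
 */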

/*
 * Get the received packets from the NIC and send them to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval--;

	while (numOfPkts < RX_RING_SIZE) {

		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, pOdl->mrh, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);

}

/*
 * Print message in release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	auto char name[32];
	auto char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}
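
/*
 * Resource handshake used by amd8111s_odlInit() below: the mil layer
 * fills mem_req_array[] with zero-terminated (type, size) requests;
 * for each VIRTUAL request this layer appends the allocated kernel
 * address, building (type, size, address) triplets in mem_set_array[],
 * which milSetResources() then consumes.
 */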

/*
 * To allocate and initialize all resources.
 * Called by amd8111s_attach().
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == NULL)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}

	/*
	 * Allocate Rx buffer for each Rx descriptor.  Then call mil layer
	 * routine to fill physical address of Rx buffer into Rx descriptor.
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free all memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (pmem_req_array == NULL)
				return (1);
			kmem_free((int *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptors related variables */
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;
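
	/*
	 * The (address + ALIGNMENT) & ~ALIGNMENT computation above (and
	 * repeated for the Tx ring below) rounds the descriptor base up
	 * to the alignment the controller requires; this assumes
	 * ALIGNMENT is a power-of-two-minus-one mask (e.g. 0xf for the
	 * 16-byte alignment requested in pcn_desc_dma_attr_t).
	 */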

	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptors related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}
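
/*
 * Ring buffers are not allocated as one huge DMA area: the total
 * (msg_size * ring_size) is split into at most AMD8111S_SPLIT (128)
 * trunks, each trunk getting its own DMA handle, access handle and
 * cookie.  The msg_buf[] array then carves every trunk into
 * fixed-size message slots, recording each slot's virtual address,
 * physical address and owning handle.
 */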

/*
 * Allocate Tx/Rx ring buffer
 */
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
    struct amd8111s_dma_ringbuf *pRing,
    uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc returned a short buffer");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}
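
/*
 * The free routine below is also the failure path for
 * amd8111s_alloc_dma_ringbuf(): each loop stops at the first unused
 * slot (zero cookie or NULL handle), so it safely unwinds a partially
 * constructed ring, and it finishes by bzero()ing the ring structure
 * so a later free is harmless.
 */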

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_hdl[idx] == 0) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffers.
 * Allocate a Rx buffer for each Rx descriptor.  Then
 * call mil routine to fill physical address of Rx
 * buffer into Rx descriptors.
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}

/*
 * Free all Rx/Tx buffers
 */

static void
amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
{
	/* Free Tx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);

	/* Free Rx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
}
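
/*
 * Transmit-side ownership works the same way in reverse: the driver
 * sets Tx_OWN when it hands a filled descriptor to the controller, and
 * the controller clears Tx_OWN once the frame has been copied to its
 * FIFO.  amd8111s_recycle_tx() below walks the ring from TxDescQRead,
 * reclaiming every descriptor (and its coalescing buffer) whose Tx_OWN
 * has been cleared.
 */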

/*
 * Try to recycle all the descriptors and Tx buffers
 * which are already freed by hardware.
 */
static int
amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count = 0;

	pNonphysical = pLayerPointers->pMil->pNonphysical;
	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
		pLayerPointers->pOdl->tx_buf.free =
		    NEXT(pLayerPointers->pOdl->tx_buf, free);
		pNonphysical->TxDescQRead++;
		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
		}
		count++;
	}

	if (pLayerPointers->pMil->tx_reschedule)
		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);

	return (count);
}
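
/*
 * In amd8111s_send_serial() below, the ring-full test computes the
 * write pointer's successor (with wraparound); if that successor is
 * the read pointer the descriptor ring is full, and the routine first
 * tries to reclaim completed descriptors via amd8111s_recycle_tx()
 * before giving up and counting tx_no_descriptor.
 */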

/*
 * Get packets in the Tx buffer, then copy them to the send buffer.
 * Trigger hardware to send out packets.
 */
static void
amd8111s_send_serial(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count;

	pNonphysical = pLayerPointers->pMil->pNonphysical;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);

	for (count = 0; count < AMD8111S_SEND_MAX; count++) {
		if (pLayerPointers->pOdl->tx_buf.curr ==
		    pLayerPointers->pOdl->tx_buf.next) {
			break;
		}
		/* to verify if it needs to recycle the tx Buf */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor++;
				break;
			}

		/* Fill packet length */
		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
		    ->pOdl->tx_buf.curr->msg_size;

		/* Fill physical buffer address */
		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;

		pNonphysical->TxDescQWrite->Tx_SOP = 1;
		pNonphysical->TxDescQWrite->Tx_EOP = 1;
		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
		pNonphysical->TxDescQWrite->Tx_OWN = 1;

		pNonphysical->TxDescQWrite++;
		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
		}

		pLayerPointers->pOdl->tx_buf.curr =
		    NEXT(pLayerPointers->pOdl->tx_buf, curr);

	}

	pLayerPointers->pOdl->statistics.tx_ok_packets += count;

	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	/* Call mdlTransmit to send the pkt out on the network */
	mdlTransmit(pLayerPointers);

}
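
/*
 * Flow-control note: when amd8111s_send() runs out of coalescing
 * buffers it sets tx_reschedule and returns B_FALSE, which makes
 * amd8111s_m_tx() hand the unsent chain back to GLD.  Once
 * amd8111s_recycle_tx() frees buffers it triggers the soft interrupt
 * below, whose handler calls mac_tx_update() to restart transmission.
 */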

/*
 * Softintr entry point: try to send out packets in the Tx buffer.
 * If reschedule is true, call mac_tx_update() to re-enable
 * transmission.
 */
static uint_t
amd8111s_send_drain(caddr_t arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;

	amd8111s_send_serial(pLayerPointers);

	if (pLayerPointers->pMil->tx_reschedule &&
	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
	    pLayerPointers->pOdl->tx_buf.free) {
		mac_tx_update(pLayerPointers->pOdl->mh);
		pLayerPointers->pMil->tx_reschedule = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Get a Tx buffer
 */
static struct amd8111s_msgbuf *
amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_msgbuf *tmp, *next;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
	if (next == pLayerPointers->pOdl->tx_buf.free) {
		tmp = NULL;
	} else {
		tmp = pLayerPointers->pOdl->tx_buf.next;
		pLayerPointers->pOdl->tx_buf.next = next;
	}
	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	return (tmp);
}

static boolean_t
amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
{
	struct odl *pOdl;
	size_t frag_len;
	mblk_t *tmp;
	struct amd8111s_msgbuf *txBuf;
	uint8_t *pMsg;

	pOdl = pLayerPointers->pOdl;

	/* alloc send buffer */
	txBuf = amd8111s_getTxbuf(pLayerPointers);
	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer++;
		pLayerPointers->pMil->tx_reschedule = B_TRUE;
		amd8111s_send_serial(pLayerPointers);
		return (B_FALSE);
	}

	/* copy packet to send buffer */
	txBuf->msg_size = 0;
	pMsg = (uint8_t *)txBuf->vir_addr;
	for (tmp = mp; tmp; tmp = tmp->b_cont) {
		frag_len = MBLKL(tmp);
		bcopy(tmp->b_rptr, pMsg, frag_len);
		txBuf->msg_size += frag_len;
		pMsg += frag_len;
	}
	freemsg(mp);

	amd8111s_send_serial(pLayerPointers);

	return (B_TRUE);
}
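
/*
 * Note that amd8111s_send() copies every fragment of the message into
 * one contiguous pre-mapped DMA buffer and frees the mblk chain
 * immediately; the driver never binds packet memory directly, so no
 * per-packet DMA setup or teardown is needed on the hot path.
 */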
/*
 * (GLD Entry Point) Send the message block to the lower layer
 */
static mblk_t *
amd8111s_m_tx(void *arg, mblk_t *mp)
{
	struct LayerPointers *pLayerPointers = arg;
	mblk_t *next;

	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		pLayerPointers->pOdl->statistics.tx_afterunplumb++;
		freemsgchain(mp);
		mp = NULL;
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!amd8111s_send(pLayerPointers, mp)) {
			/* Send failed */
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	rw_exit(&pLayerPointers->pOdl->chip_lock);
	return (mp);
}
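/*
 * Note on the GLD transmit contract above: returning a non-NULL mblk
 * chain from the tx entry point tells the MAC layer that those
 * packets were not accepted and should remain queued. The driver sets
 * tx_reschedule in amd8111s_send() when it runs out of buffers, and
 * later calls mac_tx_update() from the soft interrupt to ask the MAC
 * layer to retry.
 */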
/*
 * (GLD Entry Point) Interrupt Service Routine
 */
static uint_t
amd8111s_intr(caddr_t arg)
{
	unsigned int intrCauses;
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;

	/* Read the interrupt status from mdl */
	intrCauses = mdlReadInterrupt(pLayerPointers);

	if (intrCauses == 0) {
		pLayerPointers->pOdl->statistics.intr_OTHER++;
		return (DDI_INTR_UNCLAIMED);
	}

	if (intrCauses & LCINT) {
		if (mdlReadLink(pLayerPointers) == LINK_UP) {
			mdlGetActiveMediaInfo(pLayerPointers);
			/* Link status changed */
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_UP) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_UP;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_UP);
			}
		} else {
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_DOWN) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_DOWN;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_DOWN);
			}
		}
	}

	/*
	 * RINT0: Receive Interrupt is set by the controller after the last
	 * descriptor of a receive frame for this ring has been updated by
	 * writing a 0 to the OWNership bit.
	 */
	if (intrCauses & RINT0) {
		pLayerPointers->pOdl->statistics.intr_RINT0++;
		amd8111s_receive(pLayerPointers);
	}

	/*
	 * TINT0: Transmit Interrupt is set by the controller after the OWN
	 * bit in the last descriptor of a transmit frame in this particular
	 * ring has been cleared to indicate the frame has been copied to
	 * the transmit FIFO.
	 */
	if (intrCauses & TINT0) {
		pLayerPointers->pOdl->statistics.intr_TINT0++;
		/*
		 * If the descriptor ring is empty but the Tx buffer is
		 * not, drain the Tx buffer now.
		 */
		amd8111s_send_serial(pLayerPointers);
	}

	if (intrCauses & STINT) {
		pLayerPointers->pOdl->statistics.intr_STINT++;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Re-initialize the Tx/Rx data structures.
 */
static void
amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
{
	/* Reset all Tx/Rx queues and descriptors */
	milResetTxQ(pLayerPointers);
	milInitRxQ(pLayerPointers);
}

/*
 * Send all pending tx packets
 */
static void
amd8111s_tx_drain(struct LayerPointers *adapter)
{
	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
	int i, desc_count = 0;

	for (i = 0; i < 30; i++) {
		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
			/* This packet has been transmitted */
			pTx_desc++;
			desc_count++;
		}
		if (desc_count == TX_RING_SIZE) {
			break;
		}
		/* Wait 1 ms */
		drv_usecwait(1000);
	}
	adapter->pOdl->statistics.tx_draintime = i;
}
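/*
 * Note on the drain loop above: Tx_OWN == 0 means the controller has
 * handed the descriptor back to the host, so the inner walk stops at
 * the first descriptor the hardware still owns. With at most 30
 * iterations of drv_usecwait(1000), the wait is bounded at roughly
 * 30 ms even if some descriptors never complete; tx_draintime records
 * how many milliseconds were actually spent.
 */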
/*
 * (GLD Entry Point) Start the card; called at "ifconfig plumb" time.
 */
static int
amd8111s_m_start(void *arg)
{
	struct LayerPointers *pLayerPointers = arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	pLayerPointers->run = B_TRUE;
	rw_exit(&pOdl->chip_lock);
	return (0);
}

/*
 * (GLD Entry Point) Stop the card; called at "ifconfig unplumb" time.
 */
static void
amd8111s_m_stop(void *arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Ensure all pending tx packets are sent out first */
	amd8111s_tx_drain(pLayerPointers);

	/*
	 * Stop the controller and disable the controller interrupt
	 */
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	mdlStopChip(pLayerPointers);
	pLayerPointers->run = B_FALSE;
	rw_exit(&pOdl->chip_lock);
}

/*
 * Clean up all allocated resources.
 */
static void
amd8111s_free_resource(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_free_array[100];
	unsigned long *pmem_free_array, size;

	/* Free Rx/Tx descriptors */
	amd8111s_free_descriptors(pLayerPointers);

	/* Free memory on lower layers */
	milFreeResources(pLayerPointers, mem_free_array);
	pmem_free_array = mem_free_array;
	while (*pmem_free_array) {
		switch (*pmem_free_array) {
		case VIRTUAL:
			size = *(++pmem_free_array);
			pmem_free_array++;
			kmem_free((void *)*(pmem_free_array), size);
			break;
		}
		pmem_free_array++;
	}

	amd8111s_free_buffers(pLayerPointers);
}
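/*
 * Note on the free loop above: milFreeResources() fills
 * mem_free_array with a zero-terminated sequence of records of the
 * form { type, size, address }; only the VIRTUAL type (kmem-allocated
 * virtual memory) is handled here. Schematically the array looks
 * like:
 *
 *	{ VIRTUAL, size0, addr0, VIRTUAL, size1, addr1, ..., 0 }
 *
 * so each iteration consumes one record and the loop ends at the
 * terminating zero.
 */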
/*
 * (GLD Entry Point) Add/delete multicast addresses.
 */
static int
amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
{
	struct LayerPointers *pLayerPointers = arg;

	if (add) {
		/* Add a multicast entry */
		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
	} else {
		/* Delete a multicast entry */
		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
	}

	return (0);
}

#ifdef AMD8111S_DEBUG
/*
 * The hardware MIB registers are only 32 bits wide. Dump them into
 * the software counters before any of them overflows.
 */
static void
amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	adapterStat->mib_dump_counter++;

	/*
	 * Rx Counters
	 */
	adapterStat->rx_mib_unicst_packets +=
	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
	adapterStat->rx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
	adapterStat->rx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
	adapterStat->rx_mib_macctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvMACCtrl);
	adapterStat->rx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvFlowCtrl);

	adapterStat->rx_mib_bytes +=
	    mdlReadMib(pLayerPointers, RcvOctets);
	adapterStat->rx_mib_good_bytes +=
	    mdlReadMib(pLayerPointers, RcvGoodOctets);

	adapterStat->rx_mib_undersize_packets +=
	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
	adapterStat->rx_mib_oversize_packets +=
	    mdlReadMib(pLayerPointers, RcvOversizePkts);

	adapterStat->rx_mib_drop_packets +=
	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
	adapterStat->rx_mib_align_err_packets +=
	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
	adapterStat->rx_mib_fcs_err_packets +=
	    mdlReadMib(pLayerPointers, RcvFCSErrors);
	adapterStat->rx_mib_symbol_err_packets +=
	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
	adapterStat->rx_mib_miss_packets +=
	    mdlReadMib(pLayerPointers, RcvMissPkts);

	/*
	 * Tx Counters
	 */
	adapterStat->tx_mib_packets +=
	    mdlReadMib(pLayerPointers, XmtPackets);
	adapterStat->tx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
	adapterStat->tx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
	adapterStat->tx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, XmtFlowCtrl);

	adapterStat->tx_mib_bytes +=
	    mdlReadMib(pLayerPointers, XmtOctets);

	adapterStat->tx_mib_defer_trans_packets +=
	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
	adapterStat->tx_mib_collision_packets +=
	    mdlReadMib(pLayerPointers, XmtCollisions);
	adapterStat->tx_mib_one_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtOneCollision);
	adapterStat->tx_mib_multi_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
	adapterStat->tx_mib_late_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtLateCollision);
	adapterStat->tx_mib_ex_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);

	/* Clear all MIB registers */
	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
	    + MIB_ADDR, MIB_CLEAR);
}
#endif
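/*
 * Note on the dump-and-clear pattern above: a 32-bit octet counter
 * wraps after roughly 4 GB of traffic, so the debug code periodically
 * folds each hardware register into its wider software accumulator
 * and then resets the hardware counters via MIB_CLEAR. The same
 * accumulate-then-read-live approach appears in amd8111s_m_stat()
 * below, where each reported statistic is the software accumulator
 * plus the current hardware register value.
 */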
/*
 * (GLD Entry Point) Set/unset promiscuous mode.
 */
static int
amd8111s_m_promisc(void *arg, boolean_t on)
{
	struct LayerPointers *pLayerPointers = arg;

	if (on) {
		mdlSetPromiscuous(pLayerPointers);
	} else {
		mdlDisablePromiscuous(pLayerPointers);
	}

	return (0);
}

/*
 * (GLD Entry Point) Change the MAC address of the card.
 */
static int
amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct LayerPointers *pLayerPointers = arg;

	mdlDisableInterrupt(pLayerPointers);
	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
	mdlEnableInterrupt(pLayerPointers);

	return (0);
}

/*
 * Reset the card
 */
void
amd8111s_reset(struct LayerPointers *pLayerPointers)
{
	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
}
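/*
 * The attach path below records its progress in the attach_progress
 * bit mask (AMD8111S_ATTACH_PCI, _RESOURCE, _REGS, _INTRADDED,
 * _RESCHED, _MACREGED), so that amd8111s_unattach() can tear down
 * exactly the steps that completed. A minimal sketch of the pattern
 * (hypothetical names, not the driver's actual flags):
 *
 *	#define	STEP_A	0x01
 *	#define	STEP_B	0x02
 *
 *	progress |= STEP_A;		... after step A succeeds ...
 *	progress |= STEP_B;		... after step B succeeds ...
 *
 *	if (progress & STEP_B)		... undo B ...
 *	if (progress & STEP_A)		... undo A ...
 *
 * On any failure, attach jumps to attach_failure, which runs the same
 * unattach routine used by detach.
 */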
/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board, after it has been successfully probed.
 * It will:
 *	a. create the minor device node for the instance;
 *	b. allocate and initialize the four layers (via odlInit);
 *	c. get the MAC address;
 *	d. set pLayerPointers as the GLD private pointer;
 *	e. register with the GLD framework.
 * If any step fails, it cleans up and returns DDI_FAILURE;
 * otherwise it returns DDI_SUCCESS.
 */
static int
amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	struct LayerPointers *pLayerPointers;
	struct odl *pOdl;
	ddi_acc_handle_t *pci_handle;
	ddi_device_acc_attr_t dev_attr;
	caddr_t addrp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	pLayerPointers = (struct LayerPointers *)
	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
	amd8111sadapter = pLayerPointers;

	/* Get device instance number */
	pLayerPointers->instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);

	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
	pLayerPointers->pOdl = pOdl;

	pOdl->devinfo = devinfo;

	/*
	 * Here, we only allocate memory for struct odl and initialize it.
	 * All other memory allocation & initialization will be done in
	 * odlInit later on in this routine.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
	    != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: get iblock cookies failed");
		goto attach_failure;
	}

	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);

	/* Setup PCI space */
	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
		/* Go through the common cleanup path instead of leaking */
		goto attach_failure;
	}
	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
	pci_handle = &pOdl->pci_handle;

	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);
	/*
	 * Allocate and initialize all resources and map device registers.
	 * If this fails, odlInit returns a non-zero value.
	 */
	if (amd8111s_odlInit(pLayerPointers) != 0) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: ddi_regs_map_setup failed");
		goto attach_failure;
	}
	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;

	/* Initialize HW */
	mdlOpen(pLayerPointers);
	mdlGetActiveMediaInfo(pLayerPointers);
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;

	/*
	 * Setup the interrupt
	 */
	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;

	/*
	 * Setup the soft interrupt for the Tx drain
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
	    NULL, NULL, amd8111s_send_drain,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;

	/*
	 * Initialize the mac structure
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_failure;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pLayerPointers;
	macp->m_dip = devinfo;
	/* Get MAC address */
	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
	macp->m_src_addr = pOdl->MacAddress;
	macp->m_callbacks = &amd8111s_m_callbacks;
	macp->m_min_sdu = 0;
	/* 1518 - 14 (ether header) - 4 (CRC) = 1500 */
	macp->m_max_sdu = ETHERMTU;
	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're ready to start.
	 */
	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
		mac_free(macp);
		goto attach_failure;
	}
	mac_free(macp);

	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;

	return (DDI_SUCCESS);

attach_failure:
	(void) amd8111s_unattach(devinfo, pLayerPointers);
	return (DDI_FAILURE);
}

/*
 * detach(9E) -- Detach a device from the system
 *
 * Called for each device instance when the system is preparing to
 * unload a dynamically unloadable driver.
 * It will:
 *	a. check whether any driver buffers are still held by the OS;
 *	b. clean up all allocated memory if none is in use by the OS;
 *	c. unregister from the GLD framework;
 *	d. return DDI_SUCCESS on a successful free and unregister,
 *	   DDI_FAILURE otherwise.
 */
static int
amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct LayerPointers *pLayerPointers;

	switch (cmd) {
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the driver private (struct LayerPointers *) structure
	 */
	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
	    (devinfo)) == NULL) {
		return (DDI_FAILURE);
	}

	return (amd8111s_unattach(devinfo, pLayerPointers));
}
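/*
 * Note on the teardown order: amd8111s_unattach() below undoes the
 * attach steps in roughly the reverse order they were performed,
 * keyed off the attach_progress bits, so it is safe to call from both
 * the attach failure path (with only some bits set) and the normal
 * detach path (with all bits set).
 */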
static int
amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
		/* Unregister the driver from the GLD interface */
		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
		ddi_remove_intr(devinfo, 0, pOdl->iblock);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
		ddi_remove_softintr(pOdl->drain_id);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
		/* Stop HW */
		mdlStopChip(pLayerPointers);
		ddi_regs_map_free(&(pOdl->MemBasehandle));
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
		/* Free all allocated memory */
		amd8111s_free_resource(pLayerPointers);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
		pci_config_teardown(&pOdl->pci_handle);
		mutex_destroy(&pOdl->mdlSendLock);
		mutex_destroy(&pOdl->mdlRcvLock);
		rw_destroy(&pOdl->chip_lock);
	}

	kmem_free(pOdl, sizeof (struct odl));
	kmem_free(pLayerPointers, sizeof (struct LayerPointers));

	return (DDI_SUCCESS);
}

/*
 * (GLD Entry Point) GLD calls this entry point periodically to get
 * driver statistics.
 */
static int
amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct LayerPointers *pLayerPointers = arg;
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	switch (stat) {

	/*
	 * Current Status
	 */
	case MAC_STAT_IFSPEED:
		*val = pLayerPointers->pMdl->Speed * 1000000;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		if (pLayerPointers->pMdl->FullDuplex) {
			*val = LINK_DUPLEX_FULL;
		} else {
			*val = LINK_DUPLEX_HALF;
		}
		break;

	/*
	 * Capabilities
	 */
	case ETHER_STAT_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = 1;
		break;
	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = 1;
		break;

	/*
	 * Rx Counters
	 */
	case MAC_STAT_IPACKETS:
		*val = adapterStat->rx_mib_unicst_packets +
		    adapterStat->rx_mib_multicst_packets +
		    adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_RBYTES:
		*val = adapterStat->rx_mib_bytes +
		    mdlReadMib(pLayerPointers, RcvOctets);
		break;

	case MAC_STAT_MULTIRCV:
		*val = adapterStat->rx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_NORCVBUF:
		*val = adapterStat->rx_allocfail +
		    adapterStat->rx_mib_drop_packets +
		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
		break;

	case MAC_STAT_IERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    adapterStat->rx_mib_fcs_err_packets +
		    adapterStat->rx_mib_symbol_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = adapterStat->rx_mib_fcs_err_packets +
		    mdlReadMib(pLayerPointers, RcvFCSErrors);
		break;

	/*
	 * Tx Counters
	 */
	case MAC_STAT_OPACKETS:
		*val = adapterStat->tx_mib_packets +
		    mdlReadMib(pLayerPointers, XmtPackets);
		break;

	case MAC_STAT_OBYTES:
		*val = adapterStat->tx_mib_bytes +
		    mdlReadMib(pLayerPointers, XmtOctets);
		break;

	case MAC_STAT_MULTIXMT:
		*val = adapterStat->tx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
		break;
	case MAC_STAT_BRDCSTXMT:
		*val = adapterStat->tx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
		break;

	case MAC_STAT_NOXMTBUF:
		*val = adapterStat->tx_no_descriptor;
		break;

	case MAC_STAT_OERRORS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case MAC_STAT_COLLISIONS:
		/* Pair the collision accumulator with XmtCollisions */
		*val = adapterStat->tx_mib_collision_packets +
		    mdlReadMib(pLayerPointers, XmtCollisions);
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = adapterStat->tx_mib_one_coll_packets +
		    mdlReadMib(pLayerPointers, XmtOneCollision);
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = adapterStat->tx_mib_multi_coll_packets +
		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = adapterStat->tx_mib_late_coll_packets +
		    mdlReadMib(pLayerPointers, XmtLateCollision);
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = adapterStat->tx_mib_defer_trans_packets +
		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}
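/*
 * Note on the statistics above: every packet/byte counter is reported
 * as the software accumulator plus the live value of the
 * corresponding 32-bit hardware MIB register, matching the
 * dump-and-clear scheme in amd8111s_dump_mib() (compiled only under
 * AMD8111S_DEBUG). The soft counters hold everything folded in at the
 * last dump; the hardware registers hold the delta since then.
 */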
/*
 * Memory access functions, used by the MDL to read and set card
 * registers.
 */
unsigned char
READ_REG8(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
}

int
READ_REG16(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
	    (uint16_t *)(x)));
}

long
READ_REG32(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
	    (uint32_t *)(x)));
}

void
WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
}

void
WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
}

void
WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
}

void
WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
{
	int i;

	for (i = 0; i < 8; i++) {
		WRITE_REG8(pLayerPointers, (x + i), y[i]);
	}
}
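/*
 * Note on the accessors above: all register access goes through the
 * DDI handle obtained from ddi_regs_map_setup() in attach, so byte
 * order and ordering semantics follow the dev_attr settings there
 * (little-endian, strict ordering). WRITE_REG64 emulates a 64-bit
 * store as eight single-byte writes; as used here it is only suitable
 * for wide registers that tolerate byte-at-a-time updates (for
 * example, an address filter), an assumption the callers must
 * satisfy.
 */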