/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sunddi.h>
#include "dmfe_impl.h"

/*
 * This is the string displayed by modinfo, etc.
 */
static char dmfe_ident[] = "Davicom DM9102 Ethernet";


/*
 * NOTES:
 *
 * #defines:
 *
 *	DMFE_PCI_RNUMBER is the register-set number to use for the operating
 *	registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
 *	regset 1 will be the operating registers in I/O space, and regset 2
 *	will be the operating registers in MEMORY space (preferred).  If an
 *	expansion ROM is fitted, it may appear as a further register set.
 *
 *	DMFE_SLOP defines the amount by which the chip may read beyond
 *	the end of a buffer or descriptor, apparently 6-8 dwords :(
 *	We have to make sure this doesn't cause it to access unallocated
 *	or unmapped memory.
 *
 *	DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
 *	rounded up to a multiple of 4.  Here we choose a power of two for
 *	speed & simplicity at the cost of a bit more memory.
 *
 *	However, the buffer length field in the TX/RX descriptors is only
 *	eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
 *	per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
 *	(2000) bytes each.
 *
 *	DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
 *	the data buffers.  The descriptors are always set up in CONSISTENT
 *	mode.
 *
 *	DMFE_HEADROOM defines how much space we'll leave in allocated
 *	mblks before the first valid data byte.  This should be chosen
 *	to be 2 modulo 4, so that once the ethernet header (14 bytes)
 *	has been stripped off, the packet data will be 4-byte aligned.
 *	The remaining space can be used by upstream modules to prepend
 *	any headers required.
 *
 * Patchable globals:
 *
 *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
 *		Setting READ_MULTIPLE in this register seems to cause
 *		the chip to generate a READ LINE command with a parity
 *		error!  Don't do it!
 *
 *	dmfe_setup_desc1: the value to be put into descriptor word 1
 *		when sending a SETUP packet.
 *
 *		Setting TX_LAST_DESC in desc1 in a setup packet seems
 *		to make the chip spontaneously reset internally - it
 *		attempts to give back the setup packet descriptor by
 *		writing to PCI address 00000000 - which may or may not
 *		get a MASTER ABORT - after which most of its registers
 *		seem to have either default values or garbage!
 *
 *		TX_FIRST_DESC doesn't seem to have the same effect but
 *		it isn't needed on a setup packet so we'll leave it out
 *		too, just in case it has some other weird side-effect.
 *
 *		The default hardware packet filtering mode is now
 *		HASH_AND_PERFECT (imperfect filtering of multicast
 *		packets and perfect filtering of unicast packets).
 *		If this is found not to work reliably, setting the
 *		TX_FILTER_TYPE1 bit will cause a switchover to using
 *		HASH_ONLY mode (imperfect filtering of *all* packets).
 *		Software will then perform the additional filtering
 *		as required.
 */

#define	DMFE_PCI_RNUMBER	2
#define	DMFE_SLOP		(8*sizeof (uint32_t))
#define	DMFE_BUF_SIZE		2048
#define	DMFE_BUF_SIZE_1		2000
#define	DMFE_DMA_MODE		DDI_DMA_STREAMING
#define	DMFE_HEADROOM		34
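
/*
 * Worked example of the constraints described above: DMFE_HEADROOM is
 * 34, and 34 % 4 == 2, so after the 14-byte ethernet header the payload
 * starts at offset 34 + 14 = 48, which is 4-byte aligned.  Similarly,
 * DMFE_BUF_SIZE_1 (2000) fits in the eleven-bit descriptor length
 * field (maximum 2047), whereas DMFE_BUF_SIZE (2048) would not.
 */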

static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
					TX_FILTER_TYPE0;

/*
 * Some tunable parameters ...
 *	Number of RX/TX ring entries (128/128)
 *	Minimum number of TX ring slots to keep free (1)
 *	Low-water mark at which to try to reclaim TX ring slots (1)
 *	How often to take a TX-done interrupt (twice per ring cycle)
 *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
 */

#define	DMFE_TX_DESC	128	/* Should be a multiple of 4 <= 256	*/
#define	DMFE_RX_DESC	128	/* Should be a multiple of 4 <= 256	*/

static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
static uint32_t dmfe_tx_min_free = 1;
static uint32_t dmfe_tx_reclaim_level = 1;
static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
static boolean_t dmfe_reclaim_on_done = B_FALSE;

/*
 * Time-related parameters:
 *
 *	We use a cyclic to provide a periodic callback; this is then used
 *	to check for TX-stall and poll the link status register.
 *
 *	DMFE_TICK is the interval between cyclic callbacks, in microseconds.
 *
 *	TX_STALL_TIME_100 is the timeout in microseconds between passing
 *	a packet to the chip for transmission and seeing that it's gone,
 *	when running at 100Mb/s.  If we haven't reclaimed at least one
 *	descriptor in this time we assume the transmitter has stalled
 *	and reset the chip.
 *
 *	TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
 *
 *	LINK_POLL_TIME is the interval between checks on the link state
 *	when nothing appears to have happened (this is in addition to the
 *	case where we think we've detected a link change, and serves as a
 *	backup in case the quick link check doesn't work properly).
 *
 * Patchable globals:
 *
 *	dmfe_tick_us:		DMFE_TICK
 *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
 *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
 *	dmfe_link_poll_us:	LINK_POLL_TIME
 *
 * These are then used in _init() to calculate:
 *
 *	stall_100_tix[]: number of consecutive cyclic callbacks without a
 *			reclaim before the TX process is considered stalled,
 *			when running at 100Mb/s.  The elements are indexed
 *			by transmit-engine-state.
 *	stall_10_tix[]: number of consecutive cyclic callbacks without a
 *			reclaim before the TX process is considered stalled,
 *			when running at 10Mb/s.  The elements are indexed
 *			by transmit-engine-state.
 *	factotum_tix: number of consecutive cyclic callbacks before waking
 *			up the factotum even though there doesn't appear to
 *			be anything for it to do
 */

#define	DMFE_TICK		25000		/* microseconds		*/
#define	TX_STALL_TIME_100	50000		/* microseconds		*/
#define	TX_STALL_TIME_10	200000		/* microseconds		*/
#define	LINK_POLL_TIME		5000000		/* microseconds		*/

static uint32_t dmfe_tick_us = DMFE_TICK;
static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
static uint32_t dmfe_link_poll_us = LINK_POLL_TIME;

/*
 * Calculated from above in _init()
 */

static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t factotum_tix;
static uint32_t factotum_fast_tix;
static uint32_t factotum_start_tix;
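
/*
 * Illustrative arithmetic (with the default values above): the
 * microsecond timeouts are converted into counts of DMFE_TICK-sized
 * cyclic callbacks, so the baseline figures work out to roughly
 * TX_STALL_TIME_100 / DMFE_TICK = 50000 / 25000 = 2 ticks at 100Mb/s
 * and 200000 / 25000 = 8 ticks at 10Mb/s; _init() presumably scales
 * these per transmit-engine-state when filling in the arrays above.
 */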

/*
 * Property names
 */
static char localmac_propname[] = "local-mac-address";
static char opmode_propname[] = "opmode-reg-value";
static char debug_propname[] = "dmfe-debug-flags";

static int		dmfe_m_start(void *);
static void		dmfe_m_stop(void *);
static int		dmfe_m_promisc(void *, boolean_t);
static int		dmfe_m_multicst(void *, boolean_t, const uint8_t *);
static int		dmfe_m_unicst(void *, const uint8_t *);
static void		dmfe_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	dmfe_m_getcapab(void *, mac_capab_t, void *);
static mblk_t		*dmfe_m_tx(void *, mblk_t *);
static int		dmfe_m_stat(void *, uint_t, uint64_t *);

static mac_callbacks_t dmfe_m_callbacks = {
	(MC_IOCTL | MC_GETCAPAB),
	dmfe_m_stat,
	dmfe_m_start,
	dmfe_m_stop,
	dmfe_m_promisc,
	dmfe_m_multicst,
	dmfe_m_unicst,
	dmfe_m_tx,
	NULL,
	dmfe_m_ioctl,
	dmfe_m_getcapab,
};


/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr version	*/
	0,			/* dma_attr_addr_lo	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi	*/
	0x0FFFFFF,		/* dma_attr_count_max	*/
	0x20,			/* dma_attr_align	*/
	0x7F,			/* dma_attr_burstsizes	*/
	1,			/* dma_attr_minxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg		*/
	1,			/* dma_attr_sgllen	*/
	1,			/* dma_attr_granular	*/
	0			/* dma_attr_flags	*/
};

/*
 * DMA access attributes for registers and descriptors
 */
static ddi_device_acc_attr_t dmfe_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t dmfe_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};


/*
 * ========== Lowest-level chip register & ring access routines ==========
 */

/*
 * I/O register get/put routines
 */
uint32_t
dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
{
	caddr_t addr;

	addr = dmfep->io_reg + offset;
	return (ddi_get32(dmfep->io_handle, (uint32_t *)addr));
}

void
dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
{
	caddr_t addr;

	addr = dmfep->io_reg + offset;
	ddi_put32(dmfep->io_handle, (uint32_t *)addr, value);
}

/*
 * TX/RX ring get/put routines
 */
static uint32_t
dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
{
	uint32_t *addr;

	addr = (uint32_t *)dma_p->mem_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
}

static void
dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
{
	uint32_t *addr;

	addr = (uint32_t *)dma_p->mem_va;
	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
}

/*
 * Setup buffer get/put routines
 */
static uint32_t
dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
{
	uint32_t *addr;

	addr = (uint32_t *)dma_p->setup_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index));
}

static void
dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
{
	uint32_t *addr;

	addr = (uint32_t *)dma_p->setup_va;
	ddi_put32(dma_p->acc_hdl, addr + index, value);
}
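
/*
 * Note on the ring accessors above: <index> counts whole descriptors
 * while <offset> (DESC0, DESC1, BUFFER1, RD_NEXT/TD_NEXT) selects a
 * 32-bit word within one, so the pointer arithmetic implies that
 * DESC_SIZE is the descriptor size expressed in uint32_t words (four
 * per descriptor), not in bytes.
 */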


/*
 * ========== Low-level chip & ring buffer manipulation ==========
 */

#define	DMFE_DBG	DMFE_DBG_REGS	/* debug flag for this code	*/

/*
 * dmfe_set_opmode() -- function to set operating mode
 */
static void
dmfe_set_opmode(dmfe_t *dmfep)
{
	DMFE_DEBUG(("dmfe_set_opmode: opmode 0x%x", dmfep->opmode));

	ASSERT(mutex_owned(dmfep->oplock));

	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
	drv_usecwait(10);
}

/*
 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
 */
static void
dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Stop the chip:
	 *	disable all interrupts
	 *	stop TX/RX processes
	 *	clear the status bits for TX/RX stopped
	 * If required, reset the chip
	 * Record the new state
	 */
	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
	dmfe_set_opmode(dmfep);
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);

	switch (newstate) {
	default:
		ASSERT(!"can't get here");
		return;

	case CHIP_STOPPED:
	case CHIP_ERROR:
		break;

	case CHIP_RESET:
		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
		break;
	}

	dmfep->chip_state = newstate;
}
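
/*
 * A note on the CHIP_RESET sequence above: SW_RESET is asserted and
 * held for ~10us, then cleared and given another ~10us to settle,
 * after which CSR0 is rewritten from the patchable <dmfe_bus_modes>,
 * presumably because the reset returns the bus-mode register to its
 * hardware defaults.
 */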

/*
 * Initialize transmit and receive descriptor rings, and
 * set the chip to point to the first entry in each ring
 */
static void
dmfe_init_rings(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t pstart;
	uint32_t pnext;
	uint32_t pbuff;
	uint32_t desc1;
	int i;

	/*
	 * You need all the locks in order to rewrite the descriptor rings
	 */
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * Program the RX ring entries
	 */
	descp = &dmfep->rx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct rx_desc_type);
	pbuff = dmfep->rx_buff.mem_dvma;
	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;

	for (i = 0; i < dmfep->rx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, RX_OWN);

		pnext += sizeof (struct rx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->rx.next_free = 0;

	/*
	 * Set the base address of the RX descriptor list in CSR3
	 */
	DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
	    descp->mem_va, descp->mem_dvma));
	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);

	/*
	 * Program the TX ring entries
	 */
	descp = &dmfep->tx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct tx_desc_type);
	pbuff = dmfep->tx_buff.mem_dvma;
	desc1 = TX_CHAINING;

	for (i = 0; i < dmfep->tx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, 0);

		pnext += sizeof (struct tx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->tx.n_free = dmfep->tx.n_desc;
	dmfep->tx.next_free = dmfep->tx.next_busy = 0;

	/*
	 * Set the base address of the TX descriptor list in CSR4
	 */
	DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
	    descp->mem_va, descp->mem_dvma));
	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}

/*
 * dmfe_start_chip() -- start the chip transmitting and/or receiving
 */
static void
dmfe_start_chip(dmfe_t *dmfep, int mode)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfep->opmode |= mode;
	dmfe_set_opmode(dmfep);

	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
	/*
	 * Enable VLAN length mode (allows packets to be 4 bytes longer).
	 */
	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);

	/*
	 * Clear any pending process-stopped interrupts
	 */
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
}
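
/*
 * On the VLAN length mode set above: a tagged frame carries four extra
 * bytes, so this presumably raises the chip's oversize-frame cutoff
 * from 1518 to 1522 bytes so that tagged frames aren't flagged as
 * FRAME2LONG on receive.
 */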

/*
 * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
 *
 * Normal interrupts:
 *	We always enable:
 *		RX_PKTDONE_INT		(packet received)
 *		TX_PKTDONE_INT		(TX complete)
 *	We never enable:
 *		TX_ALLDONE_INT		(next TX buffer not ready)
 *
 * Abnormal interrupts:
 *	We always enable:
 *		RX_STOPPED_INT
 *		TX_STOPPED_INT
 *		SYSTEM_ERR_INT
 *		RX_UNAVAIL_INT
 *	We never enable:
 *		RX_EARLY_INT
 *		RX_WATCHDOG_INT
 *		TX_JABBER_INT
 *		TX_EARLY_INT
 *		TX_UNDERFLOW_INT
 *		GP_TIMER_INT		(not valid in -9 chips)
 *		LINK_STATUS_INT		(not valid in -9 chips)
 */
static void
dmfe_enable_interrupts(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Put 'the standard set of interrupts' in the interrupt mask register
	 */
	dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT |
	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;

	dmfe_chip_put32(dmfep, INT_MASK_REG,
	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
	dmfep->chip_state = CHIP_RUNNING;

	DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask));
}

#undef	DMFE_DBG
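
/*
 * Note that NORMAL_SUMMARY_INT and ABNORMAL_SUMMARY_INT are OR'd in
 * alongside the individual enables above, presumably because, as on
 * other 21x4x-family chips, the per-event enables only generate an
 * interrupt when the corresponding summary enable is also set - so
 * both kinds of bit are needed in the mask.
 */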


/*
 * ========== RX side routines ==========
 */

#define	DMFE_DBG	DMFE_DBG_RECV	/* debug flag for this code	*/

/*
 * Function to update receive statistics on various errors
 */
static void
dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
{
	ASSERT(mutex_owned(dmfep->rxlock));

	/*
	 * The error summary bit and the error bits that it summarises
	 * are only valid if this is the last fragment.  Therefore, a
	 * fragment only contributes to the error statistics if both
	 * the last-fragment and error summary bits are set.
	 */
	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
		dmfep->rx_stats_ierrors += 1;

		/*
		 * There are some other error bits in the descriptor for
		 * which there don't seem to be appropriate MAC statistics,
		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
		 * latter may not be possible if it is supposed to indicate
		 * that one buffer has been filled with a partial packet
		 * and the next buffer required for the rest of the packet
		 * was not available, as all our buffers are more than large
		 * enough for a whole packet without fragmenting.
		 */

		if (desc0 & RX_OVERFLOW) {
			dmfep->rx_stats_overflow += 1;

		} else if (desc0 & RX_RUNT_FRAME)
			dmfep->rx_stats_short += 1;

		if (desc0 & RX_CRC)
			dmfep->rx_stats_fcs += 1;

		if (desc0 & RX_FRAME2LONG)
			dmfep->rx_stats_toolong += 1;
	}

	/*
	 * A receive watchdog timeout is counted as a MAC-level receive
	 * error.  Strangely, it doesn't set the packet error summary bit,
	 * according to the chip data sheet :-?
	 */
	if (desc0 & RX_RCV_WD_TO)
		dmfep->rx_stats_macrcv_errors += 1;

	if (desc0 & RX_DRIBBLING)
		dmfep->rx_stats_align += 1;

	if (desc0 & RX_MII_ERR)
		dmfep->rx_stats_macrcv_errors += 1;
}

/*
 * Receive incoming packet(s) and pass them up ...
 */
static mblk_t *
dmfe_getp(dmfe_t *dmfep)
{
	dma_area_t *descp;
	mblk_t **tail;
	mblk_t *head;
	mblk_t *mp;
	char *rxb;
	uchar_t *dp;
	uint32_t desc0;
	uint32_t misses;
	int packet_length;
	int index;

	mutex_enter(dmfep->rxlock);

	/*
	 * Update the missed frame statistic from the on-chip counter.
	 */
	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);

	/*
	 * sync (all) receive descriptors before inspecting them
	 */
	descp = &dmfep->rx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * We should own at least one RX entry, since we've had a
	 * receive interrupt, but let's not be dogmatic about it.
	 */
	index = dmfep->rx.next_free;
	desc0 = dmfe_ring_get32(descp, index, DESC0);
	if (desc0 & RX_OWN)
		DMFE_DEBUG(("dmfe_getp: no work, desc0 0x%x", desc0));

	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
		/*
		 * Maintain statistics for every descriptor returned
		 * to us by the chip ...
		 */
		DMFE_DEBUG(("dmfe_getp: desc0 0x%x", desc0));
		dmfe_update_rx_stats(dmfep, desc0);

		/*
		 * Check that the entry has both "packet start" and
		 * "packet end" flags.  We really shouldn't get packet
		 * fragments, 'cos all the RX buffers are bigger than
		 * the largest valid packet.  So we'll just drop any
		 * fragments we find & skip on to the next entry.
		 */
		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
			DMFE_DEBUG(("dmfe_getp: dropping fragment"));
			goto skip;
		}

		/*
		 * A whole packet in one buffer.  We have to check error
		 * status and packet length before forwarding it upstream.
		 */
		if (desc0 & RX_ERR_SUMMARY) {
			DMFE_DEBUG(("dmfe_getp: dropping errored packet"));
			goto skip;
		}

		packet_length = (desc0 >> 16) & 0x3fff;
		if (packet_length > DMFE_MAX_PKT_SIZE) {
			DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
			    "length %d", packet_length));
			goto skip;
		} else if (packet_length < ETHERMIN) {
			/*
			 * Note that a VLAN packet would be even larger,
			 * but we don't worry about dropping runt VLAN
			 * frames.
			 *
			 * This check is probably redundant, as well,
			 * since the hardware should drop RUNT frames.
			 */
			DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
			    "length %d", packet_length));
			goto skip;
		}

		/*
		 * Sync the data, so we can examine it; then check that
		 * the packet is really intended for us (remember that
		 * if we're using Imperfect Filtering, then the chip will
		 * receive unicast packets sent to stations whose addresses
		 * just happen to hash to the same value as our own; we
		 * discard these here so they don't get sent upstream ...)
		 */
		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);
		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];


		/*
		 * We do not bother to check that the packet is really for
		 * us, we let the MAC framework make that check instead.
		 * This is especially important if we ever want to support
		 * multiple MAC addresses.
		 */

		/*
		 * Packet looks good; get a buffer to copy it into.  We
		 * allow some space at the front of the allocated buffer
		 * (HEADROOM) in case any upstream modules want to prepend
		 * some sort of header.  The value has been carefully chosen
		 * so that it also has the side-effect of making the packet
		 * *contents* 4-byte aligned, as required by NCA!
		 */
		mp = allocb(DMFE_HEADROOM + packet_length, 0);
		if (mp == NULL) {
			DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet"));
			dmfep->rx_stats_norcvbuf += 1;
			goto skip;
		}

		/*
		 * Account for statistics of good packets.
		 */
		dmfep->rx_stats_ipackets += 1;
		dmfep->rx_stats_rbytes += packet_length;
		if (desc0 & RX_MULTI_FRAME) {
			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
				dmfep->rx_stats_multi += 1;
			} else {
				dmfep->rx_stats_bcast += 1;
			}
		}

		/*
		 * Copy the packet into the STREAMS buffer
		 */
		dp = mp->b_rptr += DMFE_HEADROOM;
		mp->b_cont = mp->b_next = NULL;

		/*
		 * Don't worry about stripping the vlan tag, the MAC
		 * layer will take care of that for us.
		 */
		bcopy(rxb, dp, packet_length);

		/*
		 * Fix up the packet length, and link it to the chain
		 */
		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
		*tail = mp;
		tail = &mp->b_next;

	skip:
		/*
		 * Return ownership of ring entry & advance to next
		 */
		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
		index = NEXT(index, dmfep->rx.n_desc);
		desc0 = dmfe_ring_get32(descp, index, DESC0);
	}

	/*
	 * Remember where to start looking next time ...
	 */
	dmfep->rx.next_free = index;

	/*
	 * sync the receive descriptors that we've given back
	 * (actually, we sync all of them for simplicity), and
	 * wake the chip in case it had suspended receive
	 */
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);

	mutex_exit(dmfep->rxlock);
	return (head);
}

#undef	DMFE_DBG
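
/*
 * A detail worth noting in dmfe_getp() above: the length field in
 * DESC0 evidently includes the 4-byte FCS, which is why b_wptr is set
 * to (packet_length - ETHERFCSL) after the copy; the FCS bytes land in
 * the mblk but sit beyond b_wptr, so they're never passed upstream.
 */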


/*
 * ========== Primary TX side routines ==========
 */

#define	DMFE_DBG	DMFE_DBG_SEND	/* debug flag for this code	*/

/*
 * TX ring management:
 *
 * There are <tx.n_desc> entries in the ring, of which those from
 * <tx.next_free> round to but not including <tx.next_busy> must
 * be owned by the CPU.  The number of such entries should equal
 * <tx.n_free>; but there may also be some more entries which the
 * chip has given back but which we haven't yet accounted for.
 * The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
 * as it discovers such entries.
 *
 * Initially, or when the ring is entirely free:
 *	C = Owned by CPU
 *	D = Owned by Davicom (DMFE) chip
 *
 *	tx.next_free					tx.n_desc = 16
 *	  |
 *	  v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	  ^
 *	  |
 *	tx.next_busy					tx.n_free = 16
 *
 * On entry to reclaim() during normal use:
 *
 *	                                      tx.next_free	tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	          ^
 *	          |
 *	          tx.next_busy				tx.n_free = 9
 *
 * On exit from reclaim():
 *
 *	                                      tx.next_free	tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	                          tx.next_busy		tx.n_free = 13
 *
 * The ring is considered "full" when only one entry is owned by
 * the CPU; thus <tx.n_free> should always be >= 1.
 *
 *	                      tx.next_free			tx.n_desc = 16
 *	                      |
 *	                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	                          tx.next_busy			tx.n_free = 1
 */
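
/*
 * Why keep that last slot in hand?  With one entry always owned by the
 * CPU, <tx.next_busy> == <tx.next_free> can only mean "nothing left to
 * reclaim", which is exactly how dmfe_reclaim_tx_desc() below reads it;
 * if the ring could fill completely, that test would be ambiguous
 * between "empty" and "full".
 */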

/*
 * Function to update transmit statistics on various errors
 */
static void
dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
{
	uint32_t collisions;
	uint32_t errbits;
	uint32_t errsum;

	ASSERT(mutex_owned(dmfep->txlock));

	collisions = ((desc0 >> 3) & 0x0f);
	errsum = desc0 & TX_ERR_SUMMARY;
	errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
	    TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
	if ((errsum == 0) != (errbits == 0)) {
		dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
		desc0 |= TX_ERR_SUMMARY;
	}

	if (desc0 & TX_ERR_SUMMARY) {
		dmfep->tx_stats_oerrors += 1;

		/*
		 * If we ever see a transmit jabber timeout, we count it
		 * as a MAC-level transmit error; but we probably won't
		 * see it as it causes an Abnormal interrupt and we reset
		 * the chip in order to recover
		 */
		if (desc0 & TX_JABBER_TO) {
			dmfep->tx_stats_macxmt_errors += 1;
			dmfep->tx_stats_jabber += 1;
		}

		if (desc0 & TX_UNDERFLOW)
			dmfep->tx_stats_underflow += 1;
		else if (desc0 & TX_LATE_COLL)
			dmfep->tx_stats_xmtlatecoll += 1;

		if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
			dmfep->tx_stats_nocarrier += 1;

		if (desc0 & TX_EXCESS_COLL) {
			dmfep->tx_stats_excoll += 1;
			collisions = 16;
		}
	} else {
		int	bit = index % NBBY;
		int	byt = index / NBBY;

		/*
		 * The mcast/bcast maps are set as bit masks (1 << bit)
		 * by dmfe_send_msg(), so test & clear them the same way.
		 */
		if (dmfep->tx_mcast[byt] & (1 << bit)) {
			dmfep->tx_mcast[byt] &= ~(1 << bit);
			dmfep->tx_stats_multi += 1;

		} else if (dmfep->tx_bcast[byt] & (1 << bit)) {
			dmfep->tx_bcast[byt] &= ~(1 << bit);
			dmfep->tx_stats_bcast += 1;
		}

		dmfep->tx_stats_opackets += 1;
		dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
	}

	if (collisions == 1)
		dmfep->tx_stats_first_coll += 1;
	else if (collisions != 0)
		dmfep->tx_stats_multi_coll += 1;
	dmfep->tx_stats_collisions += collisions;

	if (desc0 & TX_DEFERRED)
		dmfep->tx_stats_defer += 1;
}

/*
 * Reclaim all the ring entries that the chip has returned to us ...
 *
 * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
 * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
 */
static boolean_t
dmfe_reclaim_tx_desc(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t desc0;
	uint32_t desc1;
	int i;

	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * sync transmit descriptor ring before looking at it
	 */
	descp = &dmfep->tx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Early exit if there are no descriptors to reclaim, either
	 * because they're all reclaimed already, or because the next
	 * one is still owned by the chip ...
	 */
	i = dmfep->tx.next_busy;
	if (i == dmfep->tx.next_free)
		return (B_FALSE);
	desc0 = dmfe_ring_get32(descp, i, DESC0);
	if (desc0 & TX_OWN)
		return (B_FALSE);

	/*
	 * Reclaim as many descriptors as possible ...
	 */
	for (;;) {
		desc1 = dmfe_ring_get32(descp, i, DESC1);
		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);

		if (desc1 & TX_SETUP_PACKET) {
			/*
			 * Setup packet - restore buffer address
			 */
			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
			    descp->setup_dvma);
			dmfe_ring_put32(descp, i, BUFFER1,
			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
		} else {
			/*
			 * Regular packet - just update stats
			 */
			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
		}

#if	DMFEDEBUG
		/*
		 * We can use one of the SPARE bits in the TX descriptor
		 * to track when a ring buffer slot is reclaimed.  Then
		 * we can deduce the last operation on a slot from the
		 * top half of DESC0:
		 *
		 *	0x8000 xxxx	given to DMFE chip (TX_OWN)
		 *	0x7fff xxxx	returned but not yet reclaimed
		 *	0x3fff xxxx	reclaimed
		 */
#define	TX_PEND_RECLAIM		(1UL<<30)
		dmfe_ring_put32(descp, i, DESC0, desc0 & ~TX_PEND_RECLAIM);
#endif	/* DMFEDEBUG */

		/*
		 * Update count & index; we're all done if the ring is
		 * now fully reclaimed, or the next entry if still owned
		 * by the chip ...
		 */
		dmfep->tx.n_free += 1;
		i = NEXT(i, dmfep->tx.n_desc);
		if (i == dmfep->tx.next_free)
			break;
		desc0 = dmfe_ring_get32(descp, i, DESC0);
		if (desc0 & TX_OWN)
			break;
	}

	dmfep->tx.next_busy = i;
	dmfep->tx_pending_tix = 0;
	return (B_TRUE);
}

/*
 * Send the message in the message block chain <mp>.
 *
 * The message is freed if and only if its contents are successfully copied
 * and queued for transmission (so that the return value is B_TRUE).
 * If we can't queue the message, the return value is B_FALSE and
 * the message is *not* freed.
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
	dma_area_t *descp;
	mblk_t *bp;
	char *txb;
	uint32_t desc1;
	uint32_t index;
	size_t totlen;
	size_t mblen;

	/*
	 * If the number of free slots is below the reclaim threshold
	 * (soft limit), we'll try to reclaim some.  If we fail, and
	 * the number of free slots is also below the minimum required
	 * (the hard limit, usually 1), then we can't send the packet.
	 */
	mutex_enter(dmfep->txlock);
	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
	    dmfep->tx.n_free <= dmfe_tx_min_free) {
		/*
		 * Resource shortage - return B_FALSE so the packet
		 * will be queued for retry after the next TX-done
		 * interrupt.
		 */
		mutex_exit(dmfep->txlock);
		DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
		return (B_FALSE);
	}

	/*
	 * There's a slot available, so claim it by incrementing
	 * the next-free index and decrementing the free count.
	 * If the ring is currently empty, we also restart the
	 * stall-detect timer.  The ASSERTions check that our
	 * invariants still hold:
	 *	the next-free index must not match the next-busy index
	 *	there must still be at least one free entry
	 * After this, we now have exclusive ownership of the ring
	 * entry (and matching buffer) indicated by <index>, so we
	 * don't need to hold the TX lock any longer
	 */
	index = dmfep->tx.next_free;
	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
		dmfep->tx_pending_tix = 0;
	ASSERT(dmfep->tx.n_free >= 1);
	mutex_exit(dmfep->txlock);

	/*
	 * Check the ownership of the ring entry ...
	 */
	descp = &dmfep->tx_desc;
	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

	if (mp == NULL) {
		/*
		 * Indicates we should send a SETUP packet, which we do by
		 * temporarily switching the BUFFER1 pointer in the ring
		 * entry.  The reclaim routine will restore BUFFER1 to its
		 * usual value.
		 *
		 * Note that as the setup packet is tagged on the end of
		 * the TX ring, when we sync the descriptor we're also
		 * implicitly syncing the setup packet - hence, we don't
		 * need a separate ddi_dma_sync() call here.
		 */
		desc1 = dmfe_setup_desc1;
		dmfe_ring_put32(descp, index, BUFFER1, descp->setup_dvma);
	} else {
		/*
		 * A regular packet; we copy the data into a pre-mapped
		 * buffer, which avoids the overhead (and complication)
		 * of mapping/unmapping STREAMS buffers and keeping hold
		 * of them until the DMA has completed.
		 *
		 * Because all buffers are the same size, and larger
		 * than the longest single valid message, we don't have
		 * to bother about splitting the message across multiple
		 * buffers.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		totlen = 0;
		bp = mp;

		/*
		 * Copy all (remaining) mblks in the message ...
		 */
		for (; bp != NULL; bp = bp->b_cont) {
			mblen = bp->b_wptr - bp->b_rptr;
			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
				bcopy(bp->b_rptr, txb, mblen);
				txb += mblen;
			}
		}

		/*
		 * Is this a multicast or broadcast packet?  We do
		 * this so that we can track statistics accurately
		 * when we reclaim it.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		if (txb[0] & 0x1) {
			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
				dmfep->tx_bcast[index / NBBY] |=
				    (1 << (index % NBBY));
			} else {
				dmfep->tx_mcast[index / NBBY] |=
				    (1 << (index % NBBY));
			}
		}

		/*
		 * We've reached the end of the chain; and we should have
		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
		 * buffer.  Note that the <size> field in the descriptor is
		 * only 11 bits, so bigger packets would be a problem!
		 */
		ASSERT(bp == NULL);
		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
		totlen &= TX_BUFFER_SIZE1;
		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;

		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Update ring descriptor entries, sync them, and wake up the
	 * transmit process
	 */
	if ((index & dmfe_tx_int_factor) == 0)
		desc1 |= TX_INT_ON_COMP;
	desc1 |= TX_CHAINING;
	dmfe_ring_put32(descp, index, DESC1, desc1);
	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

	/*
	 * Finally, free the message & return success
	 */
	if (mp)
		freemsg(mp);
	return (B_TRUE);
}
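
/*
 * On the TX_INT_ON_COMP test above: with the defaults,
 * dmfe_tx_int_factor is (128 / 2) - 1 = 63, so (index & 63) == 0
 * holds only for ring slots 0 and 64; that is, a TX-done interrupt is
 * requested twice per cycle round the 128-entry ring, which matches
 * the "twice per ring cycle" tunable described earlier.
 */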

/*
 *	dmfe_m_tx() -- send a chain of packets
 *
 *	Called when packet(s) are ready to be transmitted.  A pointer to an
 *	M_DATA message that contains the packet is passed to this routine.
 *	The complete LLC header is contained in the message's first message
 *	block, and the remainder of the packet is contained within
 *	additional M_DATA message blocks linked to the first message block.
 *
 *	Additional messages may be passed by linking with b_next.
 */
static mblk_t *
dmfe_m_tx(void *arg, mblk_t *mp)
{
	dmfe_t *dmfep = arg;			/* private device info	*/
	mblk_t *next;

	ASSERT(mp != NULL);
	ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);

	if (dmfep->chip_state != CHIP_RUNNING)
		return (mp);

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!dmfe_send_msg(dmfep, mp)) {
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	return (mp);
}

#undef	DMFE_DBG


/*
 * ========== Address-setting routines (TX-side) ==========
 */

#define	DMFE_DBG	DMFE_DBG_ADDR	/* debug flag for this code	*/

/*
 * Find the index of the relevant bit in the setup packet.
 * This must mirror the way the hardware will actually calculate it!
 */
static uint32_t
dmfe_hash_index(const uint8_t *address)
{
	uint32_t const POLY = HASH_POLY;
	uint32_t crc = HASH_CRC;
	uint32_t index;
	uint32_t msb;
	uchar_t currentbyte;
	int byteslength;
	int shift;
	int bit;

	for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
		currentbyte = address[byteslength];
		for (bit = 0; bit < 8; ++bit) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1)) {
				crc ^= POLY;
				crc |= 0x00000001;
			}
			currentbyte >>= 1;
		}
	}

	for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
		index |= (((crc >> bit) & 1) << shift);

	return (index);
}
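
/*
 * A note on the index construction above: the final loop takes CRC
 * bits <23..31> and reverses them into bits <8..0> of the result, so
 * the index is a 9-bit value (0..511).  dmfe_update_hash() below then
 * treats the setup buffer as 32 words, each holding 16 hash bits in
 * its low half - exactly a 512-bit table.
 */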

/*
 * Find and set/clear the relevant bit in the setup packet hash table
 * This must mirror the way the hardware will actually interpret it!
 */
static void
dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
{
	dma_area_t *descp;
	uint32_t tmp;

	ASSERT(mutex_owned(dmfep->oplock));

	descp = &dmfep->tx_desc;
	tmp = dmfe_setup_get32(descp, index/16);
	if (val)
		tmp |= 1 << (index%16);
	else
		tmp &= ~(1 << (index%16));
	dmfe_setup_put32(descp, index/16, tmp);
}

/*
 * Update the refcount for the bit in the setup packet corresponding
 * to the specified address; if it changes between zero & nonzero,
 * also update the bitmap itself & return B_TRUE, so that the caller
 * knows to re-send the setup packet.  Otherwise (only the refcount
 * changed), return B_FALSE
 */
static boolean_t
dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
{
	uint32_t index;
	uint8_t *refp;
	boolean_t change;

	index = dmfe_hash_index(mca);
	refp = &dmfep->mcast_refs[index];
	change = (val ? (*refp)++ : --(*refp)) == 0;

	if (change)
		dmfe_update_hash(dmfep, index, val);

	return (change);
}
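
/*
 * Example of the refcounting above: if two multicast groups hash to
 * the same bit, enabling both takes that bit's refcount to 2 but only
 * the first enable flips the bitmap (and so re-sends the setup frame);
 * disabling one group drops the count to 1 and the bit - correctly -
 * stays set until the second group is disabled too.
 */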
B_FALSE : B_TRUE; 1358*5181Sgd78059 return (0); 1359*5181Sgd78059 } 1360*5181Sgd78059 1361*5181Sgd78059 /* 1362*5181Sgd78059 * dmfe_m_unicst() -- set the physical network address 1363*5181Sgd78059 */ 1364*5181Sgd78059 static int 1365*5181Sgd78059 dmfe_m_unicst(void *arg, const uint8_t *macaddr) 1366*5181Sgd78059 { 1367*5181Sgd78059 dmfe_t *dmfep = arg; 1368*5181Sgd78059 int status; 1369*5181Sgd78059 int index; 1370*5181Sgd78059 1371*5181Sgd78059 /* 1372*5181Sgd78059 * Update our current address and send out a new setup packet 1373*5181Sgd78059 * 1374*5181Sgd78059 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT 1375*5181Sgd78059 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes). 1376*5181Sgd78059 * 1377*5181Sgd78059 * It is said that there is a bug in the 21140 where it fails to 1378*5181Sgd78059 * receive packets addressed to the specified perfect filter address. 1379*5181Sgd78059 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1 1380*5181Sgd78059 * bit should be set in the module variable dmfe_setup_desc1. 1381*5181Sgd78059 * 1382*5181Sgd78059 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering. 1383*5181Sgd78059 * In this mode, *all* incoming addresses are hashed and looked 1384*5181Sgd78059 * up in the bitmap described by the setup packet. Therefore, 1385*5181Sgd78059 * the bit representing the station address has to be added to 1386*5181Sgd78059 * the table before sending it out. If the address is changed, 1387*5181Sgd78059 * the old entry should be removed before the new entry is made. 1388*5181Sgd78059 * 1389*5181Sgd78059 * NOTE: in this mode, unicast packets that are not intended for 1390*5181Sgd78059 * this station may be received; it is up to software to filter 1391*5181Sgd78059 * them out afterwards! 1392*5181Sgd78059 * 1393*5181Sgd78059 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT 1394*5181Sgd78059 * filtering. In this mode, multicast addresses are hashed and 1395*5181Sgd78059 * checked against the bitmap, while unicast addresses are simply 1396*5181Sgd78059 * matched against the one physical address specified in the setup 1397*5181Sgd78059 * packet. This means that we shouldn't receive unicast packets 1398*5181Sgd78059 * that aren't intended for us (but software still has to filter 1399*5181Sgd78059 * multicast packets just the same). 1400*5181Sgd78059 * 1401*5181Sgd78059 * Whichever mode we're using, we have to enter the broadcast 1402*5181Sgd78059 * address into the multicast filter map too, so we do this on 1403*5181Sgd78059 * the first time through after attach or reset.
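 *
 * To illustrate (a sketch of the first call after attach, assuming
 * the default HASH_AND_PERFECT mode): addr_set is still B_FALSE and
 * TX_FILTER_TYPE1 is clear, so the only hash-table update below is
 * the one for the broadcast address; the station address goes into
 * the perfect-filter words further down instead.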
1404*5181Sgd78059 */ 1405*5181Sgd78059 mutex_enter(dmfep->oplock); 1406*5181Sgd78059 1407*5181Sgd78059 if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1) 1408*5181Sgd78059 (void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE); 1409*5181Sgd78059 if (dmfe_setup_desc1 & TX_FILTER_TYPE1) 1410*5181Sgd78059 (void) dmfe_update_mcast(dmfep, macaddr, B_TRUE); 1411*5181Sgd78059 if (!dmfep->addr_set) 1412*5181Sgd78059 (void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE); 1413*5181Sgd78059 1414*5181Sgd78059 /* 1415*5181Sgd78059 * Remember the new current address 1416*5181Sgd78059 */ 1417*5181Sgd78059 ethaddr_copy(macaddr, dmfep->curr_addr); 1418*5181Sgd78059 dmfep->addr_set = B_TRUE; 1419*5181Sgd78059 1420*5181Sgd78059 /* 1421*5181Sgd78059 * Install the new physical address into the proper position in 1422*5181Sgd78059 * the setup frame; this is only used if we select hash+perfect 1423*5181Sgd78059 * filtering, but we'll put it in anyway. The ugliness here is 1424*5181Sgd78059 * down to the usual chicken-and-egg problem :( 1425*5181Sgd78059 */ 1426*5181Sgd78059 for (index = 0; index < ETHERADDRL; index += 2) 1427*5181Sgd78059 dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2, 1428*5181Sgd78059 (macaddr[index+1] << 8) | macaddr[index]); 1429*5181Sgd78059 1430*5181Sgd78059 /* 1431*5181Sgd78059 * Finally, we're ready to "transmit" the setup frame 1432*5181Sgd78059 */ 1433*5181Sgd78059 status = dmfe_send_setup(dmfep); 1434*5181Sgd78059 mutex_exit(dmfep->oplock); 1435*5181Sgd78059 1436*5181Sgd78059 return (status); 1437*5181Sgd78059 } 1438*5181Sgd78059 1439*5181Sgd78059 /* 1440*5181Sgd78059 * dmfe_m_multicst() -- enable or disable a multicast address 1441*5181Sgd78059 * 1442*5181Sgd78059 * Program the hardware to enable/disable the multicast address 1443*5181Sgd78059 * in "mca" (enable if add is true, otherwise disable it.) 1444*5181Sgd78059 * We keep a refcount for each bit in the map, so that it still 1445*5181Sgd78059 * works out properly if multiple addresses hash to the same bit. 1446*5181Sgd78059 * dmfe_update_mcast() tells us whether the map actually changed; 1447*5181Sgd78059 * if so, we have to re-"transmit" the magic setup packet. 1448*5181Sgd78059 */ 1449*5181Sgd78059 static int 1450*5181Sgd78059 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1451*5181Sgd78059 { 1452*5181Sgd78059 dmfe_t *dmfep = arg; /* private device info */ 1453*5181Sgd78059 int status = 0; 1454*5181Sgd78059 1455*5181Sgd78059 mutex_enter(dmfep->oplock); 1456*5181Sgd78059 if (dmfe_update_mcast(dmfep, mca, add)) 1457*5181Sgd78059 status = dmfe_send_setup(dmfep); 1458*5181Sgd78059 mutex_exit(dmfep->oplock); 1459*5181Sgd78059 1460*5181Sgd78059 return (status); 1461*5181Sgd78059 } 1462*5181Sgd78059 1463*5181Sgd78059 #undef DMFE_DBG 1464*5181Sgd78059 1465*5181Sgd78059 1466*5181Sgd78059 /* 1467*5181Sgd78059 * ========== Internal state management entry points ========== 1468*5181Sgd78059 */ 1469*5181Sgd78059 1470*5181Sgd78059 #define DMFE_DBG DMFE_DBG_GLD /* debug flag for this code */ 1471*5181Sgd78059 1472*5181Sgd78059 /* 1473*5181Sgd78059 * These routines provide all the functionality required by the 1474*5181Sgd78059 * corresponding MAC layer entry points, but don't update the MAC layer state 1475*5181Sgd78059 * so they can be called internally without disturbing our record 1476*5181Sgd78059 * of what the MAC layer thinks we should be doing ...
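 *
 * A sketch of the pattern this enables (a hypothetical internal
 * caller; the DMFE_EXAMPLES guard is never defined, so this is not
 * compiled into the driver):
 */

#ifdef DMFE_EXAMPLES
static void
dmfe_example_bounce(dmfe_t *dmfep)
{
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);		/* chip stops; mac_state untouched */
	/* ... adjust chip settings here ... */
	if (dmfep->mac_state == DMFE_MAC_STARTED)
		dmfe_start(dmfep);	/* resume only if MAC wants us up */
	mutex_exit(dmfep->oplock);
}
#endif	/* DMFE_EXAMPLES */

/*
 * The individual routines follow.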
1477*5181Sgd78059 */ 1478*5181Sgd78059 1479*5181Sgd78059 /* 1480*5181Sgd78059 * dmfe_stop() -- stop processing, don't reset h/w or rings 1481*5181Sgd78059 */ 1482*5181Sgd78059 static void 1483*5181Sgd78059 dmfe_stop(dmfe_t *dmfep) 1484*5181Sgd78059 { 1485*5181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 1486*5181Sgd78059 1487*5181Sgd78059 dmfe_stop_chip(dmfep, CHIP_STOPPED); 1488*5181Sgd78059 } 1489*5181Sgd78059 1490*5181Sgd78059 /* 1491*5181Sgd78059 * dmfe_reset() -- stop processing, reset h/w & rings to initial state 1492*5181Sgd78059 */ 1493*5181Sgd78059 static void 1494*5181Sgd78059 dmfe_reset(dmfe_t *dmfep) 1495*5181Sgd78059 { 1496*5181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 1497*5181Sgd78059 ASSERT(mutex_owned(dmfep->rxlock)); 1498*5181Sgd78059 ASSERT(mutex_owned(dmfep->txlock)); 1499*5181Sgd78059 1500*5181Sgd78059 dmfe_stop_chip(dmfep, CHIP_RESET); 1501*5181Sgd78059 dmfe_init_rings(dmfep); 1502*5181Sgd78059 } 1503*5181Sgd78059 1504*5181Sgd78059 /* 1505*5181Sgd78059 * dmfe_start() -- start transmitting/receiving 1506*5181Sgd78059 */ 1507*5181Sgd78059 static void 1508*5181Sgd78059 dmfe_start(dmfe_t *dmfep) 1509*5181Sgd78059 { 1510*5181Sgd78059 uint32_t gpsr; 1511*5181Sgd78059 1512*5181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 1513*5181Sgd78059 1514*5181Sgd78059 ASSERT(dmfep->chip_state == CHIP_RESET || 1515*5181Sgd78059 dmfep->chip_state == CHIP_STOPPED); 1516*5181Sgd78059 1517*5181Sgd78059 /* 1518*5181Sgd78059 * Make opmode consistent with PHY duplex setting 1519*5181Sgd78059 */ 1520*5181Sgd78059 gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG); 1521*5181Sgd78059 if (gpsr & GPS_FULL_DUPLEX) 1522*5181Sgd78059 dmfep->opmode |= FULL_DUPLEX; 1523*5181Sgd78059 else 1524*5181Sgd78059 dmfep->opmode &= ~FULL_DUPLEX; 1525*5181Sgd78059 1526*5181Sgd78059 /* 1527*5181Sgd78059 * Start transmit processing 1528*5181Sgd78059 * Set up the address filters 1529*5181Sgd78059 * Start receive processing 1530*5181Sgd78059 * Enable interrupts 1531*5181Sgd78059 */ 1532*5181Sgd78059 dmfe_start_chip(dmfep, START_TRANSMIT); 1533*5181Sgd78059 (void) dmfe_send_setup(dmfep); 1534*5181Sgd78059 drv_usecwait(10); 1535*5181Sgd78059 dmfe_start_chip(dmfep, START_RECEIVE); 1536*5181Sgd78059 dmfe_enable_interrupts(dmfep); 1537*5181Sgd78059 } 1538*5181Sgd78059 1539*5181Sgd78059 /* 1540*5181Sgd78059 * dmfe_restart - restart transmitting/receiving after error or suspend 1541*5181Sgd78059 */ 1542*5181Sgd78059 static void 1543*5181Sgd78059 dmfe_restart(dmfe_t *dmfep) 1544*5181Sgd78059 { 1545*5181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 1546*5181Sgd78059 1547*5181Sgd78059 /* 1548*5181Sgd78059 * You need not only <oplock>, but also <rxlock> AND <txlock> 1549*5181Sgd78059 * in order to reset the rings, but then <txlock> *mustn't* 1550*5181Sgd78059 * be held across the call to dmfe_start() 1551*5181Sgd78059 */ 1552*5181Sgd78059 mutex_enter(dmfep->rxlock); 1553*5181Sgd78059 mutex_enter(dmfep->txlock); 1554*5181Sgd78059 dmfe_reset(dmfep); 1555*5181Sgd78059 mutex_exit(dmfep->txlock); 1556*5181Sgd78059 mutex_exit(dmfep->rxlock); 1557*5181Sgd78059 if (dmfep->mac_state == DMFE_MAC_STARTED) 1558*5181Sgd78059 dmfe_start(dmfep); 1559*5181Sgd78059 } 1560*5181Sgd78059 1561*5181Sgd78059 1562*5181Sgd78059 /* 1563*5181Sgd78059 * ========== MAC-required management entry points ========== 1564*5181Sgd78059 */ 1565*5181Sgd78059 1566*5181Sgd78059 /* 1567*5181Sgd78059 * dmfe_m_stop() -- stop transmitting/receiving 1568*5181Sgd78059 */ 1569*5181Sgd78059 static void 1570*5181Sgd78059 dmfe_m_stop(void *arg) 1571*5181Sgd78059 { 
1572*5181Sgd78059 dmfe_t *dmfep = arg; /* private device info */ 1573*5181Sgd78059 1574*5181Sgd78059 /* 1575*5181Sgd78059 * Just stop processing, then record new MAC state 1576*5181Sgd78059 */ 1577*5181Sgd78059 mutex_enter(dmfep->oplock); 1578*5181Sgd78059 dmfe_stop(dmfep); 1579*5181Sgd78059 dmfep->mac_state = DMFE_MAC_STOPPED; 1580*5181Sgd78059 mutex_exit(dmfep->oplock); 1581*5181Sgd78059 } 1582*5181Sgd78059 1583*5181Sgd78059 /* 1584*5181Sgd78059 * dmfe_m_start() -- start transmitting/receiving 1585*5181Sgd78059 */ 1586*5181Sgd78059 static int 1587*5181Sgd78059 dmfe_m_start(void *arg) 1588*5181Sgd78059 { 1589*5181Sgd78059 dmfe_t *dmfep = arg; /* private device info */ 1590*5181Sgd78059 1591*5181Sgd78059 /* 1592*5181Sgd78059 * Start processing and record new MAC state 1593*5181Sgd78059 */ 1594*5181Sgd78059 mutex_enter(dmfep->oplock); 1595*5181Sgd78059 dmfe_start(dmfep); 1596*5181Sgd78059 dmfep->mac_state = DMFE_MAC_STARTED; 1597*5181Sgd78059 mutex_exit(dmfep->oplock); 1598*5181Sgd78059 1599*5181Sgd78059 return (0); 1600*5181Sgd78059 } 1601*5181Sgd78059 1602*5181Sgd78059 /* 1603*5181Sgd78059 * dmfe_m_promisc() -- set or reset promiscuous mode on the board 1604*5181Sgd78059 * 1605*5181Sgd78059 * Program the hardware to enable/disable promiscuous and/or 1606*5181Sgd78059 * receive-all-multicast modes. Davicom don't document this 1607*5181Sgd78059 * clearly, but it looks like we can do this on-the-fly (i.e. 1608*5181Sgd78059 * without stopping & restarting the TX/RX processes). 1609*5181Sgd78059 */ 1610*5181Sgd78059 static int 1611*5181Sgd78059 dmfe_m_promisc(void *arg, boolean_t on) 1612*5181Sgd78059 { 1613*5181Sgd78059 dmfe_t *dmfep = arg; 1614*5181Sgd78059 1615*5181Sgd78059 mutex_enter(dmfep->oplock); 1616*5181Sgd78059 dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST); 1617*5181Sgd78059 if (on) 1618*5181Sgd78059 dmfep->opmode |= PROMISC_MODE; 1619*5181Sgd78059 dmfe_set_opmode(dmfep); 1620*5181Sgd78059 mutex_exit(dmfep->oplock); 1621*5181Sgd78059 1622*5181Sgd78059 return (0); 1623*5181Sgd78059 } 1624*5181Sgd78059 1625*5181Sgd78059 /*ARGSUSED*/ 1626*5181Sgd78059 static boolean_t 1627*5181Sgd78059 dmfe_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 1628*5181Sgd78059 { 1629*5181Sgd78059 /* 1630*5181Sgd78059 * Note that the chip could support some form of polling and 1631*5181Sgd78059 * multiaddress support. We should look into adding polling 1632*5181Sgd78059 * support later, once Solaris is better positioned to take 1633*5181Sgd78059 * advantage of it, although it may be of little use since 1634*5181Sgd78059 * even a lowly 500MHz US-IIe should be able to keep up with 1635*5181Sgd78059 * 100Mbps. (Esp. if the packets are not unreasonably sized.) 1636*5181Sgd78059 * 1637*5181Sgd78059 * Multiaddress support, however, is likely to be of more 1638*5181Sgd78059 * utility with crossbow and virtualized NICs. That said, the 1639*5181Sgd78059 * fact that dmfe is only supported on low-end US-IIe hardware 1640*5181Sgd78059 * makes one wonder whether VNICs are likely to be used on 1641*5181Sgd78059 * such platforms. The chip certainly supports the notion, 1642*5181Sgd78059 * since it can be run in HASH-ONLY mode. (Though this would 1643*5181Sgd78059 * require software to drop unicast packets that are 1644*5181Sgd78059 * incorrectly received due to hash collision of the 1645*5181Sgd78059 * destination mac address.)
1646*5181Sgd78059 * 1647*5181Sgd78059 * Interestingly enough, modern Davicom chips (the 9102D) 1648*5181Sgd78059 * support full IP checksum offload, though it's unclear 1649*5181Sgd78059 * whether any of these chips are used on any systems that can 1650*5181Sgd78059 * run Solaris. 1651*5181Sgd78059 * 1652*5181Sgd78059 * If this driver is ever supported on x86 hardware, then 1653*5181Sgd78059 * these assumptions should be revisited. 1654*5181Sgd78059 */ 1655*5181Sgd78059 switch (cap) { 1656*5181Sgd78059 case MAC_CAPAB_POLL: 1657*5181Sgd78059 case MAC_CAPAB_MULTIADDRESS: 1658*5181Sgd78059 case MAC_CAPAB_HCKSUM: 1659*5181Sgd78059 default: 1660*5181Sgd78059 return (B_FALSE); 1661*5181Sgd78059 } 1662*5181Sgd78059 } 1663*5181Sgd78059 1664*5181Sgd78059 1665*5181Sgd78059 #undef DMFE_DBG 1666*5181Sgd78059 1667*5181Sgd78059 1668*5181Sgd78059 /* 1669*5181Sgd78059 * ========== Factotum, implemented as a softint handler ========== 1670*5181Sgd78059 */ 1671*5181Sgd78059 1672*5181Sgd78059 #define DMFE_DBG DMFE_DBG_FACT /* debug flag for this code */ 1673*5181Sgd78059 1674*5181Sgd78059 /* 1675*5181Sgd78059 * The factotum is woken up when there's something to do that we'd rather 1676*5181Sgd78059 * not do from inside a (high-level?) hardware interrupt handler. Its 1677*5181Sgd78059 * two main tasks are: 1678*5181Sgd78059 * reset & restart the chip after an error 1679*5181Sgd78059 * update & restart the chip after a link status change 1680*5181Sgd78059 */ 1681*5181Sgd78059 static uint_t 1682*5181Sgd78059 dmfe_factotum(caddr_t arg) 1683*5181Sgd78059 { 1684*5181Sgd78059 dmfe_t *dmfep; 1685*5181Sgd78059 1686*5181Sgd78059 dmfep = (dmfe_t *)arg; 1687*5181Sgd78059 ASSERT(dmfep->dmfe_guard == DMFE_GUARD); 1688*5181Sgd78059 1689*5181Sgd78059 mutex_enter(dmfep->oplock); 1690*5181Sgd78059 1691*5181Sgd78059 dmfep->factotum_flag = 0; 1692*5181Sgd78059 DRV_KS_INC(dmfep, KS_FACTOTUM_RUN); 1693*5181Sgd78059 1694*5181Sgd78059 /* 1695*5181Sgd78059 * Check for chip error ... 1696*5181Sgd78059 */ 1697*5181Sgd78059 if (dmfep->chip_state == CHIP_ERROR) { 1698*5181Sgd78059 /* 1699*5181Sgd78059 * Error recovery required: reset the chip and the rings, 1700*5181Sgd78059 * then, if it's supposed to be running, kick it off again. 1701*5181Sgd78059 */ 1702*5181Sgd78059 DRV_KS_INC(dmfep, KS_RECOVERY); 1703*5181Sgd78059 dmfe_restart(dmfep); 1704*5181Sgd78059 } else if (dmfep->need_setup) { 1705*5181Sgd78059 (void) dmfe_send_setup(dmfep); 1706*5181Sgd78059 } 1707*5181Sgd78059 mutex_exit(dmfep->oplock); 1708*5181Sgd78059 1709*5181Sgd78059 /* 1710*5181Sgd78059 * Then, check the link state. We need <milock> but not <oplock> 1711*5181Sgd78059 * to do this, but if something's changed, we need <oplock> as well 1712*5181Sgd78059 * in order to stop/restart the chip! Note: we could simply hold 1713*5181Sgd78059 * <oplock> right through here, but we'd rather not 'cos checking 1714*5181Sgd78059 * the link state involves reading over the bit-serial MII bus, 1715*5181Sgd78059 * which takes ~500us even when nothing's changed. Holding <oplock> 1716*5181Sgd78059 * would lock out the interrupt handler for the duration, so it's 1717*5181Sgd78059 * better to release it first and reacquire it only if needed.
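 *
 * (Note the lock ordering this implies: <milock> is always taken
 * before <oplock> - here, in dmfe_m_stat() and in dmfe_m_ioctl() -
 * and never the other way round, so these paths can't deadlock
 * against one another.)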
1718*5181Sgd78059 */ 1719*5181Sgd78059 mutex_enter(dmfep->milock); 1720*5181Sgd78059 if (dmfe_check_link(dmfep)) { 1721*5181Sgd78059 mutex_enter(dmfep->oplock); 1722*5181Sgd78059 dmfe_stop(dmfep); 1723*5181Sgd78059 DRV_KS_INC(dmfep, KS_LINK_CHECK); 1724*5181Sgd78059 if (dmfep->update_phy) { 1725*5181Sgd78059 /* 1726*5181Sgd78059 * The chip may reset itself for some unknown 1727*5181Sgd78059 * reason. If this happens, the chip will use 1728*5181Sgd78059 * default settings (for speed, duplex, and autoneg), 1729*5181Sgd78059 * which possibly aren't the user's desired settings. 1730*5181Sgd78059 */ 1731*5181Sgd78059 dmfe_update_phy(dmfep); 1732*5181Sgd78059 dmfep->update_phy = B_FALSE; 1733*5181Sgd78059 } 1734*5181Sgd78059 dmfe_recheck_link(dmfep, B_FALSE); 1735*5181Sgd78059 if (dmfep->mac_state == DMFE_MAC_STARTED) 1736*5181Sgd78059 dmfe_start(dmfep); 1737*5181Sgd78059 mutex_exit(dmfep->oplock); 1738*5181Sgd78059 } 1739*5181Sgd78059 mutex_exit(dmfep->milock); 1740*5181Sgd78059 1741*5181Sgd78059 /* 1742*5181Sgd78059 * Keep MAC up-to-date about the state of the link ... 1743*5181Sgd78059 */ 1744*5181Sgd78059 mac_link_update(dmfep->mh, dmfep->link_state); 1745*5181Sgd78059 1746*5181Sgd78059 return (DDI_INTR_CLAIMED); 1747*5181Sgd78059 } 1748*5181Sgd78059 1749*5181Sgd78059 static void 1750*5181Sgd78059 dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why) 1751*5181Sgd78059 { 1752*5181Sgd78059 DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d", 1753*5181Sgd78059 why, ks_id, dmfep->factotum_flag)); 1754*5181Sgd78059 1755*5181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 1756*5181Sgd78059 DRV_KS_INC(dmfep, ks_id); 1757*5181Sgd78059 1758*5181Sgd78059 if (dmfep->factotum_flag++ == 0) 1759*5181Sgd78059 ddi_trigger_softintr(dmfep->factotum_id); 1760*5181Sgd78059 } 1761*5181Sgd78059 1762*5181Sgd78059 #undef DMFE_DBG 1763*5181Sgd78059 1764*5181Sgd78059 1765*5181Sgd78059 /* 1766*5181Sgd78059 * ========== Periodic Tasks (Cyclic handler & friends) ========== 1767*5181Sgd78059 */ 1768*5181Sgd78059 1769*5181Sgd78059 #define DMFE_DBG DMFE_DBG_TICK /* debug flag for this code */ 1770*5181Sgd78059 1771*5181Sgd78059 /* 1772*5181Sgd78059 * Periodic tick tasks, run from the cyclic handler 1773*5181Sgd78059 * 1774*5181Sgd78059 * Check the state of the link and wake the factotum if necessary 1775*5181Sgd78059 */ 1776*5181Sgd78059 static void 1777*5181Sgd78059 dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat) 1778*5181Sgd78059 { 1779*5181Sgd78059 link_state_t phy_state; 1780*5181Sgd78059 link_state_t utp_state; 1781*5181Sgd78059 const char *why; 1782*5181Sgd78059 int ks_id; 1783*5181Sgd78059 1784*5181Sgd78059 _NOTE(ARGUNUSED(istat)) 1785*5181Sgd78059 1786*5181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 1787*5181Sgd78059 1788*5181Sgd78059 /* 1789*5181Sgd78059 * Is it time to wake the factotum? We do so periodically, in 1790*5181Sgd78059 * case the fast check below doesn't always reveal a link change 1791*5181Sgd78059 */ 1792*5181Sgd78059 if (dmfep->link_poll_tix-- == 0) { 1793*5181Sgd78059 dmfep->link_poll_tix = factotum_tix; 1794*5181Sgd78059 why = "tick (link poll)"; 1795*5181Sgd78059 ks_id = KS_TICK_LINK_POLL; 1796*5181Sgd78059 } else { 1797*5181Sgd78059 why = NULL; 1798*5181Sgd78059 ks_id = KS_TICK_LINK_STATE; 1799*5181Sgd78059 } 1800*5181Sgd78059 1801*5181Sgd78059 /* 1802*5181Sgd78059 * Has the link status changed? If so, we might want to wake 1803*5181Sgd78059 * the factotum to deal with it. 1804*5181Sgd78059 */ 1805*5181Sgd78059 phy_state = (gpsr & GPS_LINK_STATUS) ? 
LINK_STATE_UP : LINK_STATE_DOWN; 1806*5181Sgd78059 utp_state = (gpsr & GPS_UTP_SIG) ? LINK_STATE_UP : LINK_STATE_DOWN; 1807*5181Sgd78059 if (phy_state != utp_state) 1808*5181Sgd78059 why = "tick (phy <> utp)"; 1809*5181Sgd78059 else if ((dmfep->link_state == LINK_STATE_UP) && 1810*5181Sgd78059 (phy_state == LINK_STATE_DOWN)) 1811*5181Sgd78059 why = "tick (UP -> DOWN)"; 1812*5181Sgd78059 else if (phy_state != dmfep->link_state) { 1813*5181Sgd78059 if (dmfep->link_poll_tix > factotum_fast_tix) 1814*5181Sgd78059 dmfep->link_poll_tix = factotum_fast_tix; 1815*5181Sgd78059 } 1816*5181Sgd78059 1817*5181Sgd78059 if (why != NULL) { 1818*5181Sgd78059 DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d", 1819*5181Sgd78059 why, dmfep->link_state, phy_state, utp_state)); 1820*5181Sgd78059 dmfe_wake_factotum(dmfep, ks_id, why); 1821*5181Sgd78059 } 1822*5181Sgd78059 } 1823*5181Sgd78059 1824*5181Sgd78059 /* 1825*5181Sgd78059 * Periodic tick tasks, run from the cyclic handler 1826*5181Sgd78059 * 1827*5181Sgd78059 * Check for TX stall; flag an error and wake the factotum if so. 1828*5181Sgd78059 */ 1829*5181Sgd78059 static void 1830*5181Sgd78059 dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat) 1831*5181Sgd78059 { 1832*5181Sgd78059 boolean_t tx_stall; 1833*5181Sgd78059 uint32_t tx_state; 1834*5181Sgd78059 uint32_t limit; 1835*5181Sgd78059 1836*5181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 1837*5181Sgd78059 1838*5181Sgd78059 /* 1839*5181Sgd78059 * Check for transmit stall ... 1840*5181Sgd78059 * 1841*5181Sgd78059 * IF there's at least one packet in the ring, AND the timeout 1842*5181Sgd78059 * has elapsed, AND we can't reclaim any descriptors, THEN we've 1843*5181Sgd78059 * stalled; we flag an error to trigger a reset-and-recover cycle. 1844*5181Sgd78059 * 1845*5181Sgd78059 * Note that the timeout limit is based on the transmit engine 1846*5181Sgd78059 * state; we allow the transmitter longer to make progress in 1847*5181Sgd78059 * some states than in others, based on observations of this 1848*5181Sgd78059 * chip's actual behaviour in the lab. 1849*5181Sgd78059 * 1850*5181Sgd78059 * By observation, we find that on about 1 in 10000 passes through 1851*5181Sgd78059 * here, the TX lock is already held. In that case, we'll skip 1852*5181Sgd78059 * the check on this pass rather than wait. Most likely, the send 1853*5181Sgd78059 * routine was holding the lock when the interrupt happened, and 1854*5181Sgd78059 * we'll succeed next time through. In the event of a real stall, 1855*5181Sgd78059 * the TX ring will fill up, after which the send routine won't be 1856*5181Sgd78059 * called any more and then we're sure to get in.
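 *
 * Restated compactly, the stall predicate evaluated below is:
 *
 *	(tx.n_free < tx.n_desc)			(ring not empty)
 *	&& (++tx_pending_tix >= limit)		(timeout elapsed)
 *	&& !dmfe_reclaim_tx_desc(dmfep)		(nothing reclaimable)
 *
 * where <limit> is stall_100_tix[tx_state] or stall_10_tix[tx_state]
 * according to the GPS_LINK_100 bit in <gpsr>.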
1857*5181Sgd78059 */ 1858*5181Sgd78059 tx_stall = B_FALSE; 1859*5181Sgd78059 if (mutex_tryenter(dmfep->txlock)) { 1860*5181Sgd78059 if (dmfep->tx.n_free < dmfep->tx.n_desc) { 1861*5181Sgd78059 tx_state = TX_PROCESS_STATE(istat); 1862*5181Sgd78059 if (gpsr & GPS_LINK_100) 1863*5181Sgd78059 limit = stall_100_tix[tx_state]; 1864*5181Sgd78059 else 1865*5181Sgd78059 limit = stall_10_tix[tx_state]; 1866*5181Sgd78059 if (++dmfep->tx_pending_tix >= limit && 1867*5181Sgd78059 dmfe_reclaim_tx_desc(dmfep) == B_FALSE) { 1868*5181Sgd78059 dmfe_log(dmfep, "TX stall detected " 1869*5181Sgd78059 "after %d ticks in state %d; " 1870*5181Sgd78059 "automatic recovery initiated", 1871*5181Sgd78059 dmfep->tx_pending_tix, tx_state); 1872*5181Sgd78059 tx_stall = B_TRUE; 1873*5181Sgd78059 } 1874*5181Sgd78059 } 1875*5181Sgd78059 mutex_exit(dmfep->txlock); 1876*5181Sgd78059 } 1877*5181Sgd78059 1878*5181Sgd78059 if (tx_stall) { 1879*5181Sgd78059 dmfe_stop_chip(dmfep, CHIP_ERROR); 1880*5181Sgd78059 dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)"); 1881*5181Sgd78059 } 1882*5181Sgd78059 } 1883*5181Sgd78059 1884*5181Sgd78059 /* 1885*5181Sgd78059 * Cyclic callback handler 1886*5181Sgd78059 */ 1887*5181Sgd78059 static void 1888*5181Sgd78059 dmfe_cyclic(void *arg) 1889*5181Sgd78059 { 1890*5181Sgd78059 dmfe_t *dmfep = arg; /* private device info */ 1891*5181Sgd78059 uint32_t istat; 1892*5181Sgd78059 uint32_t gpsr; 1893*5181Sgd78059 1894*5181Sgd78059 /* 1895*5181Sgd78059 * If the chip's not RUNNING, there's nothing to do. 1896*5181Sgd78059 * If we can't get the mutex straight away, we'll just 1897*5181Sgd78059 * skip this pass; we'll be back soon enough anyway. 1898*5181Sgd78059 */ 1899*5181Sgd78059 if (dmfep->chip_state != CHIP_RUNNING) 1900*5181Sgd78059 return; 1901*5181Sgd78059 if (mutex_tryenter(dmfep->oplock) == 0) 1902*5181Sgd78059 return; 1903*5181Sgd78059 1904*5181Sgd78059 /* 1905*5181Sgd78059 * Recheck chip state (it might have been stopped since we 1906*5181Sgd78059 * checked above). If still running, call each of the *tick* 1907*5181Sgd78059 * tasks. They will check for link change, TX stall, etc ...
1908*5181Sgd78059 */ 1909*5181Sgd78059 if (dmfep->chip_state == CHIP_RUNNING) { 1910*5181Sgd78059 istat = dmfe_chip_get32(dmfep, STATUS_REG); 1911*5181Sgd78059 gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG); 1912*5181Sgd78059 dmfe_tick_link_check(dmfep, gpsr, istat); 1913*5181Sgd78059 dmfe_tick_stall_check(dmfep, gpsr, istat); 1914*5181Sgd78059 } 1915*5181Sgd78059 1916*5181Sgd78059 DRV_KS_INC(dmfep, KS_CYCLIC_RUN); 1917*5181Sgd78059 mutex_exit(dmfep->oplock); 1918*5181Sgd78059 } 1919*5181Sgd78059 1920*5181Sgd78059 #undef DMFE_DBG 1921*5181Sgd78059 1922*5181Sgd78059 1923*5181Sgd78059 /* 1924*5181Sgd78059 * ========== Hardware interrupt handler ========== 1925*5181Sgd78059 */ 1926*5181Sgd78059 1927*5181Sgd78059 #define DMFE_DBG DMFE_DBG_INT /* debug flag for this code */ 1928*5181Sgd78059 1929*5181Sgd78059 /* 1930*5181Sgd78059 * dmfe_interrupt() -- handle chip interrupts 1931*5181Sgd78059 */ 1932*5181Sgd78059 static uint_t 1933*5181Sgd78059 dmfe_interrupt(caddr_t arg) 1934*5181Sgd78059 { 1935*5181Sgd78059 dmfe_t *dmfep; /* private device info */ 1936*5181Sgd78059 uint32_t interrupts; 1937*5181Sgd78059 uint32_t istat; 1938*5181Sgd78059 const char *msg; 1939*5181Sgd78059 mblk_t *mp; 1940*5181Sgd78059 boolean_t warning_msg = B_TRUE; 1941*5181Sgd78059 1942*5181Sgd78059 dmfep = (dmfe_t *)arg; 1943*5181Sgd78059 1944*5181Sgd78059 /* 1945*5181Sgd78059 * A quick check as to whether the interrupt was from this 1946*5181Sgd78059 * device, before we even finish setting up all our local 1947*5181Sgd78059 * variables. Note that reading the interrupt status register 1948*5181Sgd78059 * doesn't have any unpleasant side effects such as clearing 1949*5181Sgd78059 * the bits read, so it's quite OK to re-read it once we have 1950*5181Sgd78059 * determined that we are going to service this interrupt and 1951*5181Sgd78059 * grabbed the mutexen. 1952*5181Sgd78059 */ 1953*5181Sgd78059 istat = dmfe_chip_get32(dmfep, STATUS_REG); 1954*5181Sgd78059 if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0) 1955*5181Sgd78059 return (DDI_INTR_UNCLAIMED); 1956*5181Sgd78059 1957*5181Sgd78059 /* 1958*5181Sgd78059 * Unfortunately, there can be a race condition between attach() 1959*5181Sgd78059 * adding the interrupt handler and initialising the mutexen, 1960*5181Sgd78059 * and the handler itself being called because of a pending 1961*5181Sgd78059 * interrupt. So, we check <imask>; if it shows that interrupts 1962*5181Sgd78059 * haven't yet been enabled (and therefore we shouldn't really 1963*5181Sgd78059 * be here at all), we will just write back the value read from 1964*5181Sgd78059 * the status register, thus acknowledging (and clearing) *all* 1965*5181Sgd78059 * pending conditions without really servicing them, and claim 1966*5181Sgd78059 * the interrupt. 1967*5181Sgd78059 */ 1968*5181Sgd78059 if (dmfep->imask == 0) { 1969*5181Sgd78059 DMFE_DEBUG(("dmfe_interrupt: early interrupt 0x%x", istat)); 1970*5181Sgd78059 dmfe_chip_put32(dmfep, STATUS_REG, istat); 1971*5181Sgd78059 return (DDI_INTR_CLAIMED); 1972*5181Sgd78059 } 1973*5181Sgd78059 1974*5181Sgd78059 /* 1975*5181Sgd78059 * We're committed to servicing this interrupt, but we 1976*5181Sgd78059 * need to get the lock before going any further ... 1977*5181Sgd78059 */ 1978*5181Sgd78059 mutex_enter(dmfep->oplock); 1979*5181Sgd78059 DRV_KS_INC(dmfep, KS_INTERRUPT); 1980*5181Sgd78059 1981*5181Sgd78059 /* 1982*5181Sgd78059 * Identify bits that represent enabled interrupts ... 
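 *
 * (The status register is OR-ed into <istat> again so that any bits
 * raised between the first read and taking <oplock> are seen too;
 * masking with <imask> then leaves only the sources we enabled.)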
1983*5181Sgd78059 */ 1984*5181Sgd78059 istat |= dmfe_chip_get32(dmfep, STATUS_REG); 1985*5181Sgd78059 interrupts = istat & dmfep->imask; 1986*5181Sgd78059 ASSERT(interrupts != 0); 1987*5181Sgd78059 1988*5181Sgd78059 DMFE_DEBUG(("dmfe_interrupt: istat 0x%x -> 0x%x", istat, interrupts)); 1989*5181Sgd78059 1990*5181Sgd78059 /* 1991*5181Sgd78059 * Check for any interrupts other than TX/RX done. 1992*5181Sgd78059 * If there are any, they are considered Abnormal 1993*5181Sgd78059 * and will cause the chip to be reset. 1994*5181Sgd78059 */ 1995*5181Sgd78059 if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) { 1996*5181Sgd78059 if (istat & ABNORMAL_SUMMARY_INT) { 1997*5181Sgd78059 /* 1998*5181Sgd78059 * Any Abnormal interrupts will lead to us 1999*5181Sgd78059 * resetting the chip, so we don't bother 2000*5181Sgd78059 * to clear each interrupt individually. 2001*5181Sgd78059 * 2002*5181Sgd78059 * Our main task here is to identify the problem, 2003*5181Sgd78059 * by pointing out the most significant unexpected 2004*5181Sgd78059 * bit. Additional bits may well be consequences 2005*5181Sgd78059 * of the first problem, so we consider the possible 2006*5181Sgd78059 * causes in order of severity. 2007*5181Sgd78059 */ 2008*5181Sgd78059 if (interrupts & SYSTEM_ERR_INT) { 2009*5181Sgd78059 switch (istat & SYSTEM_ERR_BITS) { 2010*5181Sgd78059 case SYSTEM_ERR_M_ABORT: 2011*5181Sgd78059 msg = "Bus Master Abort"; 2012*5181Sgd78059 break; 2013*5181Sgd78059 2014*5181Sgd78059 case SYSTEM_ERR_T_ABORT: 2015*5181Sgd78059 msg = "Bus Target Abort"; 2016*5181Sgd78059 break; 2017*5181Sgd78059 2018*5181Sgd78059 case SYSTEM_ERR_PARITY: 2019*5181Sgd78059 msg = "Parity Error"; 2020*5181Sgd78059 break; 2021*5181Sgd78059 2022*5181Sgd78059 default: 2023*5181Sgd78059 msg = "Unknown System Bus Error"; 2024*5181Sgd78059 break; 2025*5181Sgd78059 } 2026*5181Sgd78059 } else if (interrupts & RX_STOPPED_INT) { 2027*5181Sgd78059 msg = "RX process stopped"; 2028*5181Sgd78059 } else if (interrupts & RX_UNAVAIL_INT) { 2029*5181Sgd78059 msg = "RX buffer unavailable"; 2030*5181Sgd78059 warning_msg = B_FALSE; 2031*5181Sgd78059 } else if (interrupts & RX_WATCHDOG_INT) { 2032*5181Sgd78059 msg = "RX watchdog timeout?"; 2033*5181Sgd78059 } else if (interrupts & RX_EARLY_INT) { 2034*5181Sgd78059 msg = "RX early interrupt?"; 2035*5181Sgd78059 } else if (interrupts & TX_STOPPED_INT) { 2036*5181Sgd78059 msg = "TX process stopped"; 2037*5181Sgd78059 } else if (interrupts & TX_JABBER_INT) { 2038*5181Sgd78059 msg = "TX jabber timeout"; 2039*5181Sgd78059 } else if (interrupts & TX_UNDERFLOW_INT) { 2040*5181Sgd78059 msg = "TX underflow?"; 2041*5181Sgd78059 } else if (interrupts & TX_EARLY_INT) { 2042*5181Sgd78059 msg = "TX early interrupt?"; 2043*5181Sgd78059 2044*5181Sgd78059 } else if (interrupts & LINK_STATUS_INT) { 2045*5181Sgd78059 msg = "Link status change?"; 2046*5181Sgd78059 } else if (interrupts & GP_TIMER_INT) { 2047*5181Sgd78059 msg = "Timer expired?"; 2048*5181Sgd78059 } 2049*5181Sgd78059 2050*5181Sgd78059 if (warning_msg) 2051*5181Sgd78059 dmfe_warning(dmfep, "abnormal interrupt, " 2052*5181Sgd78059 "status 0x%x: %s", istat, msg); 2053*5181Sgd78059 2054*5181Sgd78059 /* 2055*5181Sgd78059 * We don't want to run the entire reinitialisation 2056*5181Sgd78059 * code out of this (high-level?) interrupt, so we 2057*5181Sgd78059 * simply STOP the chip, and wake up the factotum 2058*5181Sgd78059 * to reinitialise it ...
2059*5181Sgd78059 */ 2060*5181Sgd78059 dmfe_stop_chip(dmfep, CHIP_ERROR); 2061*5181Sgd78059 dmfe_wake_factotum(dmfep, KS_CHIP_ERROR, 2062*5181Sgd78059 "interrupt (error)"); 2063*5181Sgd78059 } else { 2064*5181Sgd78059 /* 2065*5181Sgd78059 * We shouldn't really get here (it would mean 2066*5181Sgd78059 * there were some unprocessed enabled bits but 2067*5181Sgd78059 * they weren't Abnormal?), but we'll check just 2068*5181Sgd78059 * in case ... 2069*5181Sgd78059 */ 2070*5181Sgd78059 DMFE_DEBUG(("unexpected interrupt bits: 0x%x", istat)); 2071*5181Sgd78059 } 2072*5181Sgd78059 } 2073*5181Sgd78059 2074*5181Sgd78059 /* 2075*5181Sgd78059 * Acknowledge all the original bits - except in the case of an 2076*5181Sgd78059 * error, when we leave them unacknowledged so that the recovery 2077*5181Sgd78059 * code can see what was going on when the problem occurred ... 2078*5181Sgd78059 */ 2079*5181Sgd78059 if (dmfep->chip_state != CHIP_ERROR) { 2080*5181Sgd78059 (void) dmfe_chip_put32(dmfep, STATUS_REG, istat); 2081*5181Sgd78059 /* 2082*5181Sgd78059 * Read-after-write forces completion on PCI bus. 2083*5181Sgd78059 * 2084*5181Sgd78059 */ 2085*5181Sgd78059 (void) dmfe_chip_get32(dmfep, STATUS_REG); 2086*5181Sgd78059 } 2087*5181Sgd78059 2088*5181Sgd78059 2089*5181Sgd78059 /* 2090*5181Sgd78059 * We've finished talking to the chip, so we can drop <oplock> 2091*5181Sgd78059 * before handling the normal interrupts, which only involve 2092*5181Sgd78059 * manipulation of descriptors ... 2093*5181Sgd78059 */ 2094*5181Sgd78059 mutex_exit(dmfep->oplock); 2095*5181Sgd78059 2096*5181Sgd78059 if (interrupts & RX_PKTDONE_INT) 2097*5181Sgd78059 if ((mp = dmfe_getp(dmfep)) != NULL) 2098*5181Sgd78059 mac_rx(dmfep->mh, NULL, mp); 2099*5181Sgd78059 2100*5181Sgd78059 if (interrupts & TX_PKTDONE_INT) { 2101*5181Sgd78059 /* 2102*5181Sgd78059 * The only reason for taking this interrupt is to give 2103*5181Sgd78059 * MAC a chance to schedule queued packets after a 2104*5181Sgd78059 * ring-full condition. To minimise the number of 2105*5181Sgd78059 * redundant TX-Done interrupts, we only mark two of the 2106*5181Sgd78059 * ring descriptors as 'interrupt-on-complete' - all the 2107*5181Sgd78059 * others are simply handed back without an interrupt. 
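 *
 * (dmfe_reclaim_on_done is one of the driver's patchable globals;
 * when it's clear, descriptor reclaim is left entirely to the send
 * path, and this interrupt serves only to call mac_tx_update().)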
2108*5181Sgd78059 */ 2109*5181Sgd78059 if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) { 2110*5181Sgd78059 (void) dmfe_reclaim_tx_desc(dmfep); 2111*5181Sgd78059 mutex_exit(dmfep->txlock); 2112*5181Sgd78059 } 2113*5181Sgd78059 mac_tx_update(dmfep->mh); 2114*5181Sgd78059 } 2115*5181Sgd78059 2116*5181Sgd78059 return (DDI_INTR_CLAIMED); 2117*5181Sgd78059 } 2118*5181Sgd78059 2119*5181Sgd78059 #undef DMFE_DBG 2120*5181Sgd78059 2121*5181Sgd78059 2122*5181Sgd78059 /* 2123*5181Sgd78059 * ========== Statistics update handler ========== 2124*5181Sgd78059 */ 2125*5181Sgd78059 2126*5181Sgd78059 #define DMFE_DBG DMFE_DBG_STATS /* debug flag for this code */ 2127*5181Sgd78059 2128*5181Sgd78059 static int 2129*5181Sgd78059 dmfe_m_stat(void *arg, uint_t stat, uint64_t *val) 2130*5181Sgd78059 { 2131*5181Sgd78059 dmfe_t *dmfep = arg; 2132*5181Sgd78059 int rv = 0; 2133*5181Sgd78059 2134*5181Sgd78059 mutex_enter(dmfep->milock); 2135*5181Sgd78059 mutex_enter(dmfep->oplock); 2136*5181Sgd78059 mutex_enter(dmfep->rxlock); 2137*5181Sgd78059 mutex_enter(dmfep->txlock); 2138*5181Sgd78059 2139*5181Sgd78059 /* make sure we have all the stats collected */ 2140*5181Sgd78059 (void) dmfe_reclaim_tx_desc(dmfep); 2141*5181Sgd78059 2142*5181Sgd78059 switch (stat) { 2143*5181Sgd78059 case MAC_STAT_IFSPEED: 2144*5181Sgd78059 *val = dmfep->op_stats_speed; 2145*5181Sgd78059 break; 2146*5181Sgd78059 2147*5181Sgd78059 case MAC_STAT_IPACKETS: 2148*5181Sgd78059 *val = dmfep->rx_stats_ipackets; 2149*5181Sgd78059 break; 2150*5181Sgd78059 2151*5181Sgd78059 case MAC_STAT_MULTIRCV: 2152*5181Sgd78059 *val = dmfep->rx_stats_multi; 2153*5181Sgd78059 break; 2154*5181Sgd78059 2155*5181Sgd78059 case MAC_STAT_BRDCSTRCV: 2156*5181Sgd78059 *val = dmfep->rx_stats_bcast; 2157*5181Sgd78059 break; 2158*5181Sgd78059 2159*5181Sgd78059 case MAC_STAT_RBYTES: 2160*5181Sgd78059 *val = dmfep->rx_stats_rbytes; 2161*5181Sgd78059 break; 2162*5181Sgd78059 2163*5181Sgd78059 case MAC_STAT_IERRORS: 2164*5181Sgd78059 *val = dmfep->rx_stats_ierrors; 2165*5181Sgd78059 break; 2166*5181Sgd78059 2167*5181Sgd78059 case MAC_STAT_NORCVBUF: 2168*5181Sgd78059 *val = dmfep->rx_stats_norcvbuf; 2169*5181Sgd78059 break; 2170*5181Sgd78059 2171*5181Sgd78059 case MAC_STAT_COLLISIONS: 2172*5181Sgd78059 *val = dmfep->tx_stats_collisions; 2173*5181Sgd78059 break; 2174*5181Sgd78059 2175*5181Sgd78059 case MAC_STAT_OERRORS: 2176*5181Sgd78059 *val = dmfep->tx_stats_oerrors; 2177*5181Sgd78059 break; 2178*5181Sgd78059 2179*5181Sgd78059 case MAC_STAT_OPACKETS: 2180*5181Sgd78059 *val = dmfep->tx_stats_opackets; 2181*5181Sgd78059 break; 2182*5181Sgd78059 2183*5181Sgd78059 case MAC_STAT_MULTIXMT: 2184*5181Sgd78059 *val = dmfep->tx_stats_multi; 2185*5181Sgd78059 break; 2186*5181Sgd78059 2187*5181Sgd78059 case MAC_STAT_BRDCSTXMT: 2188*5181Sgd78059 *val = dmfep->tx_stats_bcast; 2189*5181Sgd78059 break; 2190*5181Sgd78059 2191*5181Sgd78059 case MAC_STAT_OBYTES: 2192*5181Sgd78059 *val = dmfep->tx_stats_obytes; 2193*5181Sgd78059 break; 2194*5181Sgd78059 2195*5181Sgd78059 case MAC_STAT_OVERFLOWS: 2196*5181Sgd78059 *val = dmfep->rx_stats_overflow; 2197*5181Sgd78059 break; 2198*5181Sgd78059 2199*5181Sgd78059 case MAC_STAT_UNDERFLOWS: 2200*5181Sgd78059 *val = dmfep->tx_stats_underflow; 2201*5181Sgd78059 break; 2202*5181Sgd78059 2203*5181Sgd78059 case ETHER_STAT_ALIGN_ERRORS: 2204*5181Sgd78059 *val = dmfep->rx_stats_align; 2205*5181Sgd78059 break; 2206*5181Sgd78059 2207*5181Sgd78059 case ETHER_STAT_FCS_ERRORS: 2208*5181Sgd78059 *val = dmfep->rx_stats_fcs; 2209*5181Sgd78059 break; 
2210*5181Sgd78059 2211*5181Sgd78059 case ETHER_STAT_TOOLONG_ERRORS: 2212*5181Sgd78059 *val = dmfep->rx_stats_toolong; 2213*5181Sgd78059 break; 2214*5181Sgd78059 2215*5181Sgd78059 case ETHER_STAT_TOOSHORT_ERRORS: 2216*5181Sgd78059 *val = dmfep->rx_stats_short; 2217*5181Sgd78059 break; 2218*5181Sgd78059 2219*5181Sgd78059 case ETHER_STAT_MACRCV_ERRORS: 2220*5181Sgd78059 *val = dmfep->rx_stats_macrcv_errors; 2221*5181Sgd78059 break; 2222*5181Sgd78059 2223*5181Sgd78059 case ETHER_STAT_MACXMT_ERRORS: 2224*5181Sgd78059 *val = dmfep->tx_stats_macxmt_errors; 2225*5181Sgd78059 break; 2226*5181Sgd78059 2227*5181Sgd78059 case ETHER_STAT_JABBER_ERRORS: 2228*5181Sgd78059 *val = dmfep->tx_stats_jabber; 2229*5181Sgd78059 break; 2230*5181Sgd78059 2231*5181Sgd78059 case ETHER_STAT_CARRIER_ERRORS: 2232*5181Sgd78059 *val = dmfep->tx_stats_nocarrier; 2233*5181Sgd78059 break; 2234*5181Sgd78059 2235*5181Sgd78059 case ETHER_STAT_TX_LATE_COLLISIONS: 2236*5181Sgd78059 *val = dmfep->tx_stats_xmtlatecoll; 2237*5181Sgd78059 break; 2238*5181Sgd78059 2239*5181Sgd78059 case ETHER_STAT_EX_COLLISIONS: 2240*5181Sgd78059 *val = dmfep->tx_stats_excoll; 2241*5181Sgd78059 break; 2242*5181Sgd78059 2243*5181Sgd78059 case ETHER_STAT_DEFER_XMTS: 2244*5181Sgd78059 *val = dmfep->tx_stats_defer; 2245*5181Sgd78059 break; 2246*5181Sgd78059 2247*5181Sgd78059 case ETHER_STAT_FIRST_COLLISIONS: 2248*5181Sgd78059 *val = dmfep->tx_stats_first_coll; 2249*5181Sgd78059 break; 2250*5181Sgd78059 2251*5181Sgd78059 case ETHER_STAT_MULTI_COLLISIONS: 2252*5181Sgd78059 *val = dmfep->tx_stats_multi_coll; 2253*5181Sgd78059 break; 2254*5181Sgd78059 2255*5181Sgd78059 case ETHER_STAT_XCVR_INUSE: 2256*5181Sgd78059 *val = dmfep->phy_inuse; 2257*5181Sgd78059 break; 2258*5181Sgd78059 2259*5181Sgd78059 case ETHER_STAT_XCVR_ID: 2260*5181Sgd78059 *val = dmfep->phy_id; 2261*5181Sgd78059 break; 2262*5181Sgd78059 2263*5181Sgd78059 case ETHER_STAT_XCVR_ADDR: 2264*5181Sgd78059 *val = dmfep->phy_addr; 2265*5181Sgd78059 break; 2266*5181Sgd78059 2267*5181Sgd78059 case ETHER_STAT_LINK_DUPLEX: 2268*5181Sgd78059 *val = dmfep->op_stats_duplex; 2269*5181Sgd78059 break; 2270*5181Sgd78059 2271*5181Sgd78059 case ETHER_STAT_CAP_100T4: 2272*5181Sgd78059 *val = dmfep->param_bmsr_100T4; 2273*5181Sgd78059 break; 2274*5181Sgd78059 2275*5181Sgd78059 case ETHER_STAT_CAP_100FDX: 2276*5181Sgd78059 *val = dmfep->param_bmsr_100fdx; 2277*5181Sgd78059 break; 2278*5181Sgd78059 2279*5181Sgd78059 case ETHER_STAT_CAP_100HDX: 2280*5181Sgd78059 *val = dmfep->param_bmsr_100hdx; 2281*5181Sgd78059 break; 2282*5181Sgd78059 2283*5181Sgd78059 case ETHER_STAT_CAP_10FDX: 2284*5181Sgd78059 *val = dmfep->param_bmsr_10fdx; 2285*5181Sgd78059 break; 2286*5181Sgd78059 2287*5181Sgd78059 case ETHER_STAT_CAP_10HDX: 2288*5181Sgd78059 *val = dmfep->param_bmsr_10hdx; 2289*5181Sgd78059 break; 2290*5181Sgd78059 2291*5181Sgd78059 case ETHER_STAT_CAP_AUTONEG: 2292*5181Sgd78059 *val = dmfep->param_bmsr_autoneg; 2293*5181Sgd78059 break; 2294*5181Sgd78059 2295*5181Sgd78059 case ETHER_STAT_CAP_REMFAULT: 2296*5181Sgd78059 *val = dmfep->param_bmsr_remfault; 2297*5181Sgd78059 break; 2298*5181Sgd78059 2299*5181Sgd78059 case ETHER_STAT_ADV_CAP_AUTONEG: 2300*5181Sgd78059 *val = dmfep->param_autoneg; 2301*5181Sgd78059 break; 2302*5181Sgd78059 2303*5181Sgd78059 case ETHER_STAT_ADV_CAP_100T4: 2304*5181Sgd78059 *val = dmfep->param_anar_100T4; 2305*5181Sgd78059 break; 2306*5181Sgd78059 2307*5181Sgd78059 case ETHER_STAT_ADV_CAP_100FDX: 2308*5181Sgd78059 *val = dmfep->param_anar_100fdx; 2309*5181Sgd78059 break; 2310*5181Sgd78059 
2311*5181Sgd78059 case ETHER_STAT_ADV_CAP_100HDX: 2312*5181Sgd78059 *val = dmfep->param_anar_100hdx; 2313*5181Sgd78059 break; 2314*5181Sgd78059 2315*5181Sgd78059 case ETHER_STAT_ADV_CAP_10FDX: 2316*5181Sgd78059 *val = dmfep->param_anar_10fdx; 2317*5181Sgd78059 break; 2318*5181Sgd78059 2319*5181Sgd78059 case ETHER_STAT_ADV_CAP_10HDX: 2320*5181Sgd78059 *val = dmfep->param_anar_10hdx; 2321*5181Sgd78059 break; 2322*5181Sgd78059 2323*5181Sgd78059 case ETHER_STAT_ADV_REMFAULT: 2324*5181Sgd78059 *val = dmfep->param_anar_remfault; 2325*5181Sgd78059 break; 2326*5181Sgd78059 2327*5181Sgd78059 case ETHER_STAT_LP_CAP_AUTONEG: 2328*5181Sgd78059 *val = dmfep->param_lp_autoneg; 2329*5181Sgd78059 break; 2330*5181Sgd78059 2331*5181Sgd78059 case ETHER_STAT_LP_CAP_100T4: 2332*5181Sgd78059 *val = dmfep->param_lp_100T4; 2333*5181Sgd78059 break; 2334*5181Sgd78059 2335*5181Sgd78059 case ETHER_STAT_LP_CAP_100FDX: 2336*5181Sgd78059 *val = dmfep->param_lp_100fdx; 2337*5181Sgd78059 break; 2338*5181Sgd78059 2339*5181Sgd78059 case ETHER_STAT_LP_CAP_100HDX: 2340*5181Sgd78059 *val = dmfep->param_lp_100hdx; 2341*5181Sgd78059 break; 2342*5181Sgd78059 2343*5181Sgd78059 case ETHER_STAT_LP_CAP_10FDX: 2344*5181Sgd78059 *val = dmfep->param_lp_10fdx; 2345*5181Sgd78059 break; 2346*5181Sgd78059 2347*5181Sgd78059 case ETHER_STAT_LP_CAP_10HDX: 2348*5181Sgd78059 *val = dmfep->param_lp_10hdx; 2349*5181Sgd78059 break; 2350*5181Sgd78059 2351*5181Sgd78059 case ETHER_STAT_LP_REMFAULT: 2352*5181Sgd78059 *val = dmfep->param_lp_remfault; 2353*5181Sgd78059 break; 2354*5181Sgd78059 2355*5181Sgd78059 default: 2356*5181Sgd78059 rv = ENOTSUP; 2357*5181Sgd78059 } 2358*5181Sgd78059 2359*5181Sgd78059 mutex_exit(dmfep->txlock); 2360*5181Sgd78059 mutex_exit(dmfep->rxlock); 2361*5181Sgd78059 mutex_exit(dmfep->oplock); 2362*5181Sgd78059 mutex_exit(dmfep->milock); 2363*5181Sgd78059 2364*5181Sgd78059 return (rv); 2365*5181Sgd78059 } 2366*5181Sgd78059 2367*5181Sgd78059 #undef DMFE_DBG 2368*5181Sgd78059 2369*5181Sgd78059 2370*5181Sgd78059 /* 2371*5181Sgd78059 * ========== Ioctl handler & subfunctions ========== 2372*5181Sgd78059 */ 2373*5181Sgd78059 2374*5181Sgd78059 #define DMFE_DBG DMFE_DBG_IOCTL /* debug flag for this code */ 2375*5181Sgd78059 2376*5181Sgd78059 /* 2377*5181Sgd78059 * Loopback operation 2378*5181Sgd78059 * 2379*5181Sgd78059 * Support access to the internal loopback and external loopback 2380*5181Sgd78059 * functions selected via the Operation Mode Register (OPR). 2381*5181Sgd78059 * These will be used by netlbtest (see BugId 4370609) 2382*5181Sgd78059 * 2383*5181Sgd78059 * Note that changing the loopback mode causes a stop/restart cycle 2384*5181Sgd78059 * 2385*5181Sgd78059 * It would be nice to evolve this to support the ioctls in sys/netlb.h, 2386*5181Sgd78059 * but then it would be even better to use Brussels to configure this. 2387*5181Sgd78059 */ 2388*5181Sgd78059 static enum ioc_reply 2389*5181Sgd78059 dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd) 2390*5181Sgd78059 { 2391*5181Sgd78059 loopback_t *loop_req_p; 2392*5181Sgd78059 uint32_t loopmode; 2393*5181Sgd78059 2394*5181Sgd78059 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) 2395*5181Sgd78059 return (IOC_INVAL); 2396*5181Sgd78059 2397*5181Sgd78059 loop_req_p = (loopback_t *)mp->b_cont->b_rptr; 2398*5181Sgd78059 2399*5181Sgd78059 switch (cmd) { 2400*5181Sgd78059 default: 2401*5181Sgd78059 /* 2402*5181Sgd78059 * This should never happen ... 
2403*5181Sgd78059 */ 2404*5181Sgd78059 dmfe_error(dmfep, "dmfe_loop_ioctl: invalid cmd 0x%x", cmd); 2405*5181Sgd78059 return (IOC_INVAL); 2406*5181Sgd78059 2407*5181Sgd78059 case DMFE_GET_LOOP_MODE: 2408*5181Sgd78059 /* 2409*5181Sgd78059 * This doesn't return the current loopback mode - it 2410*5181Sgd78059 * returns a bitmask :-( of all possible loopback modes 2411*5181Sgd78059 */ 2412*5181Sgd78059 DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE")); 2413*5181Sgd78059 loop_req_p->loopback = DMFE_LOOPBACK_MODES; 2414*5181Sgd78059 miocack(wq, mp, sizeof (loopback_t), 0); 2415*5181Sgd78059 return (IOC_DONE); 2416*5181Sgd78059 2417*5181Sgd78059 case DMFE_SET_LOOP_MODE: 2418*5181Sgd78059 /* 2419*5181Sgd78059 * Select any of the various loopback modes 2420*5181Sgd78059 */ 2421*5181Sgd78059 DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d", 2422*5181Sgd78059 loop_req_p->loopback)); 2423*5181Sgd78059 switch (loop_req_p->loopback) { 2424*5181Sgd78059 default: 2425*5181Sgd78059 return (IOC_INVAL); 2426*5181Sgd78059 2427*5181Sgd78059 case DMFE_LOOPBACK_OFF: 2428*5181Sgd78059 loopmode = LOOPBACK_OFF; 2429*5181Sgd78059 break; 2430*5181Sgd78059 2431*5181Sgd78059 case DMFE_PHY_A_LOOPBACK_ON: 2432*5181Sgd78059 loopmode = LOOPBACK_PHY_A; 2433*5181Sgd78059 break; 2434*5181Sgd78059 2435*5181Sgd78059 case DMFE_PHY_D_LOOPBACK_ON: 2436*5181Sgd78059 loopmode = LOOPBACK_PHY_D; 2437*5181Sgd78059 break; 2438*5181Sgd78059 2439*5181Sgd78059 case DMFE_INT_LOOPBACK_ON: 2440*5181Sgd78059 loopmode = LOOPBACK_INTERNAL; 2441*5181Sgd78059 break; 2442*5181Sgd78059 } 2443*5181Sgd78059 2444*5181Sgd78059 if ((dmfep->opmode & LOOPBACK_MODE_MASK) != loopmode) { 2445*5181Sgd78059 dmfep->opmode &= ~LOOPBACK_MODE_MASK; 2446*5181Sgd78059 dmfep->opmode |= loopmode; 2447*5181Sgd78059 return (IOC_RESTART_ACK); 2448*5181Sgd78059 } 2449*5181Sgd78059 2450*5181Sgd78059 return (IOC_ACK); 2451*5181Sgd78059 } 2452*5181Sgd78059 } 2453*5181Sgd78059 2454*5181Sgd78059 /* 2455*5181Sgd78059 * Specific dmfe IOCTLs, the mac module handles the generic ones. 2456*5181Sgd78059 */ 2457*5181Sgd78059 static void 2458*5181Sgd78059 dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 2459*5181Sgd78059 { 2460*5181Sgd78059 dmfe_t *dmfep = arg; 2461*5181Sgd78059 struct iocblk *iocp; 2462*5181Sgd78059 enum ioc_reply status; 2463*5181Sgd78059 int cmd; 2464*5181Sgd78059 2465*5181Sgd78059 /* 2466*5181Sgd78059 * Validate the command before bothering with the mutexen ... 
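 *
 * (For reference, a hypothetical user-land caller - the exact
 * loopback_t layout is taken on trust from dmfe_loop_ioctl() above -
 * would reach this point via a STREAMS I_STR ioctl, roughly:
 *
 *	struct strioctl sioc;
 *	loopback_t lb;
 *
 *	lb.loopback = DMFE_INT_LOOPBACK_ON;
 *	sioc.ic_cmd = DMFE_SET_LOOP_MODE;
 *	sioc.ic_timout = 0;
 *	sioc.ic_len = sizeof (lb);
 *	sioc.ic_dp = (char *)&lb;
 *	(void) ioctl(fd, I_STR, &sioc);
 *
 * which arrives here as an M_IOCTL message with the loopback_t in
 * mp->b_cont, just as dmfe_loop_ioctl() expects.)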
2467*5181Sgd78059 */ 2468*5181Sgd78059 iocp = (struct iocblk *)mp->b_rptr; 2469*5181Sgd78059 cmd = iocp->ioc_cmd; 2470*5181Sgd78059 switch (cmd) { 2471*5181Sgd78059 default: 2472*5181Sgd78059 DMFE_DEBUG(("dmfe_m_ioctl: unknown cmd 0x%x", cmd)); 2473*5181Sgd78059 miocnak(wq, mp, 0, EINVAL); 2474*5181Sgd78059 return; 2475*5181Sgd78059 2476*5181Sgd78059 case DMFE_SET_LOOP_MODE: 2477*5181Sgd78059 case DMFE_GET_LOOP_MODE: 2478*5181Sgd78059 case ND_GET: 2479*5181Sgd78059 case ND_SET: 2480*5181Sgd78059 break; 2481*5181Sgd78059 } 2482*5181Sgd78059 2483*5181Sgd78059 mutex_enter(dmfep->milock); 2484*5181Sgd78059 mutex_enter(dmfep->oplock); 2485*5181Sgd78059 2486*5181Sgd78059 switch (cmd) { 2487*5181Sgd78059 default: 2488*5181Sgd78059 _NOTE(NOTREACHED) 2489*5181Sgd78059 status = IOC_INVAL; 2490*5181Sgd78059 break; 2491*5181Sgd78059 2492*5181Sgd78059 case DMFE_SET_LOOP_MODE: 2493*5181Sgd78059 case DMFE_GET_LOOP_MODE: 2494*5181Sgd78059 status = dmfe_loop_ioctl(dmfep, wq, mp, cmd); 2495*5181Sgd78059 break; 2496*5181Sgd78059 2497*5181Sgd78059 case ND_GET: 2498*5181Sgd78059 case ND_SET: 2499*5181Sgd78059 status = dmfe_nd_ioctl(dmfep, wq, mp, cmd); 2500*5181Sgd78059 break; 2501*5181Sgd78059 } 2502*5181Sgd78059 2503*5181Sgd78059 /* 2504*5181Sgd78059 * Do we need to restart? 2505*5181Sgd78059 */ 2506*5181Sgd78059 switch (status) { 2507*5181Sgd78059 default: 2508*5181Sgd78059 break; 2509*5181Sgd78059 2510*5181Sgd78059 case IOC_RESTART_ACK: 2511*5181Sgd78059 case IOC_RESTART: 2512*5181Sgd78059 /* 2513*5181Sgd78059 * PHY parameters changed; we need to stop, update the 2514*5181Sgd78059 * PHY layer and restart before sending the reply or ACK 2515*5181Sgd78059 */ 2516*5181Sgd78059 dmfe_stop(dmfep); 2517*5181Sgd78059 dmfe_update_phy(dmfep); 2518*5181Sgd78059 dmfep->update_phy = B_FALSE; 2519*5181Sgd78059 2520*5181Sgd78059 /* 2521*5181Sgd78059 * The link will now most likely go DOWN and UP, because 2522*5181Sgd78059 * we've changed the loopback state or the link parameters 2523*5181Sgd78059 * or autonegotiation. So we have to check that it's 2524*5181Sgd78059 * settled down before we restart the TX/RX processes. 2525*5181Sgd78059 * The ioctl code will have planted some reason strings 2526*5181Sgd78059 * to explain what's happening, so the link state change 2527*5181Sgd78059 * messages won't be printed on the console. We wake the 2528*5181Sgd78059 * factotum to deal with link notifications, if any ...
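 *
 * (Once this is done, IOC_RESTART_ACK is acknowledged with miocack()
 * in the reply switch below, while IOC_RESTART sends back the reply
 * message that the ioctl code prepared earlier, via qreply().)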
2529*5181Sgd78059 */ 2530*5181Sgd78059 if (dmfe_check_link(dmfep)) { 2531*5181Sgd78059 dmfe_recheck_link(dmfep, B_TRUE); 2532*5181Sgd78059 dmfe_wake_factotum(dmfep, KS_LINK_CHECK, "ioctl"); 2533*5181Sgd78059 } 2534*5181Sgd78059 2535*5181Sgd78059 if (dmfep->mac_state == DMFE_MAC_STARTED) 2536*5181Sgd78059 dmfe_start(dmfep); 2537*5181Sgd78059 break; 2538*5181Sgd78059 } 2539*5181Sgd78059 2540*5181Sgd78059 /* 2541*5181Sgd78059 * The 'reasons-for-link-change', if any, don't apply any more 2542*5181Sgd78059 */ 2543*5181Sgd78059 mutex_exit(dmfep->oplock); 2544*5181Sgd78059 mutex_exit(dmfep->milock); 2545*5181Sgd78059 2546*5181Sgd78059 /* 2547*5181Sgd78059 * Finally, decide how to reply 2548*5181Sgd78059 */ 2549*5181Sgd78059 switch (status) { 2550*5181Sgd78059 default: 2551*5181Sgd78059 /* 2552*5181Sgd78059 * Error, reply with a NAK and EINVAL 2553*5181Sgd78059 */ 2554*5181Sgd78059 miocnak(wq, mp, 0, EINVAL); 2555*5181Sgd78059 break; 2556*5181Sgd78059 2557*5181Sgd78059 case IOC_RESTART_ACK: 2558*5181Sgd78059 case IOC_ACK: 2559*5181Sgd78059 /* 2560*5181Sgd78059 * OK, reply with an ACK 2561*5181Sgd78059 */ 2562*5181Sgd78059 miocack(wq, mp, 0, 0); 2563*5181Sgd78059 break; 2564*5181Sgd78059 2565*5181Sgd78059 case IOC_RESTART: 2566*5181Sgd78059 case IOC_REPLY: 2567*5181Sgd78059 /* 2568*5181Sgd78059 * OK, send prepared reply 2569*5181Sgd78059 */ 2570*5181Sgd78059 qreply(wq, mp); 2571*5181Sgd78059 break; 2572*5181Sgd78059 2573*5181Sgd78059 case IOC_DONE: 2574*5181Sgd78059 /* 2575*5181Sgd78059 * OK, reply already sent 2576*5181Sgd78059 */ 2577*5181Sgd78059 break; 2578*5181Sgd78059 } 2579*5181Sgd78059 } 2580*5181Sgd78059 2581*5181Sgd78059 #undef DMFE_DBG 2582*5181Sgd78059 2583*5181Sgd78059 2584*5181Sgd78059 /* 2585*5181Sgd78059 * ========== Per-instance setup/teardown code ========== 2586*5181Sgd78059 */ 2587*5181Sgd78059 2588*5181Sgd78059 #define DMFE_DBG DMFE_DBG_INIT /* debug flag for this code */ 2589*5181Sgd78059 2590*5181Sgd78059 /* 2591*5181Sgd78059 * Determine local MAC address & broadcast address for this interface 2592*5181Sgd78059 */ 2593*5181Sgd78059 static void 2594*5181Sgd78059 dmfe_find_mac_address(dmfe_t *dmfep) 2595*5181Sgd78059 { 2596*5181Sgd78059 uchar_t *prop; 2597*5181Sgd78059 uint_t propsize; 2598*5181Sgd78059 int err; 2599*5181Sgd78059 2600*5181Sgd78059 /* 2601*5181Sgd78059 * We have to find the "vendor's factory-set address". This is 2602*5181Sgd78059 * the value of the property "local-mac-address", as set by OBP 2603*5181Sgd78059 * (or a .conf file!) 2604*5181Sgd78059 * 2605*5181Sgd78059 * If the property is not there, then we try to find the factory 2606*5181Sgd78059 * mac address from the device's serial EEPROM. 2607*5181Sgd78059 */ 2608*5181Sgd78059 bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr)); 2609*5181Sgd78059 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo, 2610*5181Sgd78059 DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize); 2611*5181Sgd78059 if (err == DDI_PROP_SUCCESS) { 2612*5181Sgd78059 if (propsize == ETHERADDRL) 2613*5181Sgd78059 ethaddr_copy(prop, dmfep->curr_addr); 2614*5181Sgd78059 ddi_prop_free(prop); 2615*5181Sgd78059 } else { 2616*5181Sgd78059 /* no property set...
check eeprom */ 2617*5181Sgd78059 dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr, 2618*5181Sgd78059 ETHERADDRL); 2619*5181Sgd78059 } 2620*5181Sgd78059 2621*5181Sgd78059 DMFE_DEBUG(("dmfe_setup_mac_address: factory %s", 2622*5181Sgd78059 ether_sprintf((void *)dmfep->curr_addr))); 2623*5181Sgd78059 } 2624*5181Sgd78059 2625*5181Sgd78059 static int 2626*5181Sgd78059 dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize, 2627*5181Sgd78059 size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p, 2628*5181Sgd78059 uint_t dma_flags, dma_area_t *dma_p) 2629*5181Sgd78059 { 2630*5181Sgd78059 ddi_dma_cookie_t dma_cookie; 2631*5181Sgd78059 uint_t ncookies; 2632*5181Sgd78059 int err; 2633*5181Sgd78059 2634*5181Sgd78059 /* 2635*5181Sgd78059 * Allocate handle 2636*5181Sgd78059 */ 2637*5181Sgd78059 err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr, 2638*5181Sgd78059 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl); 2639*5181Sgd78059 if (err != DDI_SUCCESS) 2640*5181Sgd78059 return (DDI_FAILURE); 2641*5181Sgd78059 2642*5181Sgd78059 /* 2643*5181Sgd78059 * Allocate memory 2644*5181Sgd78059 */ 2645*5181Sgd78059 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop, 2646*5181Sgd78059 attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), 2647*5181Sgd78059 DDI_DMA_SLEEP, NULL, 2648*5181Sgd78059 &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl); 2649*5181Sgd78059 if (err != DDI_SUCCESS) 2650*5181Sgd78059 return (DDI_FAILURE); 2651*5181Sgd78059 2652*5181Sgd78059 /* 2653*5181Sgd78059 * Bind the two together 2654*5181Sgd78059 */ 2655*5181Sgd78059 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 2656*5181Sgd78059 dma_p->mem_va, dma_p->alength, dma_flags, 2657*5181Sgd78059 DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies); 2658*5181Sgd78059 if (err != DDI_DMA_MAPPED) 2659*5181Sgd78059 return (DDI_FAILURE); 2660*5181Sgd78059 if ((dma_p->ncookies = ncookies) != 1) 2661*5181Sgd78059 return (DDI_FAILURE); 2662*5181Sgd78059 2663*5181Sgd78059 dma_p->mem_dvma = dma_cookie.dmac_address; 2664*5181Sgd78059 if (setup > 0) { 2665*5181Sgd78059 dma_p->setup_dvma = dma_p->mem_dvma + memsize; 2666*5181Sgd78059 dma_p->setup_va = dma_p->mem_va + memsize; 2667*5181Sgd78059 } else { 2668*5181Sgd78059 dma_p->setup_dvma = 0; 2669*5181Sgd78059 dma_p->setup_va = NULL; 2670*5181Sgd78059 } 2671*5181Sgd78059 2672*5181Sgd78059 return (DDI_SUCCESS); 2673*5181Sgd78059 } 2674*5181Sgd78059 2675*5181Sgd78059 /* 2676*5181Sgd78059 * This function allocates the transmit and receive buffers and descriptors. 
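 *
 * Note that the TX descriptor area is allocated with extra room
 * (SETUPBUF_SIZE + DMFE_SLOP): the setup packet buffer lives
 * immediately after the TX ring in the same single-cookie DMA area,
 * which is why dmfe_alloc_dma_mem() above derives setup_va and
 * setup_dvma from <memsize>.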
2677*5181Sgd78059 */ 2678*5181Sgd78059 static int 2679*5181Sgd78059 dmfe_alloc_bufs(dmfe_t *dmfep) 2680*5181Sgd78059 { 2681*5181Sgd78059 size_t memsize; 2682*5181Sgd78059 int err; 2683*5181Sgd78059 2684*5181Sgd78059 /* 2685*5181Sgd78059 * Allocate memory & handles for TX descriptor ring 2686*5181Sgd78059 */ 2687*5181Sgd78059 memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type); 2688*5181Sgd78059 err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP, 2689*5181Sgd78059 &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2690*5181Sgd78059 &dmfep->tx_desc); 2691*5181Sgd78059 if (err != DDI_SUCCESS) 2692*5181Sgd78059 return (DDI_FAILURE); 2693*5181Sgd78059 2694*5181Sgd78059 /* 2695*5181Sgd78059 * Allocate memory & handles for TX buffers 2696*5181Sgd78059 */ 2697*5181Sgd78059 memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE; 2698*5181Sgd78059 err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0, 2699*5181Sgd78059 &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE, 2700*5181Sgd78059 &dmfep->tx_buff); 2701*5181Sgd78059 if (err != DDI_SUCCESS) 2702*5181Sgd78059 return (DDI_FAILURE); 2703*5181Sgd78059 2704*5181Sgd78059 /* 2705*5181Sgd78059 * Allocate memory & handles for RX descriptor ring 2706*5181Sgd78059 */ 2707*5181Sgd78059 memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type); 2708*5181Sgd78059 err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP, 2709*5181Sgd78059 &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2710*5181Sgd78059 &dmfep->rx_desc); 2711*5181Sgd78059 if (err != DDI_SUCCESS) 2712*5181Sgd78059 return (DDI_FAILURE); 2713*5181Sgd78059 2714*5181Sgd78059 /* 2715*5181Sgd78059 * Allocate memory & handles for RX buffers 2716*5181Sgd78059 */ 2717*5181Sgd78059 memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE; 2718*5181Sgd78059 err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0, 2719*5181Sgd78059 &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff); 2720*5181Sgd78059 if (err != DDI_SUCCESS) 2721*5181Sgd78059 return (DDI_FAILURE); 2722*5181Sgd78059 2723*5181Sgd78059 /* 2724*5181Sgd78059 * Allocate bitmasks for tx packet type tracking 2725*5181Sgd78059 */ 2726*5181Sgd78059 dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP); 2727*5181Sgd78059 dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP); 2728*5181Sgd78059 2729*5181Sgd78059 return (DDI_SUCCESS); 2730*5181Sgd78059 } 2731*5181Sgd78059 2732*5181Sgd78059 static void 2733*5181Sgd78059 dmfe_free_dma_mem(dma_area_t *dma_p) 2734*5181Sgd78059 { 2735*5181Sgd78059 if (dma_p->dma_hdl != NULL) { 2736*5181Sgd78059 if (dma_p->ncookies) { 2737*5181Sgd78059 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 2738*5181Sgd78059 dma_p->ncookies = 0; 2739*5181Sgd78059 } 2740*5181Sgd78059 ddi_dma_free_handle(&dma_p->dma_hdl); 2741*5181Sgd78059 dma_p->dma_hdl = NULL; 2742*5181Sgd78059 dma_p->mem_dvma = 0; 2743*5181Sgd78059 dma_p->setup_dvma = 0; 2744*5181Sgd78059 } 2745*5181Sgd78059 2746*5181Sgd78059 if (dma_p->acc_hdl != NULL) { 2747*5181Sgd78059 ddi_dma_mem_free(&dma_p->acc_hdl); 2748*5181Sgd78059 dma_p->acc_hdl = NULL; 2749*5181Sgd78059 dma_p->mem_va = NULL; 2750*5181Sgd78059 dma_p->setup_va = NULL; 2751*5181Sgd78059 } 2752*5181Sgd78059 } 2753*5181Sgd78059 2754*5181Sgd78059 /* 2755*5181Sgd78059 * This routine frees the transmit and receive buffers and descriptors. 2756*5181Sgd78059 * Make sure the chip is stopped before calling it! 
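 *
 * (dmfe_free_dma_mem() checks each handle for NULL and clears it
 * after freeing, so a dma_area_t that was never fully set up, or
 * that has already been freed, is handled harmlessly.)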

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
static void
dmfe_free_bufs(dmfe_t *dmfep)
{
	dmfe_free_dma_mem(&dmfep->rx_buff);
	dmfe_free_dma_mem(&dmfep->rx_desc);
	dmfe_free_dma_mem(&dmfep->tx_buff);
	dmfe_free_dma_mem(&dmfep->tx_desc);

	/*
	 * The NULL checks let this run safely even if dmfe_alloc_bufs()
	 * failed before the bitmasks were allocated
	 */
	if (dmfep->tx_mcast != NULL)
		kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
	if (dmfep->tx_bcast != NULL)
		kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
}

static void
dmfe_unattach(dmfe_t *dmfep)
{
	/*
	 * Clean up and free all DMFE data structures
	 */
	if (dmfep->cycid != NULL) {
		ddi_periodic_delete(dmfep->cycid);
		dmfep->cycid = NULL;
	}

	if (dmfep->ksp_drv != NULL)
		kstat_delete(dmfep->ksp_drv);
	if (dmfep->progress & PROGRESS_HWINT) {
		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
		/* milock is initialised alongside these in dmfe_attach() */
		mutex_destroy(dmfep->milock);
		mutex_destroy(dmfep->txlock);
		mutex_destroy(dmfep->rxlock);
		mutex_destroy(dmfep->oplock);
	}
	if (dmfep->progress & PROGRESS_SOFTINT)
		ddi_remove_softintr(dmfep->factotum_id);
	if (dmfep->progress & PROGRESS_BUFS)
		dmfe_free_bufs(dmfep);
	if (dmfep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&dmfep->io_handle);
	if (dmfep->progress & PROGRESS_NDD)
		dmfe_nd_cleanup(dmfep);

	kmem_free(dmfep, sizeof (*dmfep));
}
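
/*
 * The PROGRESS_* bits implement the usual staged-teardown pattern:
 * dmfe_attach() sets each bit only once the corresponding resource
 * exists, so dmfe_unattach() can serve both the attach failure path
 * and detach(9E) without touching anything that was never set up.
 */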

static int
dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
{
	ddi_acc_handle_t handle;
	uint32_t regval;

	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Get vendor/device/revision.  We expect (but don't check) that
	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
	 */
	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);

	/*
	 * Turn on Bus Master Enable bit and ensure the device is not asleep
	 */
	regval = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));

	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));

	pci_config_teardown(&handle);
	return (DDI_SUCCESS);
}

struct ks_index {
	int index;
	char *name;
};

static const struct ks_index ks_drv_names[] = {
	{ KS_INTERRUPT,			"intr" },
	{ KS_CYCLIC_RUN,		"cyclic_run" },

	{ KS_TICK_LINK_STATE,		"link_state_change" },
	{ KS_TICK_LINK_POLL,		"link_state_poll" },
	{ KS_TX_STALL,			"tx_stall_detect" },
	{ KS_CHIP_ERROR,		"chip_error_interrupt" },

	{ KS_FACTOTUM_RUN,		"factotum_run" },
	{ KS_RECOVERY,			"factotum_recover" },
	{ KS_LINK_CHECK,		"factotum_link_check" },

	{ KS_LINK_UP_CNT,		"link_up_cnt" },
	{ KS_LINK_DROP_CNT,		"link_drop_cnt" },

	{ KS_MIIREG_BMSR,		"mii_status" },
	{ KS_MIIREG_ANAR,		"mii_advert_cap" },
	{ KS_MIIREG_ANLPAR,		"mii_partner_cap" },
	{ KS_MIIREG_ANER,		"mii_expansion_cap" },
	{ KS_MIIREG_DSCSR,		"mii_dscsr" },

	{ -1,				NULL }
};

static void
dmfe_init_kstats(dmfe_t *dmfep, int instance)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	const struct ks_index *ksip;

	/* no need to create MII stats, the mac module already does it */

	/* Create and initialise driver-defined kstats */
	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
	if (ksp != NULL) {
		for (knp = ksp->ks_data, ksip = ks_drv_names;
		    ksip->name != NULL; ++ksip) {
			kstat_named_init(&knp[ksip->index], ksip->name,
			    KSTAT_DATA_UINT64);
		}
		dmfep->ksp_drv = ksp;
		dmfep->knp_drv = knp;
		kstat_install(ksp);
	} else {
		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
	}
}
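
/*
 * The event counters are visible from userland via kstat(1M); assuming
 * DRIVER_NAME is "dmfe", something like
 *	kstat -m dmfe -n dmfe_events
 * should display them for each attached instance.
 */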

static int
dmfe_resume(dev_info_t *devinfo)
{
	dmfe_t *dmfep;				/* Our private data	*/
	chip_id_t chipid;

	dmfep = ddi_get_driver_private(devinfo);
	if (dmfep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (dmfep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the chip's changed its identity (*boggle*)
	 */
	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (chipid.vendor != dmfep->chipid.vendor)
		return (DDI_FAILURE);
	if (chipid.device != dmfep->chipid.device)
		return (DDI_FAILURE);
	if (chipid.revision != dmfep->chipid.revision)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off MAC scheduling
	 */
	mutex_enter(dmfep->oplock);
	dmfe_restart(dmfep);
	mutex_exit(dmfep->oplock);
	mac_tx_update(dmfep->mh);
	return (DDI_SUCCESS);
}
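
/*
 * Note that dmfe_config_init() is reused on resume: besides re-reading
 * the chip identity checked above, it re-enables bus mastering and
 * clears the SLEEP/SNOOZE bits, which may have been reasserted while
 * the device was powered down.
 */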

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	dmfe_t *dmfep;				/* Our private data	*/
	uint32_t csr6;
	int instance;
	int err;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (dmfe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
	ddi_set_driver_private(devinfo, dmfep);
	dmfep->devinfo = devinfo;
	dmfep->dmfe_guard = DMFE_GUARD;

	/*
	 * Initialize more fields in DMFE private data
	 * Determine the local MAC address
	 */
#if DMFEDEBUG
	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
	    debug_propname, dmfe_debug);
#endif	/* DMFEDEBUG */
	dmfep->cycid = NULL;
	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
	    instance);

	/*
	 * Check for custom "opmode-reg-value" property;
	 * if none, use the defaults below for CSR6 ...
	 */
	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, opmode_propname, csr6);

	/*
	 * Read chip ID & set up config space command register(s)
	 */
	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
		dmfe_error(dmfep, "dmfe_config_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_CONFIG;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (dmfe_nd_init(dmfep)) {
		dmfe_error(dmfep, "dmfe_nd_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_NDD;

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_REGS;

	/*
	 * Get our MAC address.
	 */
	dmfe_find_mac_address(dmfep);

	/*
	 * Allocate the TX and RX descriptors/buffers.  PROGRESS_BUFS is
	 * set *before* the allocation, so that a partial failure inside
	 * dmfe_alloc_bufs() is still cleaned up by dmfe_free_bufs() on
	 * the attach_fail path; both it and dmfe_free_dma_mem() are
	 * safe on partially set-up areas.
	 */
	dmfep->tx.n_desc = dmfe_tx_desc;
	dmfep->rx.n_desc = dmfe_rx_desc;
	dmfep->progress |= PROGRESS_BUFS;
	err = dmfe_alloc_bufs(dmfep);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Add the softint handler
	 */
	dmfep->link_poll_tix = factotum_start_tix;
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_SOFTINT;

	/*
	 * Add the h/w interrupt handler & initialise mutexen
	 */
	if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	dmfep->progress |= PROGRESS_HWINT;

	/*
	 * Create & initialise named kstats
	 */
	dmfe_init_kstats(dmfep, instance);
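
	/*
	 * From here on the ring locks are taken in oplock -> rxlock ->
	 * txlock order; presumably this is the lock hierarchy observed
	 * throughout the driver.
	 */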

	/*
	 * Reset & initialise the chip and the ring buffers
	 * Initialise the (internal) PHY
	 */
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	dmfe_reset(dmfep);

	/*
	 * Prepare the setup packet
	 */
	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
	dmfep->addr_set = B_FALSE;
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	dmfep->mac_state = DMFE_MAC_RESET;

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	dmfep->link_state = LINK_STATE_UNKNOWN;
	if (dmfe_init_phy(dmfep) != B_TRUE)
		goto attach_fail;
	dmfep->update_phy = B_TRUE;

	/*
	 * Send a reasonable setup frame.  This configures our starting
	 * address and the broadcast address.
	 */
	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dmfep;
	macp->m_dip = devinfo;
	macp->m_src_addr = dmfep->curr_addr;
	macp->m_callbacks = &dmfe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &dmfep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	/*
	 * Install the cyclic callback that we use to check for link
	 * status, transmit stall, etc.  The cyclic callback
	 * (dmfe_cyclic()) is then invoked in kernel context.
	 */
	ASSERT(dmfep->cycid == NULL);
	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
	    dmfe_tick_us * 1000, DDI_IPL_0);
	return (DDI_SUCCESS);

attach_fail:
	dmfe_unattach(dmfep);
	return (DDI_FAILURE);
}
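
/*
 * Note: ddi_periodic_add(9F) takes its interval in nanoseconds, hence
 * the "dmfe_tick_us * 1000" conversion when the cyclic is installed
 * above.
 */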

/*
 * dmfe_suspend() -- suspend transmit/receive for powerdown
 */
static int
dmfe_suspend(dmfe_t *dmfep)
{
	/*
	 * Just stop processing ...
	 */
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);
	mutex_exit(dmfep->oplock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	dmfe_t *dmfep;

	dmfep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (dmfe_suspend(dmfep));

	case DDI_DETACH:
		break;
	}

	/*
	 * Unregister from the MAC subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	dmfe_unattach(dmfep);
	return (DDI_SUCCESS);
}
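
/*
 * Note that DDI_SUSPEND deliberately leaves all resources (register
 * mappings, interrupts, DMA areas) in place, so that dmfe_resume()
 * only has to reprogram the chip and restart MAC scheduling.
 */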


/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv dmfe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	dmfe_ident,		/* short description */
	&dmfe_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&dmfe_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	uint32_t tmp100;
	uint32_t tmp10;
	int i;
	int status;

	/*
	 * Calculate global timing parameters: convert the microsecond
	 * tunables into cyclic ticks, rounding up
	 */
	tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
	tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us;

	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
		switch (i) {
		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
			/*
			 * The chip doesn't spontaneously recover from
			 * a stall in these states, so we reset early
			 */
			stall_100_tix[i] = tmp100;
			stall_10_tix[i] = tmp10;
			break;

		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
		default:
			/*
			 * The chip has been seen to spontaneously recover
			 * after an apparent stall in the SUSPEND state,
			 * so we'll allow it rather longer to do so.  As
			 * stalls in other states have not been observed,
			 * we'll use long timeouts for them too ...
			 */
			stall_100_tix[i] = tmp100 * 20;
			stall_10_tix[i] = tmp10 * 20;
			break;
		}
	}

	factotum_tix = (dmfe_link_poll_us+dmfe_tick_us-1)/dmfe_tick_us;
	factotum_fast_tix = 1+(factotum_tix/5);
	factotum_start_tix = 1+(factotum_tix*2);

	mac_init_ops(&dmfe_dev_ops, "dmfe");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		dmfe_log_init();

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&dmfe_dev_ops);
		dmfe_log_fini();
	}

	return (status);
}

#undef	DMFE_DBG