15181Sgd78059 /* 25181Sgd78059 * CDDL HEADER START 35181Sgd78059 * 45181Sgd78059 * The contents of this file are subject to the terms of the 55181Sgd78059 * Common Development and Distribution License (the "License"). 65181Sgd78059 * You may not use this file except in compliance with the License. 75181Sgd78059 * 85181Sgd78059 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 95181Sgd78059 * or http://www.opensolaris.org/os/licensing. 105181Sgd78059 * See the License for the specific language governing permissions 115181Sgd78059 * and limitations under the License. 125181Sgd78059 * 135181Sgd78059 * When distributing Covered Code, include this CDDL HEADER in each 145181Sgd78059 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 155181Sgd78059 * If applicable, add the following below this CDDL HEADER, with the 165181Sgd78059 * fields enclosed by brackets "[]" replaced with your own identifying 175181Sgd78059 * information: Portions Copyright [yyyy] [name of copyright owner] 185181Sgd78059 * 195181Sgd78059 * CDDL HEADER END 205181Sgd78059 */ 215181Sgd78059 /* 225895Syz147064 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 235181Sgd78059 * Use is subject to license terms. 245181Sgd78059 */ 255181Sgd78059 265181Sgd78059 275181Sgd78059 #include <sys/types.h> 285181Sgd78059 #include <sys/sunddi.h> 295181Sgd78059 #include "dmfe_impl.h" 305181Sgd78059 315181Sgd78059 /* 325181Sgd78059 * This is the string displayed by modinfo, etc. 335181Sgd78059 */ 345181Sgd78059 static char dmfe_ident[] = "Davicom DM9102 Ethernet"; 355181Sgd78059 365181Sgd78059 375181Sgd78059 /* 385181Sgd78059 * NOTES: 395181Sgd78059 * 405181Sgd78059 * #defines: 415181Sgd78059 * 425181Sgd78059 * DMFE_PCI_RNUMBER is the register-set number to use for the operating 435181Sgd78059 * registers. 
On an OBP-based machine, regset 0 refers to CONFIG space, 445181Sgd78059 * regset 1 will be the operating registers in I/O space, and regset 2 455181Sgd78059 * will be the operating registers in MEMORY space (preferred). If an 465181Sgd78059 * expansion ROM is fitted, it may appear as a further register set. 475181Sgd78059 * 485181Sgd78059 * DMFE_SLOP defines the amount by which the chip may read beyond 495181Sgd78059 * the end of a buffer or descriptor, apparently 6-8 dwords :( 505181Sgd78059 * We have to make sure this doesn't cause it to access unallocated 515181Sgd78059 * or unmapped memory. 525181Sgd78059 * 535181Sgd78059 * DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP) 545181Sgd78059 * rounded up to a multiple of 4. Here we choose a power of two for 555181Sgd78059 * speed & simplicity at the cost of a bit more memory. 565181Sgd78059 * 575181Sgd78059 * However, the buffer length field in the TX/RX descriptors is only 585181Sgd78059 * eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes 595181Sgd78059 * per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1 605181Sgd78059 * (2000) bytes each. 615181Sgd78059 * 625181Sgd78059 * DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for 635181Sgd78059 * the data buffers. The descriptors are always set up in CONSISTENT 645181Sgd78059 * mode. 655181Sgd78059 * 665181Sgd78059 * DMFE_HEADROOM defines how much space we'll leave in allocated 675181Sgd78059 * mblks before the first valid data byte. This should be chosen 685181Sgd78059 * to be 2 modulo 4, so that once the ethernet header (14 bytes) 695181Sgd78059 * has been stripped off, the packet data will be 4-byte aligned. 705181Sgd78059 * The remaining space can be used by upstream modules to prepend 715181Sgd78059 * any headers required. 725181Sgd78059 * 735181Sgd78059 * Patchable globals: 745181Sgd78059 * 755181Sgd78059 * dmfe_bus_modes: the bus mode bits to be put into CSR0. 
765181Sgd78059 * Setting READ_MULTIPLE in this register seems to cause 775181Sgd78059 * the chip to generate a READ LINE command with a parity 785181Sgd78059 * error! Don't do it! 795181Sgd78059 * 805181Sgd78059 * dmfe_setup_desc1: the value to be put into descriptor word 1 815181Sgd78059 * when sending a SETUP packet. 825181Sgd78059 * 835181Sgd78059 * Setting TX_LAST_DESC in desc1 in a setup packet seems 845181Sgd78059 * to make the chip spontaneously reset internally - it 855181Sgd78059 * attempts to give back the setup packet descriptor by 865181Sgd78059 * writing to PCI address 00000000 - which may or may not 875181Sgd78059 * get a MASTER ABORT - after which most of its registers 885181Sgd78059 * seem to have either default values or garbage! 895181Sgd78059 * 905181Sgd78059 * TX_FIRST_DESC doesn't seem to have the same effect but 915181Sgd78059 * it isn't needed on a setup packet so we'll leave it out 925181Sgd78059 * too, just in case it has some other wierd side-effect. 935181Sgd78059 * 945181Sgd78059 * The default hardware packet filtering mode is now 955181Sgd78059 * HASH_AND_PERFECT (imperfect filtering of multicast 965181Sgd78059 * packets and perfect filtering of unicast packets). 975181Sgd78059 * If this is found not to work reliably, setting the 985181Sgd78059 * TX_FILTER_TYPE1 bit will cause a switchover to using 995181Sgd78059 * HASH_ONLY mode (imperfect filtering of *all* packets). 1005181Sgd78059 * Software will then perform the additional filtering 1015181Sgd78059 * as required. 
1025181Sgd78059 */ 1035181Sgd78059 1045181Sgd78059 #define DMFE_PCI_RNUMBER 2 1055181Sgd78059 #define DMFE_SLOP (8*sizeof (uint32_t)) 1065181Sgd78059 #define DMFE_BUF_SIZE 2048 1075181Sgd78059 #define DMFE_BUF_SIZE_1 2000 1085181Sgd78059 #define DMFE_DMA_MODE DDI_DMA_STREAMING 1095181Sgd78059 #define DMFE_HEADROOM 34 1105181Sgd78059 1115181Sgd78059 static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN; 1125181Sgd78059 static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE | 1135181Sgd78059 TX_FILTER_TYPE0; 1145181Sgd78059 1155181Sgd78059 /* 1165181Sgd78059 * Some tunable parameters ... 1175181Sgd78059 * Number of RX/TX ring entries (128/128) 1185181Sgd78059 * Minimum number of TX ring slots to keep free (1) 1195181Sgd78059 * Low-water mark at which to try to reclaim TX ring slots (1) 1205181Sgd78059 * How often to take a TX-done interrupt (twice per ring cycle) 1215181Sgd78059 * Whether to reclaim TX ring entries on a TX-done interrupt (no) 1225181Sgd78059 */ 1235181Sgd78059 1245181Sgd78059 #define DMFE_TX_DESC 128 /* Should be a multiple of 4 <= 256 */ 1255181Sgd78059 #define DMFE_RX_DESC 128 /* Should be a multiple of 4 <= 256 */ 1265181Sgd78059 1275181Sgd78059 static uint32_t dmfe_rx_desc = DMFE_RX_DESC; 1285181Sgd78059 static uint32_t dmfe_tx_desc = DMFE_TX_DESC; 1295181Sgd78059 static uint32_t dmfe_tx_min_free = 1; 1305181Sgd78059 static uint32_t dmfe_tx_reclaim_level = 1; 1315181Sgd78059 static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1; 1325181Sgd78059 static boolean_t dmfe_reclaim_on_done = B_FALSE; 1335181Sgd78059 1345181Sgd78059 /* 1355181Sgd78059 * Time-related parameters: 1365181Sgd78059 * 1375181Sgd78059 * We use a cyclic to provide a periodic callback; this is then used 1385181Sgd78059 * to check for TX-stall and poll the link status register. 1395181Sgd78059 * 1405181Sgd78059 * DMFE_TICK is the interval between cyclic callbacks, in microseconds. 
1415181Sgd78059 * 1425181Sgd78059 * TX_STALL_TIME_100 is the timeout in microseconds between passing 1435181Sgd78059 * a packet to the chip for transmission and seeing that it's gone, 1445181Sgd78059 * when running at 100Mb/s. If we haven't reclaimed at least one 1455181Sgd78059 * descriptor in this time we assume the transmitter has stalled 1465181Sgd78059 * and reset the chip. 1475181Sgd78059 * 1485181Sgd78059 * TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s. 1495181Sgd78059 * 1505181Sgd78059 * LINK_POLL_TIME is the interval between checks on the link state 1515181Sgd78059 * when nothing appears to have happened (this is in addition to the 1525181Sgd78059 * case where we think we've detected a link change, and serves as a 1535181Sgd78059 * backup in case the quick link check doesn't work properly). 1545181Sgd78059 * 1555181Sgd78059 * Patchable globals: 1565181Sgd78059 * 1575181Sgd78059 * dmfe_tick_us: DMFE_TICK 1585181Sgd78059 * dmfe_tx100_stall_us: TX_STALL_TIME_100 1595181Sgd78059 * dmfe_tx10_stall_us: TX_STALL_TIME_10 1605181Sgd78059 * dmfe_link_poll_us: LINK_POLL_TIME 1615181Sgd78059 * 1625181Sgd78059 * These are then used in _init() to calculate: 1635181Sgd78059 * 1645181Sgd78059 * stall_100_tix[]: number of consecutive cyclic callbacks without a 1655181Sgd78059 * reclaim before the TX process is considered stalled, 1665181Sgd78059 * when running at 100Mb/s. The elements are indexed 1675181Sgd78059 * by transmit-engine-state. 1685181Sgd78059 * stall_10_tix[]: number of consecutive cyclic callbacks without a 1695181Sgd78059 * reclaim before the TX process is considered stalled, 1705181Sgd78059 * when running at 10Mb/s. The elements are indexed 1715181Sgd78059 * by transmit-engine-state. 
1725181Sgd78059 * factotum_tix: number of consecutive cyclic callbacks before waking 1735181Sgd78059 * up the factotum even though there doesn't appear to 1745181Sgd78059 * be anything for it to do 1755181Sgd78059 */ 1765181Sgd78059 1775181Sgd78059 #define DMFE_TICK 25000 /* microseconds */ 1785181Sgd78059 #define TX_STALL_TIME_100 50000 /* microseconds */ 1795181Sgd78059 #define TX_STALL_TIME_10 200000 /* microseconds */ 1805181Sgd78059 #define LINK_POLL_TIME 5000000 /* microseconds */ 1815181Sgd78059 1825181Sgd78059 static uint32_t dmfe_tick_us = DMFE_TICK; 1835181Sgd78059 static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100; 1845181Sgd78059 static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10; 1855181Sgd78059 static uint32_t dmfe_link_poll_us = LINK_POLL_TIME; 1865181Sgd78059 1875181Sgd78059 /* 1885181Sgd78059 * Calculated from above in _init() 1895181Sgd78059 */ 1905181Sgd78059 1915181Sgd78059 static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1]; 1925181Sgd78059 static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1]; 1935181Sgd78059 static uint32_t factotum_tix; 1945181Sgd78059 static uint32_t factotum_fast_tix; 1955181Sgd78059 static uint32_t factotum_start_tix; 1965181Sgd78059 1975181Sgd78059 /* 1985181Sgd78059 * Property names 1995181Sgd78059 */ 2005181Sgd78059 static char localmac_propname[] = "local-mac-address"; 2015181Sgd78059 static char opmode_propname[] = "opmode-reg-value"; 2025181Sgd78059 static char debug_propname[] = "dmfe-debug-flags"; 2035181Sgd78059 2045181Sgd78059 static int dmfe_m_start(void *); 2055181Sgd78059 static void dmfe_m_stop(void *); 2065181Sgd78059 static int dmfe_m_promisc(void *, boolean_t); 2075181Sgd78059 static int dmfe_m_multicst(void *, boolean_t, const uint8_t *); 2085181Sgd78059 static int dmfe_m_unicst(void *, const uint8_t *); 2095181Sgd78059 static void dmfe_m_ioctl(void *, queue_t *, mblk_t *); 2105181Sgd78059 static mblk_t *dmfe_m_tx(void *, mblk_t *); 2115181Sgd78059 static int dmfe_m_stat(void *, uint_t, uint64_t 
*); 2125181Sgd78059 2135181Sgd78059 static mac_callbacks_t dmfe_m_callbacks = { 214*8275SEric Cheng (MC_IOCTL), 2155181Sgd78059 dmfe_m_stat, 2165181Sgd78059 dmfe_m_start, 2175181Sgd78059 dmfe_m_stop, 2185181Sgd78059 dmfe_m_promisc, 2195181Sgd78059 dmfe_m_multicst, 2205181Sgd78059 dmfe_m_unicst, 2215181Sgd78059 dmfe_m_tx, 222*8275SEric Cheng dmfe_m_ioctl, 2235181Sgd78059 NULL, 2245181Sgd78059 }; 2255181Sgd78059 2265181Sgd78059 2275181Sgd78059 /* 2285181Sgd78059 * Describes the chip's DMA engine 2295181Sgd78059 */ 2305181Sgd78059 static ddi_dma_attr_t dma_attr = { 2315181Sgd78059 DMA_ATTR_V0, /* dma_attr version */ 2325181Sgd78059 0, /* dma_attr_addr_lo */ 2335181Sgd78059 (uint32_t)0xFFFFFFFF, /* dma_attr_addr_hi */ 2345181Sgd78059 0x0FFFFFF, /* dma_attr_count_max */ 2355181Sgd78059 0x20, /* dma_attr_align */ 2365181Sgd78059 0x7F, /* dma_attr_burstsizes */ 2375181Sgd78059 1, /* dma_attr_minxfer */ 2385181Sgd78059 (uint32_t)0xFFFFFFFF, /* dma_attr_maxxfer */ 2395181Sgd78059 (uint32_t)0xFFFFFFFF, /* dma_attr_seg */ 2405181Sgd78059 1, /* dma_attr_sgllen */ 2415181Sgd78059 1, /* dma_attr_granular */ 2425181Sgd78059 0 /* dma_attr_flags */ 2435181Sgd78059 }; 2445181Sgd78059 2455181Sgd78059 /* 2465181Sgd78059 * DMA access attributes for registers and descriptors 2475181Sgd78059 */ 2485181Sgd78059 static ddi_device_acc_attr_t dmfe_reg_accattr = { 2495181Sgd78059 DDI_DEVICE_ATTR_V0, 2505181Sgd78059 DDI_STRUCTURE_LE_ACC, 2515181Sgd78059 DDI_STRICTORDER_ACC 2525181Sgd78059 }; 2535181Sgd78059 2545181Sgd78059 /* 2555181Sgd78059 * DMA access attributes for data: NOT to be byte swapped. 
2565181Sgd78059 */ 2575181Sgd78059 static ddi_device_acc_attr_t dmfe_data_accattr = { 2585181Sgd78059 DDI_DEVICE_ATTR_V0, 2595181Sgd78059 DDI_NEVERSWAP_ACC, 2605181Sgd78059 DDI_STRICTORDER_ACC 2615181Sgd78059 }; 2625181Sgd78059 2635181Sgd78059 static uchar_t dmfe_broadcast_addr[ETHERADDRL] = { 2645181Sgd78059 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2655181Sgd78059 }; 2665181Sgd78059 2675181Sgd78059 2685181Sgd78059 /* 2695181Sgd78059 * ========== Lowest-level chip register & ring access routines ========== 2705181Sgd78059 */ 2715181Sgd78059 2725181Sgd78059 /* 2735181Sgd78059 * I/O register get/put routines 2745181Sgd78059 */ 2755181Sgd78059 uint32_t 2765181Sgd78059 dmfe_chip_get32(dmfe_t *dmfep, off_t offset) 2775181Sgd78059 { 2786990Sgd78059 uint32_t *addr; 2796990Sgd78059 2806990Sgd78059 addr = (void *)(dmfep->io_reg + offset); 2816990Sgd78059 return (ddi_get32(dmfep->io_handle, addr)); 2825181Sgd78059 } 2835181Sgd78059 2845181Sgd78059 void 2855181Sgd78059 dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value) 2865181Sgd78059 { 2876990Sgd78059 uint32_t *addr; 2886990Sgd78059 2896990Sgd78059 addr = (void *)(dmfep->io_reg + offset); 2906990Sgd78059 ddi_put32(dmfep->io_handle, addr, value); 2915181Sgd78059 } 2925181Sgd78059 2935181Sgd78059 /* 2945181Sgd78059 * TX/RX ring get/put routines 2955181Sgd78059 */ 2965181Sgd78059 static uint32_t 2975181Sgd78059 dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset) 2985181Sgd78059 { 2995181Sgd78059 uint32_t *addr; 3005181Sgd78059 3016990Sgd78059 addr = (void *)dma_p->mem_va; 3025181Sgd78059 return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset)); 3035181Sgd78059 } 3045181Sgd78059 3055181Sgd78059 static void 3065181Sgd78059 dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value) 3075181Sgd78059 { 3085181Sgd78059 uint32_t *addr; 3095181Sgd78059 3106990Sgd78059 addr = (void *)dma_p->mem_va; 3115181Sgd78059 ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value); 
3125181Sgd78059 } 3135181Sgd78059 3145181Sgd78059 /* 3155181Sgd78059 * Setup buffer get/put routines 3165181Sgd78059 */ 3175181Sgd78059 static uint32_t 3185181Sgd78059 dmfe_setup_get32(dma_area_t *dma_p, uint_t index) 3195181Sgd78059 { 3205181Sgd78059 uint32_t *addr; 3215181Sgd78059 3226990Sgd78059 addr = (void *)dma_p->setup_va; 3235181Sgd78059 return (ddi_get32(dma_p->acc_hdl, addr + index)); 3245181Sgd78059 } 3255181Sgd78059 3265181Sgd78059 static void 3275181Sgd78059 dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value) 3285181Sgd78059 { 3295181Sgd78059 uint32_t *addr; 3305181Sgd78059 3316990Sgd78059 addr = (void *)dma_p->setup_va; 3325181Sgd78059 ddi_put32(dma_p->acc_hdl, addr + index, value); 3335181Sgd78059 } 3345181Sgd78059 3355181Sgd78059 3365181Sgd78059 /* 3375181Sgd78059 * ========== Low-level chip & ring buffer manipulation ========== 3385181Sgd78059 */ 3395181Sgd78059 3405181Sgd78059 #define DMFE_DBG DMFE_DBG_REGS /* debug flag for this code */ 3415181Sgd78059 3425181Sgd78059 /* 3435181Sgd78059 * dmfe_set_opmode() -- function to set operating mode 3445181Sgd78059 */ 3455181Sgd78059 static void 3465181Sgd78059 dmfe_set_opmode(dmfe_t *dmfep) 3475181Sgd78059 { 3485181Sgd78059 DMFE_DEBUG(("dmfe_set_opmode: opmode 0x%x", dmfep->opmode)); 3495181Sgd78059 3505181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 3515181Sgd78059 3525181Sgd78059 dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode); 3535181Sgd78059 drv_usecwait(10); 3545181Sgd78059 } 3555181Sgd78059 3565181Sgd78059 /* 3575181Sgd78059 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w 3585181Sgd78059 */ 3595181Sgd78059 static void 3605181Sgd78059 dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate) 3615181Sgd78059 { 3625181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 3635181Sgd78059 3645181Sgd78059 /* 3655181Sgd78059 * Stop the chip: 3665181Sgd78059 * disable all interrupts 3675181Sgd78059 * stop TX/RX processes 3685181Sgd78059 * clear the status bits for TX/RX 
stopped 3695181Sgd78059 * If required, reset the chip 3705181Sgd78059 * Record the new state 3715181Sgd78059 */ 3725181Sgd78059 dmfe_chip_put32(dmfep, INT_MASK_REG, 0); 3735181Sgd78059 dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE); 3745181Sgd78059 dmfe_set_opmode(dmfep); 3755181Sgd78059 dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT); 3765181Sgd78059 3775181Sgd78059 switch (newstate) { 3785181Sgd78059 default: 3795181Sgd78059 ASSERT(!"can't get here"); 3805181Sgd78059 return; 3815181Sgd78059 3825181Sgd78059 case CHIP_STOPPED: 3835181Sgd78059 case CHIP_ERROR: 3845181Sgd78059 break; 3855181Sgd78059 3865181Sgd78059 case CHIP_RESET: 3875181Sgd78059 dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET); 3885181Sgd78059 drv_usecwait(10); 3895181Sgd78059 dmfe_chip_put32(dmfep, BUS_MODE_REG, 0); 3905181Sgd78059 drv_usecwait(10); 3915181Sgd78059 dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes); 3925181Sgd78059 break; 3935181Sgd78059 } 3945181Sgd78059 3955181Sgd78059 dmfep->chip_state = newstate; 3965181Sgd78059 } 3975181Sgd78059 3985181Sgd78059 /* 3995181Sgd78059 * Initialize transmit and receive descriptor rings, and 4005181Sgd78059 * set the chip to point to the first entry in each ring 4015181Sgd78059 */ 4025181Sgd78059 static void 4035181Sgd78059 dmfe_init_rings(dmfe_t *dmfep) 4045181Sgd78059 { 4055181Sgd78059 dma_area_t *descp; 4065181Sgd78059 uint32_t pstart; 4075181Sgd78059 uint32_t pnext; 4085181Sgd78059 uint32_t pbuff; 4095181Sgd78059 uint32_t desc1; 4105181Sgd78059 int i; 4115181Sgd78059 4125181Sgd78059 /* 4135181Sgd78059 * You need all the locks in order to rewrite the descriptor rings 4145181Sgd78059 */ 4155181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 4165181Sgd78059 ASSERT(mutex_owned(dmfep->rxlock)); 4175181Sgd78059 ASSERT(mutex_owned(dmfep->txlock)); 4185181Sgd78059 4195181Sgd78059 /* 4205181Sgd78059 * Program the RX ring entries 4215181Sgd78059 */ 4225181Sgd78059 descp = &dmfep->rx_desc; 4235181Sgd78059 pstart = descp->mem_dvma; 
4245181Sgd78059 pnext = pstart + sizeof (struct rx_desc_type); 4255181Sgd78059 pbuff = dmfep->rx_buff.mem_dvma; 4265181Sgd78059 desc1 = RX_CHAINING | DMFE_BUF_SIZE_1; 4275181Sgd78059 4285181Sgd78059 for (i = 0; i < dmfep->rx.n_desc; ++i) { 4295181Sgd78059 dmfe_ring_put32(descp, i, RD_NEXT, pnext); 4305181Sgd78059 dmfe_ring_put32(descp, i, BUFFER1, pbuff); 4315181Sgd78059 dmfe_ring_put32(descp, i, DESC1, desc1); 4325181Sgd78059 dmfe_ring_put32(descp, i, DESC0, RX_OWN); 4335181Sgd78059 4345181Sgd78059 pnext += sizeof (struct rx_desc_type); 4355181Sgd78059 pbuff += DMFE_BUF_SIZE; 4365181Sgd78059 } 4375181Sgd78059 4385181Sgd78059 /* 4395181Sgd78059 * Fix up last entry & sync 4405181Sgd78059 */ 4415181Sgd78059 dmfe_ring_put32(descp, --i, RD_NEXT, pstart); 4425181Sgd78059 DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV); 4435181Sgd78059 dmfep->rx.next_free = 0; 4445181Sgd78059 4455181Sgd78059 /* 4465181Sgd78059 * Set the base address of the RX descriptor list in CSR3 4475181Sgd78059 */ 4485181Sgd78059 DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)", 4495181Sgd78059 descp->mem_va, descp->mem_dvma)); 4505181Sgd78059 dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma); 4515181Sgd78059 4525181Sgd78059 /* 4535181Sgd78059 * Program the TX ring entries 4545181Sgd78059 */ 4555181Sgd78059 descp = &dmfep->tx_desc; 4565181Sgd78059 pstart = descp->mem_dvma; 4575181Sgd78059 pnext = pstart + sizeof (struct tx_desc_type); 4585181Sgd78059 pbuff = dmfep->tx_buff.mem_dvma; 4595181Sgd78059 desc1 = TX_CHAINING; 4605181Sgd78059 4615181Sgd78059 for (i = 0; i < dmfep->tx.n_desc; ++i) { 4625181Sgd78059 dmfe_ring_put32(descp, i, TD_NEXT, pnext); 4635181Sgd78059 dmfe_ring_put32(descp, i, BUFFER1, pbuff); 4645181Sgd78059 dmfe_ring_put32(descp, i, DESC1, desc1); 4655181Sgd78059 dmfe_ring_put32(descp, i, DESC0, 0); 4665181Sgd78059 4675181Sgd78059 pnext += sizeof (struct tx_desc_type); 4685181Sgd78059 pbuff += DMFE_BUF_SIZE; 4695181Sgd78059 } 4705181Sgd78059 4715181Sgd78059 /* 4725181Sgd78059 * Fix up last 
entry & sync 4735181Sgd78059 */ 4745181Sgd78059 dmfe_ring_put32(descp, --i, TD_NEXT, pstart); 4755181Sgd78059 DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV); 4765181Sgd78059 dmfep->tx.n_free = dmfep->tx.n_desc; 4775181Sgd78059 dmfep->tx.next_free = dmfep->tx.next_busy = 0; 4785181Sgd78059 4795181Sgd78059 /* 4805181Sgd78059 * Set the base address of the TX descrptor list in CSR4 4815181Sgd78059 */ 4825181Sgd78059 DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)", 4835181Sgd78059 descp->mem_va, descp->mem_dvma)); 4845181Sgd78059 dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma); 4855181Sgd78059 } 4865181Sgd78059 4875181Sgd78059 /* 4885181Sgd78059 * dmfe_start_chip() -- start the chip transmitting and/or receiving 4895181Sgd78059 */ 4905181Sgd78059 static void 4915181Sgd78059 dmfe_start_chip(dmfe_t *dmfep, int mode) 4925181Sgd78059 { 4935181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 4945181Sgd78059 4955181Sgd78059 dmfep->opmode |= mode; 4965181Sgd78059 dmfe_set_opmode(dmfep); 4975181Sgd78059 4985181Sgd78059 dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0); 4995181Sgd78059 /* 5005181Sgd78059 * Enable VLAN length mode (allows packets to be 4 bytes Longer). 5015181Sgd78059 */ 5025181Sgd78059 dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE); 5035181Sgd78059 5045181Sgd78059 /* 5055181Sgd78059 * Clear any pending process-stopped interrupts 5065181Sgd78059 */ 5075181Sgd78059 dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT); 5085181Sgd78059 dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX : 5095181Sgd78059 mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED; 5105181Sgd78059 } 5115181Sgd78059 5125181Sgd78059 /* 5135181Sgd78059 * dmfe_enable_interrupts() -- enable our favourite set of interrupts. 
5145181Sgd78059 * 5155181Sgd78059 * Normal interrupts: 5165181Sgd78059 * We always enable: 5175181Sgd78059 * RX_PKTDONE_INT (packet received) 5185181Sgd78059 * TX_PKTDONE_INT (TX complete) 5195181Sgd78059 * We never enable: 5205181Sgd78059 * TX_ALLDONE_INT (next TX buffer not ready) 5215181Sgd78059 * 5225181Sgd78059 * Abnormal interrupts: 5235181Sgd78059 * We always enable: 5245181Sgd78059 * RX_STOPPED_INT 5255181Sgd78059 * TX_STOPPED_INT 5265181Sgd78059 * SYSTEM_ERR_INT 5275181Sgd78059 * RX_UNAVAIL_INT 5285181Sgd78059 * We never enable: 5295181Sgd78059 * RX_EARLY_INT 5305181Sgd78059 * RX_WATCHDOG_INT 5315181Sgd78059 * TX_JABBER_INT 5325181Sgd78059 * TX_EARLY_INT 5335181Sgd78059 * TX_UNDERFLOW_INT 5345181Sgd78059 * GP_TIMER_INT (not valid in -9 chips) 5355181Sgd78059 * LINK_STATUS_INT (not valid in -9 chips) 5365181Sgd78059 */ 5375181Sgd78059 static void 5385181Sgd78059 dmfe_enable_interrupts(dmfe_t *dmfep) 5395181Sgd78059 { 5405181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 5415181Sgd78059 5425181Sgd78059 /* 5435181Sgd78059 * Put 'the standard set of interrupts' in the interrupt mask register 5445181Sgd78059 */ 5455181Sgd78059 dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT | 5465181Sgd78059 RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT; 5475181Sgd78059 5485181Sgd78059 dmfe_chip_put32(dmfep, INT_MASK_REG, 5495181Sgd78059 NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask); 5505181Sgd78059 dmfep->chip_state = CHIP_RUNNING; 5515181Sgd78059 5525181Sgd78059 DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask)); 5535181Sgd78059 } 5545181Sgd78059 5555181Sgd78059 #undef DMFE_DBG 5565181Sgd78059 5575181Sgd78059 5585181Sgd78059 /* 5595181Sgd78059 * ========== RX side routines ========== 5605181Sgd78059 */ 5615181Sgd78059 5625181Sgd78059 #define DMFE_DBG DMFE_DBG_RECV /* debug flag for this code */ 5635181Sgd78059 5645181Sgd78059 /* 5655181Sgd78059 * Function to update receive statistics on various errors 5665181Sgd78059 */ 
5675181Sgd78059 static void 5685181Sgd78059 dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0) 5695181Sgd78059 { 5705181Sgd78059 ASSERT(mutex_owned(dmfep->rxlock)); 5715181Sgd78059 5725181Sgd78059 /* 5735181Sgd78059 * The error summary bit and the error bits that it summarises 5745181Sgd78059 * are only valid if this is the last fragment. Therefore, a 5755181Sgd78059 * fragment only contributes to the error statistics if both 5765181Sgd78059 * the last-fragment and error summary bits are set. 5775181Sgd78059 */ 5785181Sgd78059 if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) { 5795181Sgd78059 dmfep->rx_stats_ierrors += 1; 5805181Sgd78059 5815181Sgd78059 /* 5825181Sgd78059 * There are some other error bits in the descriptor for 5835181Sgd78059 * which there don't seem to be appropriate MAC statistics, 5845181Sgd78059 * notably RX_COLLISION and perhaps RX_DESC_ERR. The 5855181Sgd78059 * latter may not be possible if it is supposed to indicate 5865181Sgd78059 * that one buffer has been filled with a partial packet 5875181Sgd78059 * and the next buffer required for the rest of the packet 5885181Sgd78059 * was not available, as all our buffers are more than large 5895181Sgd78059 * enough for a whole packet without fragmenting. 5905181Sgd78059 */ 5915181Sgd78059 5925181Sgd78059 if (desc0 & RX_OVERFLOW) { 5935181Sgd78059 dmfep->rx_stats_overflow += 1; 5945181Sgd78059 5955181Sgd78059 } else if (desc0 & RX_RUNT_FRAME) 5965181Sgd78059 dmfep->rx_stats_short += 1; 5975181Sgd78059 5985181Sgd78059 if (desc0 & RX_CRC) 5995181Sgd78059 dmfep->rx_stats_fcs += 1; 6005181Sgd78059 6015181Sgd78059 if (desc0 & RX_FRAME2LONG) 6025181Sgd78059 dmfep->rx_stats_toolong += 1; 6035181Sgd78059 } 6045181Sgd78059 6055181Sgd78059 /* 6065181Sgd78059 * A receive watchdog timeout is counted as a MAC-level receive 6075181Sgd78059 * error. Strangely, it doesn't set the packet error summary bit, 6085181Sgd78059 * according to the chip data sheet :-? 
6095181Sgd78059 */ 6105181Sgd78059 if (desc0 & RX_RCV_WD_TO) 6115181Sgd78059 dmfep->rx_stats_macrcv_errors += 1; 6125181Sgd78059 6135181Sgd78059 if (desc0 & RX_DRIBBLING) 6145181Sgd78059 dmfep->rx_stats_align += 1; 6155181Sgd78059 6165181Sgd78059 if (desc0 & RX_MII_ERR) 6175181Sgd78059 dmfep->rx_stats_macrcv_errors += 1; 6185181Sgd78059 } 6195181Sgd78059 6205181Sgd78059 /* 6215181Sgd78059 * Receive incoming packet(s) and pass them up ... 6225181Sgd78059 */ 6235181Sgd78059 static mblk_t * 6245181Sgd78059 dmfe_getp(dmfe_t *dmfep) 6255181Sgd78059 { 6265181Sgd78059 dma_area_t *descp; 6275181Sgd78059 mblk_t **tail; 6285181Sgd78059 mblk_t *head; 6295181Sgd78059 mblk_t *mp; 6305181Sgd78059 char *rxb; 6315181Sgd78059 uchar_t *dp; 6325181Sgd78059 uint32_t desc0; 6335181Sgd78059 uint32_t misses; 6345181Sgd78059 int packet_length; 6355181Sgd78059 int index; 6365181Sgd78059 6375181Sgd78059 mutex_enter(dmfep->rxlock); 6385181Sgd78059 6395181Sgd78059 /* 6405181Sgd78059 * Update the missed frame statistic from the on-chip counter. 6415181Sgd78059 */ 6425181Sgd78059 misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG); 6435181Sgd78059 dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK); 6445181Sgd78059 6455181Sgd78059 /* 6465181Sgd78059 * sync (all) receive descriptors before inspecting them 6475181Sgd78059 */ 6485181Sgd78059 descp = &dmfep->rx_desc; 6495181Sgd78059 DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL); 6505181Sgd78059 6515181Sgd78059 /* 6525181Sgd78059 * We should own at least one RX entry, since we've had a 6535181Sgd78059 * receive interrupt, but let's not be dogmatic about it. 
6545181Sgd78059 */ 6555181Sgd78059 index = dmfep->rx.next_free; 6565181Sgd78059 desc0 = dmfe_ring_get32(descp, index, DESC0); 6575181Sgd78059 if (desc0 & RX_OWN) 6585181Sgd78059 DMFE_DEBUG(("dmfe_getp: no work, desc0 0x%x", desc0)); 6595181Sgd78059 6605181Sgd78059 for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) { 6615181Sgd78059 /* 6625181Sgd78059 * Maintain statistics for every descriptor returned 6635181Sgd78059 * to us by the chip ... 6645181Sgd78059 */ 6655181Sgd78059 DMFE_DEBUG(("dmfe_getp: desc0 0x%x", desc0)); 6665181Sgd78059 dmfe_update_rx_stats(dmfep, desc0); 6675181Sgd78059 6685181Sgd78059 /* 6695181Sgd78059 * Check that the entry has both "packet start" and 6705181Sgd78059 * "packet end" flags. We really shouldn't get packet 6715181Sgd78059 * fragments, 'cos all the RX buffers are bigger than 6725181Sgd78059 * the largest valid packet. So we'll just drop any 6735181Sgd78059 * fragments we find & skip on to the next entry. 6745181Sgd78059 */ 6755181Sgd78059 if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) { 6765181Sgd78059 DMFE_DEBUG(("dmfe_getp: dropping fragment")); 6775181Sgd78059 goto skip; 6785181Sgd78059 } 6795181Sgd78059 6805181Sgd78059 /* 6815181Sgd78059 * A whole packet in one buffer. We have to check error 6825181Sgd78059 * status and packet length before forwarding it upstream. 
6835181Sgd78059 */ 6845181Sgd78059 if (desc0 & RX_ERR_SUMMARY) { 6855181Sgd78059 DMFE_DEBUG(("dmfe_getp: dropping errored packet")); 6865181Sgd78059 goto skip; 6875181Sgd78059 } 6885181Sgd78059 6895181Sgd78059 packet_length = (desc0 >> 16) & 0x3fff; 6905181Sgd78059 if (packet_length > DMFE_MAX_PKT_SIZE) { 6915181Sgd78059 DMFE_DEBUG(("dmfe_getp: dropping oversize packet, " 6925181Sgd78059 "length %d", packet_length)); 6935181Sgd78059 goto skip; 6945181Sgd78059 } else if (packet_length < ETHERMIN) { 6955181Sgd78059 /* 6965181Sgd78059 * Note that VLAN packet would be even larger, 6975181Sgd78059 * but we don't worry about dropping runt VLAN 6985181Sgd78059 * frames. 6995181Sgd78059 * 7005181Sgd78059 * This check is probably redundant, as well, 7015181Sgd78059 * since the hardware should drop RUNT frames. 7025181Sgd78059 */ 7035181Sgd78059 DMFE_DEBUG(("dmfe_getp: dropping undersize packet, " 7045181Sgd78059 "length %d", packet_length)); 7055181Sgd78059 goto skip; 7065181Sgd78059 } 7075181Sgd78059 7085181Sgd78059 /* 7095181Sgd78059 * Sync the data, so we can examine it; then check that 7105181Sgd78059 * the packet is really intended for us (remember that 7115181Sgd78059 * if we're using Imperfect Filtering, then the chip will 7125181Sgd78059 * receive unicast packets sent to stations whose addresses 7135181Sgd78059 * just happen to hash to the same value as our own; we 7145181Sgd78059 * discard these here so they don't get sent upstream ...) 7155181Sgd78059 */ 7165181Sgd78059 (void) ddi_dma_sync(dmfep->rx_buff.dma_hdl, 7175181Sgd78059 index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, 7185181Sgd78059 DDI_DMA_SYNC_FORKERNEL); 7195181Sgd78059 rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE]; 7205181Sgd78059 7215181Sgd78059 7225181Sgd78059 /* 7235181Sgd78059 * We do not bother to check that the packet is really for 7245181Sgd78059 * us, we let the MAC framework make that check instead. 
7255181Sgd78059 * This is especially important if we ever want to support 7265181Sgd78059 * multiple MAC addresses. 7275181Sgd78059 */ 7285181Sgd78059 7295181Sgd78059 /* 7305181Sgd78059 * Packet looks good; get a buffer to copy it into. We 7315181Sgd78059 * allow some space at the front of the allocated buffer 7325181Sgd78059 * (HEADROOM) in case any upstream modules want to prepend 7335181Sgd78059 * some sort of header. The value has been carefully chosen 7345181Sgd78059 * So that it also has the side-effect of making the packet 7355181Sgd78059 * *contents* 4-byte aligned, as required by NCA! 7365181Sgd78059 */ 7375181Sgd78059 mp = allocb(DMFE_HEADROOM + packet_length, 0); 7385181Sgd78059 if (mp == NULL) { 7395181Sgd78059 DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet")); 7405181Sgd78059 dmfep->rx_stats_norcvbuf += 1; 7415181Sgd78059 goto skip; 7425181Sgd78059 } 7435181Sgd78059 7445181Sgd78059 /* 7455181Sgd78059 * Account for statistics of good packets. 7465181Sgd78059 */ 7475181Sgd78059 dmfep->rx_stats_ipackets += 1; 7485181Sgd78059 dmfep->rx_stats_rbytes += packet_length; 7495181Sgd78059 if (desc0 & RX_MULTI_FRAME) { 7505181Sgd78059 if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) { 7515181Sgd78059 dmfep->rx_stats_multi += 1; 7525181Sgd78059 } else { 7535181Sgd78059 dmfep->rx_stats_bcast += 1; 7545181Sgd78059 } 7555181Sgd78059 } 7565181Sgd78059 7575181Sgd78059 /* 7585181Sgd78059 * Copy the packet into the STREAMS buffer 7595181Sgd78059 */ 7605181Sgd78059 dp = mp->b_rptr += DMFE_HEADROOM; 7615181Sgd78059 mp->b_cont = mp->b_next = NULL; 7625181Sgd78059 7635181Sgd78059 /* 7645181Sgd78059 * Don't worry about stripping the vlan tag, the MAC 7655181Sgd78059 * layer will take care of that for us. 
7665181Sgd78059 */ 7675181Sgd78059 bcopy(rxb, dp, packet_length); 7685181Sgd78059 7695181Sgd78059 /* 7705181Sgd78059 * Fix up the packet length, and link it to the chain 7715181Sgd78059 */ 7725181Sgd78059 mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL; 7735181Sgd78059 *tail = mp; 7745181Sgd78059 tail = &mp->b_next; 7755181Sgd78059 7765181Sgd78059 skip: 7775181Sgd78059 /* 7785181Sgd78059 * Return ownership of ring entry & advance to next 7795181Sgd78059 */ 7805181Sgd78059 dmfe_ring_put32(descp, index, DESC0, RX_OWN); 7815181Sgd78059 index = NEXT(index, dmfep->rx.n_desc); 7825181Sgd78059 desc0 = dmfe_ring_get32(descp, index, DESC0); 7835181Sgd78059 } 7845181Sgd78059 7855181Sgd78059 /* 7865181Sgd78059 * Remember where to start looking next time ... 7875181Sgd78059 */ 7885181Sgd78059 dmfep->rx.next_free = index; 7895181Sgd78059 7905181Sgd78059 /* 7915181Sgd78059 * sync the receive descriptors that we've given back 7925181Sgd78059 * (actually, we sync all of them for simplicity), and 7935181Sgd78059 * wake the chip in case it had suspended receive 7945181Sgd78059 */ 7955181Sgd78059 DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV); 7965181Sgd78059 dmfe_chip_put32(dmfep, RX_POLL_REG, 0); 7975181Sgd78059 7985181Sgd78059 mutex_exit(dmfep->rxlock); 7995181Sgd78059 return (head); 8005181Sgd78059 } 8015181Sgd78059 8025181Sgd78059 #undef DMFE_DBG 8035181Sgd78059 8045181Sgd78059 8055181Sgd78059 /* 8065181Sgd78059 * ========== Primary TX side routines ========== 8075181Sgd78059 */ 8085181Sgd78059 8095181Sgd78059 #define DMFE_DBG DMFE_DBG_SEND /* debug flag for this code */ 8105181Sgd78059 8115181Sgd78059 /* 8125181Sgd78059 * TX ring management: 8135181Sgd78059 * 8145181Sgd78059 * There are <tx.n_desc> entries in the ring, of which those from 8155181Sgd78059 * <tx.next_free> round to but not including <tx.next_busy> must 8165181Sgd78059 * be owned by the CPU. 
The number of such entries should equal 8175181Sgd78059 * <tx.n_free>; but there may also be some more entries which the 8185181Sgd78059 * chip has given back but which we haven't yet accounted for. 8195181Sgd78059 * The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts 8205181Sgd78059 * as it discovers such entries. 8215181Sgd78059 * 8225181Sgd78059 * Initially, or when the ring is entirely free: 8235181Sgd78059 * C = Owned by CPU 8245181Sgd78059 * D = Owned by Davicom (DMFE) chip 8255181Sgd78059 * 8265181Sgd78059 * tx.next_free tx.n_desc = 16 8275181Sgd78059 * | 8285181Sgd78059 * v 8295181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8305181Sgd78059 * | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | 8315181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8325181Sgd78059 * ^ 8335181Sgd78059 * | 8345181Sgd78059 * tx.next_busy tx.n_free = 16 8355181Sgd78059 * 8365181Sgd78059 * On entry to reclaim() during normal use: 8375181Sgd78059 * 8385181Sgd78059 * tx.next_free tx.n_desc = 16 8395181Sgd78059 * | 8405181Sgd78059 * v 8415181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8425181Sgd78059 * | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C | 8435181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8445181Sgd78059 * ^ 8455181Sgd78059 * | 8465181Sgd78059 * tx.next_busy tx.n_free = 9 8475181Sgd78059 * 8485181Sgd78059 * On exit from reclaim(): 8495181Sgd78059 * 8505181Sgd78059 * tx.next_free tx.n_desc = 16 8515181Sgd78059 * | 8525181Sgd78059 * v 8535181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8545181Sgd78059 * | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C | 8555181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8565181Sgd78059 * ^ 8575181Sgd78059 * | 8585181Sgd78059 * tx.next_busy tx.n_free = 13 8595181Sgd78059 * 8605181Sgd78059 * The 
ring is considered "full" when only one entry is owned by 8615181Sgd78059 * the CPU; thus <tx.n_free> should always be >= 1. 8625181Sgd78059 * 8635181Sgd78059 * tx.next_free tx.n_desc = 16 8645181Sgd78059 * | 8655181Sgd78059 * v 8665181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8675181Sgd78059 * | D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D | 8685181Sgd78059 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 8695181Sgd78059 * ^ 8705181Sgd78059 * | 8715181Sgd78059 * tx.next_busy tx.n_free = 1 8725181Sgd78059 */ 8735181Sgd78059 8745181Sgd78059 /* 8755181Sgd78059 * Function to update transmit statistics on various errors 8765181Sgd78059 */ 8775181Sgd78059 static void 8785181Sgd78059 dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1) 8795181Sgd78059 { 8805181Sgd78059 uint32_t collisions; 8815181Sgd78059 uint32_t errbits; 8825181Sgd78059 uint32_t errsum; 8835181Sgd78059 8845181Sgd78059 ASSERT(mutex_owned(dmfep->txlock)); 8855181Sgd78059 8865181Sgd78059 collisions = ((desc0 >> 3) & 0x0f); 8875181Sgd78059 errsum = desc0 & TX_ERR_SUMMARY; 8885181Sgd78059 errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS | 8895181Sgd78059 TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO); 8905181Sgd78059 if ((errsum == 0) != (errbits == 0)) { 8915181Sgd78059 dmfe_log(dmfep, "dubious TX error status 0x%x", desc0); 8925181Sgd78059 desc0 |= TX_ERR_SUMMARY; 8935181Sgd78059 } 8945181Sgd78059 8955181Sgd78059 if (desc0 & TX_ERR_SUMMARY) { 8965181Sgd78059 dmfep->tx_stats_oerrors += 1; 8975181Sgd78059 8985181Sgd78059 /* 8995181Sgd78059 * If we ever see a transmit jabber timeout, we count it 9005181Sgd78059 * as a MAC-level transmit error; but we probably won't 9015181Sgd78059 * see it as it causes an Abnormal interrupt and we reset 9025181Sgd78059 * the chip in order to recover 9035181Sgd78059 */ 9045181Sgd78059 if (desc0 & TX_JABBER_TO) { 9055181Sgd78059 dmfep->tx_stats_macxmt_errors += 1; 
9065181Sgd78059 dmfep->tx_stats_jabber += 1; 9075181Sgd78059 } 9085181Sgd78059 9095181Sgd78059 if (desc0 & TX_UNDERFLOW) 9105181Sgd78059 dmfep->tx_stats_underflow += 1; 9115181Sgd78059 else if (desc0 & TX_LATE_COLL) 9125181Sgd78059 dmfep->tx_stats_xmtlatecoll += 1; 9135181Sgd78059 9145181Sgd78059 if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER)) 9155181Sgd78059 dmfep->tx_stats_nocarrier += 1; 9165181Sgd78059 9175181Sgd78059 if (desc0 & TX_EXCESS_COLL) { 9185181Sgd78059 dmfep->tx_stats_excoll += 1; 9195181Sgd78059 collisions = 16; 9205181Sgd78059 } 9215181Sgd78059 } else { 9225181Sgd78059 int bit = index % NBBY; 9235181Sgd78059 int byt = index / NBBY; 9245181Sgd78059 9255181Sgd78059 if (dmfep->tx_mcast[byt] & bit) { 9265181Sgd78059 dmfep->tx_mcast[byt] &= ~bit; 9275181Sgd78059 dmfep->tx_stats_multi += 1; 9285181Sgd78059 9295181Sgd78059 } else if (dmfep->tx_bcast[byt] & bit) { 9305181Sgd78059 dmfep->tx_bcast[byt] &= ~bit; 9315181Sgd78059 dmfep->tx_stats_bcast += 1; 9325181Sgd78059 } 9335181Sgd78059 9345181Sgd78059 dmfep->tx_stats_opackets += 1; 9355181Sgd78059 dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1; 9365181Sgd78059 } 9375181Sgd78059 9385181Sgd78059 if (collisions == 1) 9395181Sgd78059 dmfep->tx_stats_first_coll += 1; 9405181Sgd78059 else if (collisions != 0) 9415181Sgd78059 dmfep->tx_stats_multi_coll += 1; 9425181Sgd78059 dmfep->tx_stats_collisions += collisions; 9435181Sgd78059 9445181Sgd78059 if (desc0 & TX_DEFERRED) 9455181Sgd78059 dmfep->tx_stats_defer += 1; 9465181Sgd78059 } 9475181Sgd78059 9485181Sgd78059 /* 9495181Sgd78059 * Reclaim all the ring entries that the chip has returned to us ... 9505181Sgd78059 * 9515181Sgd78059 * Returns B_FALSE if no entries could be reclaimed. Otherwise, reclaims 9525181Sgd78059 * as many as possible, restarts the TX stall timeout, and returns B_TRUE. 
 */
static boolean_t
dmfe_reclaim_tx_desc(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t desc0;
	uint32_t desc1;
	int i;

	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * sync transmit descriptor ring before looking at it
	 */
	descp = &dmfep->tx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Early exit if there are no descriptors to reclaim, either
	 * because they're all reclaimed already, or because the next
	 * one is still owned by the chip ...
	 */
	i = dmfep->tx.next_busy;
	if (i == dmfep->tx.next_free)
		return (B_FALSE);
	desc0 = dmfe_ring_get32(descp, i, DESC0);
	if (desc0 & TX_OWN)
		return (B_FALSE);

	/*
	 * Reclaim as many descriptors as possible ...
	 */
	for (;;) {
		desc1 = dmfe_ring_get32(descp, i, DESC1);
		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);

		if (desc1 & TX_SETUP_PACKET) {
			/*
			 * Setup packet - restore buffer address
			 * (dmfe_send_msg() temporarily pointed BUFFER1
			 * at the setup buffer; point it back at this
			 * slot's regular pre-mapped data buffer)
			 */
			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
			    descp->setup_dvma);
			dmfe_ring_put32(descp, i, BUFFER1,
			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
		} else {
			/*
			 * Regular packet - just update stats
			 */
			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
		}

#if	DMFEDEBUG
		/*
		 * We can use one of the SPARE bits in the TX descriptor
		 * to track when a ring buffer slot is reclaimed.  Then
		 * we can deduce the last operation on a slot from the
		 * top half of DESC0:
		 *
		 *	0x8000 xxxx	given to DMFE chip (TX_OWN)
		 *	0x7fff xxxx	returned but not yet reclaimed
		 *	0x3fff xxxx	reclaimed
		 */
#define	TX_PEND_RECLAIM		(1UL<<30)
		dmfe_ring_put32(descp, i, DESC0, desc0 & ~TX_PEND_RECLAIM);
#endif	/* DMFEDEBUG */

		/*
		 * Update count & index; we're all done if the ring is
		 * now fully reclaimed, or the next entry if still owned
		 * by the chip ...
		 */
		dmfep->tx.n_free += 1;
		i = NEXT(i, dmfep->tx.n_desc);
		if (i == dmfep->tx.next_free)
			break;
		desc0 = dmfe_ring_get32(descp, i, DESC0);
		if (desc0 & TX_OWN)
			break;
	}

	dmfep->tx.next_busy = i;
	dmfep->tx_pending_tix = 0;	/* restart the TX stall timeout	*/
	return (B_TRUE);
}

/*
 * Send the message in the message block chain <mp>.
 *
 * The message is freed if and only if its contents are successfully copied
 * and queued for transmission (so that the return value is B_TRUE).
 * If we can't queue the message, the return value is B_FALSE and
 * the message is *not* freed.
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
	dma_area_t *descp;
	mblk_t *bp;
	char *txb;
	uint32_t desc1;
	uint32_t index;
	size_t totlen;
	size_t mblen;

	/*
	 * If the number of free slots is below the reclaim threshold
	 * (soft limit), we'll try to reclaim some.  If we fail, and
	 * the number of free slots is also below the minimum required
	 * (the hard limit, usually 1), then we can't send the packet.
	 */
	mutex_enter(dmfep->txlock);
	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
	    dmfep->tx.n_free <= dmfe_tx_min_free) {
		/*
		 * Resource shortage - return B_FALSE so the packet
		 * will be queued for retry after the next TX-done
		 * interrupt.
		 */
		mutex_exit(dmfep->txlock);
		DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
		return (B_FALSE);
	}

	/*
	 * There's a slot available, so claim it by incrementing
	 * the next-free index and decrementing the free count.
	 * If the ring is currently empty, we also restart the
	 * stall-detect timer.  The ASSERTions check that our
	 * invariants still hold:
	 *	the next-free index must not match the next-busy index
	 *	there must still be at least one free entry
	 * After this, we now have exclusive ownership of the ring
	 * entry (and matching buffer) indicated by <index>, so we
	 * don't need to hold the TX lock any longer
	 */
	index = dmfep->tx.next_free;
	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
		dmfep->tx_pending_tix = 0;
	ASSERT(dmfep->tx.n_free >= 1);
	mutex_exit(dmfep->txlock);

	/*
	 * Check the ownership of the ring entry ...
	 */
	descp = &dmfep->tx_desc;
	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

	if (mp == NULL) {
		/*
		 * Indicates we should send a SETUP packet, which we do by
		 * temporarily switching the BUFFER1 pointer in the ring
		 * entry.  The reclaim routine will restore BUFFER1 to its
		 * usual value.
		 *
		 * Note that as the setup packet is tagged on the end of
		 * the TX ring, when we sync the descriptor we're also
		 * implicitly syncing the setup packet - hence, we don't
		 * need a separate ddi_dma_sync() call here.
		 */
		desc1 = dmfe_setup_desc1;
		dmfe_ring_put32(descp, index, BUFFER1, descp->setup_dvma);
	} else {
		/*
		 * A regular packet; we copy the data into a pre-mapped
		 * buffer, which avoids the overhead (and complication)
		 * of mapping/unmapping STREAMS buffers and keeping hold
		 * of them until the DMA has completed.
		 *
		 * Because all buffers are the same size, and larger
		 * than the longest single valid message, we don't have
		 * to bother about splitting the message across multiple
		 * buffers.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		totlen = 0;
		bp = mp;

		/*
		 * Copy all (remaining) mblks in the message ...
		 *
		 * NOTE(review): mblks beyond DMFE_MAX_PKT_SIZE are
		 * silently skipped here; the ASSERT below only catches
		 * oversize chains in DEBUG builds.
		 */
		for (; bp != NULL; bp = bp->b_cont) {
			mblen = MBLKL(bp);
			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
				bcopy(bp->b_rptr, txb, mblen);
				txb += mblen;
			}
		}

		/*
		 * Is this a multicast or broadcast packet?  We do
		 * this so that we can track statistics accurately
		 * when we reclaim it.  (Destination address is the
		 * first ETHERADDRL bytes of the frame; an odd first
		 * byte means group/multicast.)
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		if (txb[0] & 0x1) {
			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
				dmfep->tx_bcast[index / NBBY] |=
				    (1 << (index % NBBY));
			} else {
				dmfep->tx_mcast[index / NBBY] |=
				    (1 << (index % NBBY));
			}
		}

		/*
		 * We've reached the end of the chain; and we should have
		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
		 * buffer.  Note that the <size> field in the descriptor is
		 * only 11 bits, so bigger packets would be a problem!
		 */
		ASSERT(bp == NULL);
		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
		totlen &= TX_BUFFER_SIZE1;
		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;

		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Update ring descriptor entries, sync them, and wake up the
	 * transmit process.  Only every <dmfe_tx_int_factor>th packet
	 * asks for a TX-done interrupt, to reduce interrupt load.
	 */
	if ((index & dmfe_tx_int_factor) == 0)
		desc1 |= TX_INT_ON_COMP;
	desc1 |= TX_CHAINING;
	dmfe_ring_put32(descp, index, DESC1, desc1);
	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

	/*
	 * Finally, free the message & return success
	 */
	if (mp)
		freemsg(mp);
	return (B_TRUE);
}

/*
 *
dmfe_m_tx() -- send a chain of packets 12055181Sgd78059 * 12065181Sgd78059 * Called when packet(s) are ready to be transmitted. A pointer to an 12075181Sgd78059 * M_DATA message that contains the packet is passed to this routine. 12085181Sgd78059 * The complete LLC header is contained in the message's first message 12095181Sgd78059 * block, and the remainder of the packet is contained within 12105181Sgd78059 * additional M_DATA message blocks linked to the first message block. 12115181Sgd78059 * 12125181Sgd78059 * Additional messages may be passed by linking with b_next. 12135181Sgd78059 */ 12145181Sgd78059 static mblk_t * 12155181Sgd78059 dmfe_m_tx(void *arg, mblk_t *mp) 12165181Sgd78059 { 12175181Sgd78059 dmfe_t *dmfep = arg; /* private device info */ 12185181Sgd78059 mblk_t *next; 12195181Sgd78059 12205181Sgd78059 ASSERT(mp != NULL); 12215181Sgd78059 ASSERT(dmfep->mac_state == DMFE_MAC_STARTED); 12225181Sgd78059 12235181Sgd78059 if (dmfep->chip_state != CHIP_RUNNING) 12245181Sgd78059 return (mp); 12255181Sgd78059 12265181Sgd78059 while (mp != NULL) { 12275181Sgd78059 next = mp->b_next; 12285181Sgd78059 mp->b_next = NULL; 12295181Sgd78059 if (!dmfe_send_msg(dmfep, mp)) { 12305181Sgd78059 mp->b_next = next; 12315181Sgd78059 break; 12325181Sgd78059 } 12335181Sgd78059 mp = next; 12345181Sgd78059 } 12355181Sgd78059 12365181Sgd78059 return (mp); 12375181Sgd78059 } 12385181Sgd78059 12395181Sgd78059 #undef DMFE_DBG 12405181Sgd78059 12415181Sgd78059 12425181Sgd78059 /* 12435181Sgd78059 * ========== Address-setting routines (TX-side) ========== 12445181Sgd78059 */ 12455181Sgd78059 12465181Sgd78059 #define DMFE_DBG DMFE_DBG_ADDR /* debug flag for this code */ 12475181Sgd78059 12485181Sgd78059 /* 12495181Sgd78059 * Find the index of the relevant bit in the setup packet. 12505181Sgd78059 * This must mirror the way the hardware will actually calculate it! 
12515181Sgd78059 */ 12525181Sgd78059 static uint32_t 12535181Sgd78059 dmfe_hash_index(const uint8_t *address) 12545181Sgd78059 { 12555181Sgd78059 uint32_t const POLY = HASH_POLY; 12565181Sgd78059 uint32_t crc = HASH_CRC; 12575181Sgd78059 uint32_t index; 12585181Sgd78059 uint32_t msb; 12595181Sgd78059 uchar_t currentbyte; 12605181Sgd78059 int byteslength; 12615181Sgd78059 int shift; 12625181Sgd78059 int bit; 12635181Sgd78059 12645181Sgd78059 for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) { 12655181Sgd78059 currentbyte = address[byteslength]; 12665181Sgd78059 for (bit = 0; bit < 8; ++bit) { 12675181Sgd78059 msb = crc >> 31; 12685181Sgd78059 crc <<= 1; 12695181Sgd78059 if (msb ^ (currentbyte & 1)) { 12705181Sgd78059 crc ^= POLY; 12715181Sgd78059 crc |= 0x00000001; 12725181Sgd78059 } 12735181Sgd78059 currentbyte >>= 1; 12745181Sgd78059 } 12755181Sgd78059 } 12765181Sgd78059 12775181Sgd78059 for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift) 12785181Sgd78059 index |= (((crc >> bit) & 1) << shift); 12795181Sgd78059 12805181Sgd78059 return (index); 12815181Sgd78059 } 12825181Sgd78059 12835181Sgd78059 /* 12845181Sgd78059 * Find and set/clear the relevant bit in the setup packet hash table 12855181Sgd78059 * This must mirror the way the hardware will actually interpret it! 
 */
static void
dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
{
	dma_area_t *descp;
	uint32_t tmp;

	ASSERT(mutex_owned(dmfep->oplock));

	descp = &dmfep->tx_desc;
	/* Each 32-bit word of the setup buffer holds 16 hash-table bits */
	tmp = dmfe_setup_get32(descp, index/16);
	if (val)
		tmp |= 1 << (index%16);
	else
		tmp &= ~(1 << (index%16));
	dmfe_setup_put32(descp, index/16, tmp);
}

/*
 * Update the refcount for the bit in the setup packet corresponding
 * to the specified address; if it changes between zero & nonzero,
 * also update the bitmap itself & return B_TRUE, so that the caller
 * knows to re-send the setup packet.  Otherwise (only the refcount
 * changed), return B_FALSE
 */
static boolean_t
dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
{
	uint32_t index;
	uint8_t *refp;
	boolean_t change;

	index = dmfe_hash_index(mca);
	refp = &dmfep->mcast_refs[index];
	/*
	 * Post-increment when adding (so the 0 -> 1 transition reports
	 * a change), pre-decrement when removing (so 1 -> 0 does too).
	 */
	change = (val ? (*refp)++ : --(*refp)) == 0;

	if (change)
		dmfe_update_hash(dmfep, index, val);

	return (change);
}

/*
 * "Transmit" the (possibly updated) magic setup packet
 *
 * Always returns 0: a failed "send" is recorded in <need_setup> and
 * retried by the factotum, so callers never see the failure.
 */
static int
dmfe_send_setup(dmfe_t *dmfep)
{
	int status;

	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * If the chip isn't running, we can't really send the setup frame
	 * now but it doesn't matter, 'cos it will be sent when the transmit
	 * process is restarted (see dmfe_start()).
	 */
	if ((dmfep->opmode & START_TRANSMIT) == 0)
		return (0);

	/*
	 * "Send" the setup frame.  If it fails (e.g. no resources),
	 * set a flag; then the factotum will retry the "send".  Once
	 * it works, we can clear the flag no matter how many attempts
	 * had previously failed.  We tell the caller that it worked
	 * whether it did or not; after all, it *will* work eventually.
	 */
	status = dmfe_send_msg(dmfep, NULL);
	dmfep->need_setup = status ? B_FALSE : B_TRUE;
	return (0);
}

/*
 * dmfe_m_unicst() -- set the physical network address
 */
static int
dmfe_m_unicst(void *arg, const uint8_t *macaddr)
{
	dmfe_t *dmfep = arg;
	int status;
	int index;

	/*
	 * Update our current address and send out a new setup packet
	 *
	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
	 *
	 * It is said that there is a bug in the 21140 where it fails to
	 * receive packets addressed to the specified perfect filter
	 * address.  If the same bug is present in the DM9102A, the
	 * TX_FILTER_TYPE1 bit should be set in the module variable
	 * dmfe_setup_desc1.
	 *
	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
	 * In this mode, *all* incoming addresses are hashed and looked
	 * up in the bitmap described by the setup packet.  Therefore,
	 * the bit representing the station address has to be added to
	 * the table before sending it out.  If the address is changed,
	 * the old entry should be removed before the new entry is made.
	 *
	 * NOTE: in this mode, unicast packets that are not intended for
	 * this station may be received; it is up to software to filter
	 * them out afterwards!
	 *
	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
	 * filtering.  In this mode, multicast addresses are hashed and
	 * checked against the bitmap, while unicast addresses are simply
	 * matched against the one physical address specified in the setup
	 * packet.  This means that we shouldn't receive unicast packets
	 * that aren't intended for us (but software still has to filter
	 * multicast packets just the same).
	 *
	 * Whichever mode we're using, we have to enter the broadcast
	 * address into the multicast filter map too, so we do this on
	 * the first time through after attach or reset.
	 */
	mutex_enter(dmfep->oplock);

	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
	if (!dmfep->addr_set)
		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);

	/*
	 * Remember the new current address
	 */
	ethaddr_copy(macaddr, dmfep->curr_addr);
	dmfep->addr_set = B_TRUE;

	/*
	 * Install the new physical address into the proper position in
	 * the setup frame; this is only used if we select hash+perfect
	 * filtering, but we'll put it in anyway.  The ugliness here is
	 * down to the usual chicken-and-egg problem :(
	 */
	for (index = 0; index < ETHERADDRL; index += 2)
		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
		    (macaddr[index+1] << 8) | macaddr[index]);

	/*
	 * Finally, we're ready to "transmit" the setup frame
	 */
	status = dmfe_send_setup(dmfep);
	mutex_exit(dmfep->oplock);

	return (status);
}

/*
 * dmfe_m_multicst() -- enable or disable a multicast address
 *
 * Program the hardware to enable/disable the multicast address
 * in "mca" (enable if add is true, otherwise disable it.)
 * We keep a refcount for each bit in the map, so that it still
 * works out properly if multiple addresses hash to the same bit.
 * dmfe_update_mcast() tells us whether the map actually changed;
 * if so, we have to re-"transmit" the magic setup packet.
14455181Sgd78059 */ 14465181Sgd78059 static int 14475181Sgd78059 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 14485181Sgd78059 { 14495181Sgd78059 dmfe_t *dmfep = arg; /* private device info */ 14505181Sgd78059 int status = 0; 14515181Sgd78059 14525181Sgd78059 mutex_enter(dmfep->oplock); 14535181Sgd78059 if (dmfe_update_mcast(dmfep, mca, add)) 14545181Sgd78059 status = dmfe_send_setup(dmfep); 14555181Sgd78059 mutex_exit(dmfep->oplock); 14565181Sgd78059 14575181Sgd78059 return (status); 14585181Sgd78059 } 14595181Sgd78059 14605181Sgd78059 #undef DMFE_DBG 14615181Sgd78059 14625181Sgd78059 14635181Sgd78059 /* 14645181Sgd78059 * ========== Internal state management entry points ========== 14655181Sgd78059 */ 14665181Sgd78059 14675181Sgd78059 #define DMFE_DBG DMFE_DBG_GLD /* debug flag for this code */ 14685181Sgd78059 14695181Sgd78059 /* 14705181Sgd78059 * These routines provide all the functionality required by the 14715181Sgd78059 * corresponding MAC layer entry points, but don't update the MAC layer state 14725181Sgd78059 * so they can be called internally without disturbing our record 14735181Sgd78059 * of what MAC layer thinks we should be doing ... 
 */

/*
 * dmfe_stop() -- stop processing, don't reset h/w or rings
 *
 * Caller must hold <oplock>.
 */
static void
dmfe_stop(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfe_stop_chip(dmfep, CHIP_STOPPED);
}

/*
 * dmfe_reset() -- stop processing, reset h/w & rings to initial state
 *
 * Caller must hold <oplock>, <rxlock> AND <txlock> (all three are
 * needed because the rings are re-initialised here).
 */
static void
dmfe_reset(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	dmfe_stop_chip(dmfep, CHIP_RESET);
	dmfe_init_rings(dmfep);
}

/*
 * dmfe_start() -- start transmitting/receiving
 *
 * Caller must hold <oplock> (but NOT <txlock>: dmfe_send_setup()
 * is called below, and that path takes <txlock> itself).
 */
static void
dmfe_start(dmfe_t *dmfep)
{
	uint32_t gpsr;

	ASSERT(mutex_owned(dmfep->oplock));

	ASSERT(dmfep->chip_state == CHIP_RESET ||
	    dmfep->chip_state == CHIP_STOPPED);

	/*
	 * Make opmode consistent with PHY duplex setting
	 */
	gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
	if (gpsr & GPS_FULL_DUPLEX)
		dmfep->opmode |= FULL_DUPLEX;
	else
		dmfep->opmode &= ~FULL_DUPLEX;

	/*
	 * Start transmit processing
	 * Set up the address filters
	 * Start receive processing
	 * Enable interrupts
	 */
	dmfe_start_chip(dmfep, START_TRANSMIT);
	(void) dmfe_send_setup(dmfep);
	drv_usecwait(10);
	dmfe_start_chip(dmfep, START_RECEIVE);
	dmfe_enable_interrupts(dmfep);
}

/*
 * dmfe_restart - restart transmitting/receiving after error or suspend
 */
static void
dmfe_restart(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * You need not only <oplock>, but also <rxlock> AND <txlock>
	 * in order to reset the rings, but then <txlock> *mustn't*
	 * be held across the call to dmfe_start()
	 */
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);
	dmfe_reset(dmfep);
	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	if (dmfep->mac_state == DMFE_MAC_STARTED)
		dmfe_start(dmfep);
}


/*
 * ========== MAC-required management entry points ==========
 */

/*
 * dmfe_m_stop() -- stop transmitting/receiving
 */
static void
dmfe_m_stop(void *arg)
{
	dmfe_t *dmfep = arg;			/* private device info	*/

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);
	dmfep->mac_state = DMFE_MAC_STOPPED;
	mutex_exit(dmfep->oplock);
}

/*
 * dmfe_m_start() -- start transmitting/receiving
 *
 * Always returns 0 (success).
 */
static int
dmfe_m_start(void *arg)
{
	dmfe_t *dmfep = arg;			/* private device info	*/

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(dmfep->oplock);
	dmfe_start(dmfep);
	dmfep->mac_state = DMFE_MAC_STARTED;
	mutex_exit(dmfep->oplock);

	return (0);
}

/*
 * dmfe_m_promisc() -- set or reset promiscuous mode on the board
 *
 * Program the hardware to enable/disable promiscuous and/or
 * receive-all-multicast modes.  Davicom don't document this
 * clearly, but it looks like we can do this on-the-fly (i.e.
 * without stopping & restarting the TX/RX processes).
 */
static int
dmfe_m_promisc(void *arg, boolean_t on)
{
	dmfe_t *dmfep = arg;

	mutex_enter(dmfep->oplock);
	/* PASS_MULTICAST is always cleared; only PROMISC_MODE toggles */
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	if (on)
		dmfep->opmode |= PROMISC_MODE;
	dmfe_set_opmode(dmfep);
	mutex_exit(dmfep->oplock);

	return (0);
}

#undef	DMFE_DBG


/*
 * ========== Factotum, implemented as a softint handler ==========
 */

#define	DMFE_DBG	DMFE_DBG_FACT	/* debug flag for this code	*/

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a (high-level?) hardware interrupt handler.
Its 16345181Sgd78059 * two main tasks are: 16355181Sgd78059 * reset & restart the chip after an error 16365181Sgd78059 * update & restart the chip after a link status change 16375181Sgd78059 */ 16385181Sgd78059 static uint_t 16395181Sgd78059 dmfe_factotum(caddr_t arg) 16405181Sgd78059 { 16415181Sgd78059 dmfe_t *dmfep; 16425181Sgd78059 16436990Sgd78059 dmfep = (void *)arg; 16445181Sgd78059 ASSERT(dmfep->dmfe_guard == DMFE_GUARD); 16455181Sgd78059 16465181Sgd78059 mutex_enter(dmfep->oplock); 16475181Sgd78059 16485181Sgd78059 dmfep->factotum_flag = 0; 16495181Sgd78059 DRV_KS_INC(dmfep, KS_FACTOTUM_RUN); 16505181Sgd78059 16515181Sgd78059 /* 16525181Sgd78059 * Check for chip error ... 16535181Sgd78059 */ 16545181Sgd78059 if (dmfep->chip_state == CHIP_ERROR) { 16555181Sgd78059 /* 16565181Sgd78059 * Error recovery required: reset the chip and the rings, 16575181Sgd78059 * then, if it's supposed to be running, kick it off again. 16585181Sgd78059 */ 16595181Sgd78059 DRV_KS_INC(dmfep, KS_RECOVERY); 16605181Sgd78059 dmfe_restart(dmfep); 16615181Sgd78059 } else if (dmfep->need_setup) { 16625181Sgd78059 (void) dmfe_send_setup(dmfep); 16635181Sgd78059 } 16645181Sgd78059 mutex_exit(dmfep->oplock); 16655181Sgd78059 16665181Sgd78059 /* 16675181Sgd78059 * Then, check the link state. We need <milock> but not <oplock> 16685181Sgd78059 * to do this, but if something's changed, we need <oplock> as well 16695181Sgd78059 * in order to stop/restart the chip! Note: we could simply hold 16705181Sgd78059 * <oplock> right through here, but we'd rather not 'cos checking 16715181Sgd78059 * the link state involves reading over the bit-serial MII bus, 16725181Sgd78059 * which takes ~500us even when nothing's changed. Holding <oplock> 16735181Sgd78059 * would lock out the interrupt handler for the duration, so it's 16745181Sgd78059 * better to release it first and reacquire it only if needed. 
16755181Sgd78059 */ 16765181Sgd78059 mutex_enter(dmfep->milock); 16775181Sgd78059 if (dmfe_check_link(dmfep)) { 16785181Sgd78059 mutex_enter(dmfep->oplock); 16795181Sgd78059 dmfe_stop(dmfep); 16805181Sgd78059 DRV_KS_INC(dmfep, KS_LINK_CHECK); 16815181Sgd78059 if (dmfep->update_phy) { 16825181Sgd78059 /* 16835181Sgd78059 * The chip may reset itself for some unknown 16845181Sgd78059 * reason. If this happens, the chip will use 16855181Sgd78059 * default settings (for speed, duplex, and autoneg), 16865181Sgd78059 * which possibly aren't the user's desired settings. 16875181Sgd78059 */ 16885181Sgd78059 dmfe_update_phy(dmfep); 16895181Sgd78059 dmfep->update_phy = B_FALSE; 16905181Sgd78059 } 16915181Sgd78059 dmfe_recheck_link(dmfep, B_FALSE); 16925181Sgd78059 if (dmfep->mac_state == DMFE_MAC_STARTED) 16935181Sgd78059 dmfe_start(dmfep); 16945181Sgd78059 mutex_exit(dmfep->oplock); 16955181Sgd78059 } 16965181Sgd78059 mutex_exit(dmfep->milock); 16975181Sgd78059 16985181Sgd78059 /* 16995181Sgd78059 * Keep MAC up-to-date about the state of the link ... 
17005181Sgd78059 */ 17015181Sgd78059 mac_link_update(dmfep->mh, dmfep->link_state); 17025181Sgd78059 17035181Sgd78059 return (DDI_INTR_CLAIMED); 17045181Sgd78059 } 17055181Sgd78059 17065181Sgd78059 static void 17075181Sgd78059 dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why) 17085181Sgd78059 { 17095181Sgd78059 DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d", 17105181Sgd78059 why, ks_id, dmfep->factotum_flag)); 17115181Sgd78059 17125181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 17135181Sgd78059 DRV_KS_INC(dmfep, ks_id); 17145181Sgd78059 17155181Sgd78059 if (dmfep->factotum_flag++ == 0) 17165181Sgd78059 ddi_trigger_softintr(dmfep->factotum_id); 17175181Sgd78059 } 17185181Sgd78059 17195181Sgd78059 #undef DMFE_DBG 17205181Sgd78059 17215181Sgd78059 17225181Sgd78059 /* 17235181Sgd78059 * ========== Periodic Tasks (Cyclic handler & friends) ========== 17245181Sgd78059 */ 17255181Sgd78059 17265181Sgd78059 #define DMFE_DBG DMFE_DBG_TICK /* debug flag for this code */ 17275181Sgd78059 17285181Sgd78059 /* 17295181Sgd78059 * Periodic tick tasks, run from the cyclic handler 17305181Sgd78059 * 17315181Sgd78059 * Check the state of the link and wake the factotum if necessary 17325181Sgd78059 */ 17335181Sgd78059 static void 17345181Sgd78059 dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat) 17355181Sgd78059 { 17365181Sgd78059 link_state_t phy_state; 17375181Sgd78059 link_state_t utp_state; 17385181Sgd78059 const char *why; 17395181Sgd78059 int ks_id; 17405181Sgd78059 17415181Sgd78059 _NOTE(ARGUNUSED(istat)) 17425181Sgd78059 17435181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 17445181Sgd78059 17455181Sgd78059 /* 17465181Sgd78059 * Is it time to wake the factotum? 
We do so periodically, in 17475181Sgd78059 * case the fast check below doesn't always reveal a link change 17485181Sgd78059 */ 17495181Sgd78059 if (dmfep->link_poll_tix-- == 0) { 17505181Sgd78059 dmfep->link_poll_tix = factotum_tix; 17515181Sgd78059 why = "tick (link poll)"; 17525181Sgd78059 ks_id = KS_TICK_LINK_POLL; 17535181Sgd78059 } else { 17545181Sgd78059 why = NULL; 17555181Sgd78059 ks_id = KS_TICK_LINK_STATE; 17565181Sgd78059 } 17575181Sgd78059 17585181Sgd78059 /* 17595181Sgd78059 * Has the link status changed? If so, we might want to wake 17605181Sgd78059 * the factotum to deal with it. 17615181Sgd78059 */ 17625181Sgd78059 phy_state = (gpsr & GPS_LINK_STATUS) ? LINK_STATE_UP : LINK_STATE_DOWN; 17635181Sgd78059 utp_state = (gpsr & GPS_UTP_SIG) ? LINK_STATE_UP : LINK_STATE_DOWN; 17645181Sgd78059 if (phy_state != utp_state) 17655181Sgd78059 why = "tick (phy <> utp)"; 17665181Sgd78059 else if ((dmfep->link_state == LINK_STATE_UP) && 17675181Sgd78059 (phy_state == LINK_STATE_DOWN)) 17685181Sgd78059 why = "tick (UP -> DOWN)"; 17695181Sgd78059 else if (phy_state != dmfep->link_state) { 17705181Sgd78059 if (dmfep->link_poll_tix > factotum_fast_tix) 17715181Sgd78059 dmfep->link_poll_tix = factotum_fast_tix; 17725181Sgd78059 } 17735181Sgd78059 17745181Sgd78059 if (why != NULL) { 17755181Sgd78059 DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d", 17765181Sgd78059 why, dmfep->link_state, phy_state, utp_state)); 17775181Sgd78059 dmfe_wake_factotum(dmfep, ks_id, why); 17785181Sgd78059 } 17795181Sgd78059 } 17805181Sgd78059 17815181Sgd78059 /* 17825181Sgd78059 * Periodic tick tasks, run from the cyclic handler 17835181Sgd78059 * 17845181Sgd78059 * Check for TX stall; flag an error and wake the factotum if so. 
17855181Sgd78059 */ 17865181Sgd78059 static void 17875181Sgd78059 dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat) 17885181Sgd78059 { 17895181Sgd78059 boolean_t tx_stall; 17905181Sgd78059 uint32_t tx_state; 17915181Sgd78059 uint32_t limit; 17925181Sgd78059 17935181Sgd78059 ASSERT(mutex_owned(dmfep->oplock)); 17945181Sgd78059 17955181Sgd78059 /* 17965181Sgd78059 * Check for transmit stall ... 17975181Sgd78059 * 17985181Sgd78059 * IF there's at least one packet in the ring, AND the timeout 17995181Sgd78059 * has elapsed, AND we can't reclaim any descriptors, THEN we've 18005181Sgd78059 * stalled; we return B_TRUE to trigger a reset-and-recover cycle. 18015181Sgd78059 * 18025181Sgd78059 * Note that the timeout limit is based on the transmit engine 18035181Sgd78059 * state; we allow the transmitter longer to make progress in 18045181Sgd78059 * some states than in others, based on observations of this 18055181Sgd78059 * chip's actual behaviour in the lab. 18065181Sgd78059 * 18075181Sgd78059 * By observation, we find that on about 1 in 10000 passes through 18085181Sgd78059 * here, the TX lock is already held. In that case, we'll skip 18095181Sgd78059 * the check on this pass rather than wait. Most likely, the send 18105181Sgd78059 * routine was holding the lock when the interrupt happened, and 18115181Sgd78059 * we'll succeed next time through. In the event of a real stall, 18125181Sgd78059 * the TX ring will fill up, after which the send routine won't be 18135181Sgd78059 * called any more and then we're sure to get in. 
18145181Sgd78059 */ 18155181Sgd78059 tx_stall = B_FALSE; 18165181Sgd78059 if (mutex_tryenter(dmfep->txlock)) { 18175181Sgd78059 if (dmfep->tx.n_free < dmfep->tx.n_desc) { 18185181Sgd78059 tx_state = TX_PROCESS_STATE(istat); 18195181Sgd78059 if (gpsr & GPS_LINK_100) 18205181Sgd78059 limit = stall_100_tix[tx_state]; 18215181Sgd78059 else 18225181Sgd78059 limit = stall_10_tix[tx_state]; 18235181Sgd78059 if (++dmfep->tx_pending_tix >= limit && 18245181Sgd78059 dmfe_reclaim_tx_desc(dmfep) == B_FALSE) { 18255181Sgd78059 dmfe_log(dmfep, "TX stall detected " 18265181Sgd78059 "after %d ticks in state %d; " 18275181Sgd78059 "automatic recovery initiated", 18285181Sgd78059 dmfep->tx_pending_tix, tx_state); 18295181Sgd78059 tx_stall = B_TRUE; 18305181Sgd78059 } 18315181Sgd78059 } 18325181Sgd78059 mutex_exit(dmfep->txlock); 18335181Sgd78059 } 18345181Sgd78059 18355181Sgd78059 if (tx_stall) { 18365181Sgd78059 dmfe_stop_chip(dmfep, CHIP_ERROR); 18375181Sgd78059 dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)"); 18385181Sgd78059 } 18395181Sgd78059 } 18405181Sgd78059 18415181Sgd78059 /* 18425181Sgd78059 * Cyclic callback handler 18435181Sgd78059 */ 18445181Sgd78059 static void 18455181Sgd78059 dmfe_cyclic(void *arg) 18465181Sgd78059 { 18475181Sgd78059 dmfe_t *dmfep = arg; /* private device info */ 18485181Sgd78059 uint32_t istat; 18495181Sgd78059 uint32_t gpsr; 18505181Sgd78059 18515181Sgd78059 /* 18525181Sgd78059 * If the chip's not RUNNING, there's nothing to do. 18535181Sgd78059 * If we can't get the mutex straight away, we'll just 18545181Sgd78059 * skip this pass; we'll back back soon enough anyway. 18555181Sgd78059 */ 18565181Sgd78059 if (dmfep->chip_state != CHIP_RUNNING) 18575181Sgd78059 return; 18585181Sgd78059 if (mutex_tryenter(dmfep->oplock) == 0) 18595181Sgd78059 return; 18605181Sgd78059 18615181Sgd78059 /* 18625181Sgd78059 * Recheck chip state (it might have been stopped since we 18635181Sgd78059 * checked above). 
If still running, call each of the *tick* 18645181Sgd78059 * tasks. They will check for link change, TX stall, etc ... 18655181Sgd78059 */ 18665181Sgd78059 if (dmfep->chip_state == CHIP_RUNNING) { 18675181Sgd78059 istat = dmfe_chip_get32(dmfep, STATUS_REG); 18685181Sgd78059 gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG); 18695181Sgd78059 dmfe_tick_link_check(dmfep, gpsr, istat); 18705181Sgd78059 dmfe_tick_stall_check(dmfep, gpsr, istat); 18715181Sgd78059 } 18725181Sgd78059 18735181Sgd78059 DRV_KS_INC(dmfep, KS_CYCLIC_RUN); 18745181Sgd78059 mutex_exit(dmfep->oplock); 18755181Sgd78059 } 18765181Sgd78059 18775181Sgd78059 #undef DMFE_DBG 18785181Sgd78059 18795181Sgd78059 18805181Sgd78059 /* 18815181Sgd78059 * ========== Hardware interrupt handler ========== 18825181Sgd78059 */ 18835181Sgd78059 18845181Sgd78059 #define DMFE_DBG DMFE_DBG_INT /* debug flag for this code */ 18855181Sgd78059 18865181Sgd78059 /* 18875181Sgd78059 * dmfe_interrupt() -- handle chip interrupts 18885181Sgd78059 */ 18895181Sgd78059 static uint_t 18905181Sgd78059 dmfe_interrupt(caddr_t arg) 18915181Sgd78059 { 18925181Sgd78059 dmfe_t *dmfep; /* private device info */ 18935181Sgd78059 uint32_t interrupts; 18945181Sgd78059 uint32_t istat; 18955181Sgd78059 const char *msg; 18965181Sgd78059 mblk_t *mp; 18975181Sgd78059 boolean_t warning_msg = B_TRUE; 18985181Sgd78059 18996990Sgd78059 dmfep = (void *)arg; 19005181Sgd78059 19015181Sgd78059 /* 19025181Sgd78059 * A quick check as to whether the interrupt was from this 19035181Sgd78059 * device, before we even finish setting up all our local 19045181Sgd78059 * variables. Note that reading the interrupt status register 19055181Sgd78059 * doesn't have any unpleasant side effects such as clearing 19065181Sgd78059 * the bits read, so it's quite OK to re-read it once we have 19075181Sgd78059 * determined that we are going to service this interrupt and 19085181Sgd78059 * grabbed the mutexen. 
	 */
	istat = dmfe_chip_get32(dmfep, STATUS_REG);
	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * Unfortunately, there can be a race condition between attach()
	 * adding the interrupt handler and initialising the mutexen,
	 * and the handler itself being called because of a pending
	 * interrupt.  So, we check <imask>; if it shows that interrupts
	 * haven't yet been enabled (and therefore we shouldn't really
	 * be here at all), we will just write back the value read from
	 * the status register, thus acknowledging (and clearing) *all*
	 * pending conditions without really servicing them, and claim
	 * the interrupt.
	 */
	if (dmfep->imask == 0) {
		DMFE_DEBUG(("dmfe_interrupt: early interrupt 0x%x", istat));
		dmfe_chip_put32(dmfep, STATUS_REG, istat);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * We're committed to servicing this interrupt, but we
	 * need to get the lock before going any further ...
	 */
	mutex_enter(dmfep->oplock);
	DRV_KS_INC(dmfep, KS_INTERRUPT);

	/*
	 * Identify bits that represent enabled interrupts: re-read the
	 * status register (OR-ing in any bits newly set since the check
	 * above) and mask with the enabled-interrupt set.
	 */
	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
	interrupts = istat & dmfep->imask;
	ASSERT(interrupts != 0);

	DMFE_DEBUG(("dmfe_interrupt: istat 0x%x -> 0x%x", istat, interrupts));

	/*
	 * Check for any interrupts other than TX/RX done.
	 * If there are any, they are considered Abnormal
	 * and will cause the chip to be reset.
	 */
	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
		if (istat & ABNORMAL_SUMMARY_INT) {
			/*
			 * Any Abnormal interrupts will lead to us
			 * resetting the chip, so we don't bother
			 * to clear each interrupt individually.
			 *
			 * Our main task here is to identify the problem,
			 * by pointing out the most significant unexpected
			 * bit.  Additional bits may well be consequences
			 * of the first problem, so we consider the possible
			 * causes in order of severity.
			 */
			if (interrupts & SYSTEM_ERR_INT) {
				switch (istat & SYSTEM_ERR_BITS) {
				case SYSTEM_ERR_M_ABORT:
					msg = "Bus Master Abort";
					break;

				case SYSTEM_ERR_T_ABORT:
					msg = "Bus Target Abort";
					break;

				case SYSTEM_ERR_PARITY:
					msg = "Parity Error";
					break;

				default:
					msg = "Unknown System Bus Error";
					break;
				}
			} else if (interrupts & RX_STOPPED_INT) {
				msg = "RX process stopped";
			} else if (interrupts & RX_UNAVAIL_INT) {
				/* benign: ring momentarily full, no warning */
				msg = "RX buffer unavailable";
				warning_msg = B_FALSE;
			} else if (interrupts & RX_WATCHDOG_INT) {
				msg = "RX watchdog timeout?";
			} else if (interrupts & RX_EARLY_INT) {
				msg = "RX early interrupt?";
			} else if (interrupts & TX_STOPPED_INT) {
				msg = "TX process stopped";
			}
			else if (interrupts & TX_JABBER_INT) {
				msg = "TX jabber timeout";
			} else if (interrupts & TX_UNDERFLOW_INT) {
				msg = "TX underflow?";
			} else if (interrupts & TX_EARLY_INT) {
				msg = "TX early interrupt?";

			} else if (interrupts & LINK_STATUS_INT) {
				msg = "Link status change?";
			} else if (interrupts & GP_TIMER_INT) {
				msg = "Timer expired?";
			}

			if (warning_msg)
				dmfe_warning(dmfep, "abnormal interrupt, "
				    "status 0x%x: %s", istat, msg);

			/*
			 * We don't want to run the entire reinitialisation
			 * code out of this (high-level?) interrupt, so we
			 * simply STOP the chip, and wake up the factotum
			 * to reinitalise it ...
			 */
			dmfe_stop_chip(dmfep, CHIP_ERROR);
			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
			    "interrupt (error)");
		} else {
			/*
			 * We shouldn't really get here (it would mean
			 * there were some unprocessed enabled bits but
			 * they weren't Abnormal?), but we'll check just
			 * in case ...
			 */
			DMFE_DEBUG(("unexpected interrupt bits: 0x%x", istat));
		}
	}

	/*
	 * Acknowledge all the original bits - except in the case of an
	 * error, when we leave them unacknowledged so that the recovery
	 * code can see what was going on when the problem occurred ...
	 */
	if (dmfep->chip_state != CHIP_ERROR) {
		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
		/*
		 * Read-after-write forces completion on PCI bus.
		 *
		 */
		(void) dmfe_chip_get32(dmfep, STATUS_REG);
	}


	/*
	 * We've finished talking to the chip, so we can drop <oplock>
	 * before handling the normal interrupts, which only involve
	 * manipulation of descriptors ...
	 */
	mutex_exit(dmfep->oplock);

	/* hand any completed receive packets up to the MAC layer */
	if (interrupts & RX_PKTDONE_INT)
		if ((mp = dmfe_getp(dmfep)) != NULL)
			mac_rx(dmfep->mh, NULL, mp);

	if (interrupts & TX_PKTDONE_INT) {
		/*
		 * The only reason for taking this interrupt is to give
		 * MAC a chance to schedule queued packets after a
		 * ring-full condition.  To minimise the number of
		 * redundant TX-Done interrupts, we only mark two of the
		 * ring descriptors as 'interrupt-on-complete' - all the
		 * others are simply handed back without an interrupt.
		 */
		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
			(void) dmfe_reclaim_tx_desc(dmfep);
			mutex_exit(dmfep->txlock);
		}
		mac_tx_update(dmfep->mh);
	}

	return (DDI_INTR_CLAIMED);
}

#undef	DMFE_DBG


/*
 * ========== Statistics update handler ==========
 */

#define	DMFE_DBG	DMFE_DBG_STATS	/* debug flag for this code	*/

/*
 * dmfe_m_stat() -- return the requested statistic in *val
 *
 * Takes all four driver locks (in the usual mi > op > rx > tx order)
 * so the counters are stable while read, and reclaims any completed
 * TX descriptors first so the TX statistics are up to date.
 * Returns 0 on success, ENOTSUP for an unrecognised statistic.
 */
static int
dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	dmfe_t *dmfep = arg;
	int rv = 0;

	mutex_enter(dmfep->milock);
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	/* make sure we have all the stats collected */
	(void) dmfe_reclaim_tx_desc(dmfep);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = dmfep->op_stats_speed;
		break;

	case MAC_STAT_IPACKETS:
		*val = dmfep->rx_stats_ipackets;
		break;

	case MAC_STAT_MULTIRCV:
		*val = dmfep->rx_stats_multi;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = dmfep->rx_stats_bcast;
		break;

	case MAC_STAT_RBYTES:
		*val = dmfep->rx_stats_rbytes;
		break;

	case MAC_STAT_IERRORS:
		*val = dmfep->rx_stats_ierrors;
		break;

	case MAC_STAT_NORCVBUF:
		*val = dmfep->rx_stats_norcvbuf;
		break;

	case MAC_STAT_COLLISIONS:
		*val = dmfep->tx_stats_collisions;
		break;

	case MAC_STAT_OERRORS:
		*val = dmfep->tx_stats_oerrors;
		break;

	case MAC_STAT_OPACKETS:
		*val = dmfep->tx_stats_opackets;
		break;

	case MAC_STAT_MULTIXMT:
		*val = dmfep->tx_stats_multi;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = dmfep->tx_stats_bcast;
		break;

	case MAC_STAT_OBYTES:
		*val = dmfep->tx_stats_obytes;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = dmfep->rx_stats_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = dmfep->tx_stats_underflow;
		break;

	/* ethernet-specific RX error counters */
	case ETHER_STAT_ALIGN_ERRORS:
		*val = dmfep->rx_stats_align;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = dmfep->rx_stats_fcs;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = dmfep->rx_stats_toolong;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = dmfep->rx_stats_short;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		*val = dmfep->rx_stats_macrcv_errors;
		break;

	/* ethernet-specific TX error counters */
	case ETHER_STAT_MACXMT_ERRORS:
		*val = dmfep->tx_stats_macxmt_errors;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = dmfep->tx_stats_jabber;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		*val = dmfep->tx_stats_nocarrier;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = dmfep->tx_stats_xmtlatecoll;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = dmfep->tx_stats_excoll;
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = dmfep->tx_stats_defer;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = dmfep->tx_stats_first_coll;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = dmfep->tx_stats_multi_coll;
		break;

	/* transceiver (PHY) identification */
	case ETHER_STAT_XCVR_INUSE:
		*val = dmfep->phy_inuse;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = dmfep->phy_id;
		break;

	case ETHER_STAT_XCVR_ADDR:
		*val = dmfep->phy_addr;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = dmfep->op_stats_duplex;
		break;

	/* our capabilities, as read from the PHY's BMSR */
	case ETHER_STAT_CAP_100T4:
		*val = dmfep->param_bmsr_100T4;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = dmfep->param_bmsr_100fdx;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = dmfep->param_bmsr_100hdx;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = dmfep->param_bmsr_10fdx;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = dmfep->param_bmsr_10hdx;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = dmfep->param_bmsr_autoneg;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = dmfep->param_bmsr_remfault;
		break;

	/* capabilities we advertise (ANAR) */
	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = dmfep->param_autoneg;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = dmfep->param_anar_100T4;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = dmfep->param_anar_100fdx;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = dmfep->param_anar_100hdx;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = dmfep->param_anar_10fdx;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = dmfep->param_anar_10hdx;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = dmfep->param_anar_remfault;
		break;

	/* capabilities advertised by the link partner */
	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = dmfep->param_lp_autoneg;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = dmfep->param_lp_100T4;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = dmfep->param_lp_100fdx;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = dmfep->param_lp_100hdx;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = dmfep->param_lp_10fdx;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = dmfep->param_lp_10hdx;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = dmfep->param_lp_remfault;
		break;

	default:
		rv = ENOTSUP;
	}

	/* release in reverse order of acquisition */
	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);
	mutex_exit(dmfep->milock);

	return (rv);
}

#undef	DMFE_DBG


/*
 * ========== Ioctl handler & subfunctions ==========
 */

#define	DMFE_DBG	DMFE_DBG_IOCTL	/* debug flag for this code	*/

/*
 * Loopback operation
 *
 * Support access to the internal loopback and external loopback
 * functions selected via the Operation Mode Register (OPR).
 * These will be used by netlbtest (see BugId 4370609)
 *
 * Note that changing the loopback mode causes a stop/restart cycle
 *
 * It would be nice to evolve this to support the ioctls in sys/netlb.h,
 * but then it would be even better to use Brussels to configure this.
 */
static enum ioc_reply
dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd)
{
	loopback_t *loop_req_p;
	uint32_t loopmode;

	/* The ioctl payload must be present & big enough for a loopback_t */
	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t))
		return (IOC_INVAL);

	loop_req_p = (void *)mp->b_cont->b_rptr;

	switch (cmd) {
	default:
		/*
		 * This should never happen ...
		 */
		dmfe_error(dmfep, "dmfe_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case DMFE_GET_LOOP_MODE:
		/*
		 * This doesn't return the current loopback mode - it
		 * returns a bitmask :-( of all possible loopback modes
		 */
		DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE"));
		loop_req_p->loopback = DMFE_LOOPBACK_MODES;
		miocack(wq, mp, sizeof (loopback_t), 0);
		return (IOC_DONE);

	case DMFE_SET_LOOP_MODE:
		/*
		 * Select any of the various loopback modes
		 */
		DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
		    loop_req_p->loopback));
		switch (loop_req_p->loopback) {
		default:
			return (IOC_INVAL);

		case DMFE_LOOPBACK_OFF:
			loopmode = LOOPBACK_OFF;
			break;

		case DMFE_PHY_A_LOOPBACK_ON:
			loopmode = LOOPBACK_PHY_A;
			break;

		case DMFE_PHY_D_LOOPBACK_ON:
			loopmode = LOOPBACK_PHY_D;
			break;

		case DMFE_INT_LOOPBACK_ON:
			loopmode = LOOPBACK_INTERNAL;
			break;
		}

		/*
		 * Only change <opmode> if the requested mode differs from
		 * the current one; a change requires a stop/restart cycle,
		 * which the caller performs on seeing IOC_RESTART_ACK.
		 */
		if ((dmfep->opmode & LOOPBACK_MODE_MASK) != loopmode) {
			dmfep->opmode &= ~LOOPBACK_MODE_MASK;
			dmfep->opmode |= loopmode;
			return (IOC_RESTART_ACK);
		}

		return (IOC_ACK);
	}
}

/*
 * Specific dmfe IOCTLs, the mac module handles the generic ones.
 */
static void
dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	dmfe_t *dmfep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	int cmd;

	/*
	 * Validate the command before bothering with the mutexen ...
	 */
	iocp = (void *)mp->b_rptr;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		DMFE_DEBUG(("dmfe_m_ioctl: unknown cmd 0x%x", cmd));
		miocnak(wq, mp, 0, EINVAL);
		return;

	case DMFE_SET_LOOP_MODE:
	case DMFE_GET_LOOP_MODE:
	case ND_GET:
	case ND_SET:
		break;
	}

	/* Lock ordering here is milock then oplock */
	mutex_enter(dmfep->milock);
	mutex_enter(dmfep->oplock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case DMFE_SET_LOOP_MODE:
	case DMFE_GET_LOOP_MODE:
		status = dmfe_loop_ioctl(dmfep, wq, mp, cmd);
		break;

	case ND_GET:
	case ND_SET:
		status = dmfe_nd_ioctl(dmfep, wq, mp, cmd);
		break;
	}

	/*
	 * Do we need to restart?
	 */
	switch (status) {
	default:
		break;

	case IOC_RESTART_ACK:
	case IOC_RESTART:
		/*
		 * PHY parameters changed; we need to stop, update the
		 * PHY layer and restart before sending the reply or ACK
		 */
		dmfe_stop(dmfep);
		dmfe_update_phy(dmfep);
		dmfep->update_phy = B_FALSE;

		/*
		 * The link will now most likely go DOWN and UP, because
		 * we've changed the loopback state or the link parameters
		 * or autonegotiation.  So we have to check that it's
		 * settled down before we restart the TX/RX processes.
		 * The ioctl code will have planted some reason strings
		 * to explain what's happening, so the link state change
		 * messages won't be printed on the console.  We wake the
		 * factotum to deal with link notifications, if any ...
		 */
		if (dmfe_check_link(dmfep)) {
			dmfe_recheck_link(dmfep, B_TRUE);
			dmfe_wake_factotum(dmfep, KS_LINK_CHECK, "ioctl");
		}

		if (dmfep->mac_state == DMFE_MAC_STARTED)
			dmfe_start(dmfep);
		break;
	}

	/*
	 * The 'reasons-for-link-change', if any, don't apply any more
	 */
	mutex_exit(dmfep->oplock);
	mutex_exit(dmfep->milock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
		/*
		 * Error, reply with a NAK and EINVAL
		 */
		miocnak(wq, mp, 0, EINVAL);
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply
		 */
		qreply(wq, mp);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;
	}
}

#undef	DMFE_DBG


/*
 * ========== Per-instance setup/teardown code ==========
 */

#define	DMFE_DBG	DMFE_DBG_INIT	/* debug flag for this code	*/

/*
 * Determine local MAC address & broadcast address for this interface
 */
static void
dmfe_find_mac_address(dmfe_t *dmfep)
{
	uchar_t *prop;
	uint_t propsize;
	int err;

	/*
	 * We have to find the "vendor's factory-set address".  This is
	 * the value of the property "local-mac-address", as set by OBP
	 * (or a .conf file!)
	 *
	 * If the property is not there, then we try to find the factory
	 * mac address from the devices serial EEPROM.
	 */
	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
	if (err == DDI_PROP_SUCCESS) {
		/* only accept the property if it's a full 6-byte address */
		if (propsize == ETHERADDRL)
			ethaddr_copy(prop, dmfep->curr_addr);
		ddi_prop_free(prop);
	} else {
		/* no property set... check eeprom */
		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
		    ETHERADDRL);
	}

	DMFE_DEBUG(("dmfe_setup_mac_address: factory %s",
	    ether_sprintf((void *)dmfep->curr_addr)));
}

/*
 * Allocate & DMA-bind one area of <memsize>+<setup>+<slop> bytes and
 * fill in *dma_p.  The optional <setup> bytes (for the setup frame)
 * follow the main <memsize> region; <slop> allows for the chip reading
 * beyond the end of a buffer/descriptor (see DMFE_SLOP above).
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure, any handles already
 * acquired are left in *dma_p for the caller's cleanup path to release.
 */
static int
dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
	size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
	uint_t dma_flags, dma_area_t *dma_p)
{
	ddi_dma_cookie_t dma_cookie;
	uint_t ncookies;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    dma_p->mem_va, dma_p->alength, dma_flags,
	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
	if (err != DDI_DMA_MAPPED)
		return (DDI_FAILURE);
	/* the area must map to a single cookie (one contiguous DVMA range) */
	if ((dma_p->ncookies = ncookies) != 1)
		return (DDI_FAILURE);

	dma_p->mem_dvma = dma_cookie.dmac_address;
	if (setup > 0) {
		/* the setup-frame portion lives just after the main area */
		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
		dma_p->setup_va = dma_p->mem_va + memsize;
	} else {
		dma_p->setup_dvma = 0;
		dma_p->setup_va = NULL;
	}

	return (DDI_SUCCESS);
}

/*
 * This function allocates the transmit and receive buffers and descriptors.
 * Returns DDI_FAILURE on the first allocation that fails; partially
 * allocated areas are released later by dmfe_free_bufs() via the
 * caller's attach_fail path (once PROGRESS_BUFS is set) or not at all
 * on early failure -- NOTE(review): confirm cleanup of partial success.
 */
static int
dmfe_alloc_bufs(dmfe_t *dmfep)
{
	size_t memsize;
	int err;

	/*
	 * Allocate memory & handles for TX descriptor ring
	 */
	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->tx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for TX buffers
	 */
	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
	    &dmfep->tx_buff);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for RX descriptor ring
	 */
	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->rx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for RX buffers
	 */
	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate bitmasks for tx packet type tracking
	 */
	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);

	return (DDI_SUCCESS);
}

/*
 * Release one DMA area: unbind, free the handle and the mapped memory,
 * reversing what dmfe_alloc_dma_mem() did.  Safe on a dma_area_t that
 * was only partially set up (each stage is guarded by a NULL check).
 */
static void
dmfe_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->mem_dvma = 0;
		dma_p->setup_dvma = 0;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->mem_va = NULL;
		dma_p->setup_va = NULL;
	}
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
static void
dmfe_free_bufs(dmfe_t *dmfep)
{
	dmfe_free_dma_mem(&dmfep->rx_buff);
	dmfe_free_dma_mem(&dmfep->rx_desc);
	dmfe_free_dma_mem(&dmfep->tx_buff);
	dmfe_free_dma_mem(&dmfep->tx_desc);
	kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
	kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
}

/*
 * Undo whatever dmfe_attach() managed to do, guided by the <progress>
 * bitmask, then free the soft-state structure itself.  Also used for
 * the attach_fail path, so each step must tolerate partial setup.
 */
static void
dmfe_unattach(dmfe_t *dmfep)
{
	/*
	 * Clean up and free all DMFE data structures
	 */
	if (dmfep->cycid != NULL) {
		ddi_periodic_delete(dmfep->cycid);
		dmfep->cycid = NULL;
	}

	if (dmfep->ksp_drv != NULL)
		kstat_delete(dmfep->ksp_drv);
	if (dmfep->progress & PROGRESS_HWINT) {
		/* mutexen were initialised together with the h/w interrupt */
		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
		mutex_destroy(dmfep->txlock);
		mutex_destroy(dmfep->rxlock);
		mutex_destroy(dmfep->oplock);
	}
	if (dmfep->progress & PROGRESS_SOFTINT)
		ddi_remove_softintr(dmfep->factotum_id);
	if (dmfep->progress & PROGRESS_BUFS)
		dmfe_free_bufs(dmfep);
	if (dmfep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&dmfep->io_handle);
	if (dmfep->progress & PROGRESS_NDD)
		dmfe_nd_cleanup(dmfep);

	kmem_free(dmfep, sizeof (*dmfep));
}

/*
 * Read chip identity from PCI config space into *idp, enable bus
 * mastering, and make sure the chip isn't in SLEEP/SNOOZE power-saving
 * mode.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
{
	ddi_acc_handle_t handle;
	uint32_t regval;

	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Get vendor/device/revision.  We expect (but don't check) that
	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
	 */
	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);

	/*
	 * Turn on Bus Master Enable bit and ensure the device is not asleep
	 */
	regval = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));

	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));

	pci_config_teardown(&handle);
	return (DDI_SUCCESS);
}

/* Maps a kstat slot index to its name, for dmfe_init_kstats() below */
struct ks_index {
	int index;
	char *name;
};

static const struct ks_index ks_drv_names[] = {
	{	KS_INTERRUPT,		"intr"			},
	{	KS_CYCLIC_RUN,		"cyclic_run"		},

	{	KS_TICK_LINK_STATE,	"link_state_change"	},
	{	KS_TICK_LINK_POLL,	"link_state_poll"	},
	{	KS_TX_STALL,		"tx_stall_detect"	},
	{	KS_CHIP_ERROR,		"chip_error_interrupt"	},

	{	KS_FACTOTUM_RUN,	"factotum_run"		},
	{	KS_RECOVERY,		"factotum_recover"	},
	{	KS_LINK_CHECK,		"factotum_link_check"	},

	{	KS_LINK_UP_CNT,		"link_up_cnt"		},
	{	KS_LINK_DROP_CNT,	"link_drop_cnt"		},

	{	KS_MIIREG_BMSR,		"mii_status"		},
	{	KS_MIIREG_ANAR,		"mii_advert_cap"	},
	{	KS_MIIREG_ANLPAR,	"mii_partner_cap"	},
	{	KS_MIIREG_ANER,		"mii_expansion_cap"	},
	{	KS_MIIREG_DSCSR,	"mii_dscsr"		},

	{	-1,			NULL			}
};

/*
 * Create & install the driver-private "dmfe_events" named kstat set.
 * Failure is logged but non-fatal (the driver runs without kstats).
 */
static void
dmfe_init_kstats(dmfe_t *dmfep, int instance)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	const struct ks_index *ksip;

	/* no need to create MII stats, the mac module already does it */

	/* Create and initialise driver-defined kstats */
	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
	if (ksp != NULL) {
		for (knp = ksp->ks_data, ksip = ks_drv_names;
		    ksip->name != NULL; ++ksip) {
			kstat_named_init(&knp[ksip->index], ksip->name,
			    KSTAT_DATA_UINT64);
		}
		dmfep->ksp_drv = ksp;
		dmfep->knp_drv = knp;
		kstat_install(ksp);
	} else {
		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
	}
}

/*
 * DDI_RESUME handling: sanity-check the saved state against the
 * hardware, then restart the chip and kick the MAC layer.
 */
static int
dmfe_resume(dev_info_t *devinfo)
{
	dmfe_t *dmfep;				/* Our private data	*/
	chip_id_t chipid;

	dmfep = ddi_get_driver_private(devinfo);
	if (dmfep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (dmfep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the chip's changed its identity (*boggle*)
	 */
	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (chipid.vendor != dmfep->chipid.vendor)
		return (DDI_FAILURE);
	if (chipid.device != dmfep->chipid.device)
		return (DDI_FAILURE);
	if (chipid.revision != dmfep->chipid.revision)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off MAC scheduling
	 */
	mutex_enter(dmfep->oplock);
	dmfe_restart(dmfep);
	mutex_exit(dmfep->oplock);
	mac_tx_update(dmfep->mh);
	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	dmfe_t *dmfep;				/* Our private data	*/
	uint32_t csr6;
	int instance;
	int err;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (dmfe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
	ddi_set_driver_private(devinfo, dmfep);
	dmfep->devinfo = devinfo;
	dmfep->dmfe_guard = DMFE_GUARD;

	/*
	 * Initialize more fields in DMFE private data
	 * Determine the local MAC address
	 */
#if	DMFEDEBUG
	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
	    debug_propname, dmfe_debug);
#endif	/* DMFEDEBUG */
	dmfep->cycid = NULL;
	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
	    instance);

	/*
	 * Check for custom "opmode-reg-value" property;
	 * if none, use the defaults below for CSR6 ...
	 */
	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, opmode_propname, csr6);

	/*
	 * Read chip ID & set up config space command register(s)
	 */
	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
		dmfe_error(dmfep, "dmfe_config_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_CONFIG;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (dmfe_nd_init(dmfep)) {
		dmfe_error(dmfep, "dmfe_nd_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_NDD;

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_REGS;

	/*
	 * Get our MAC address.
	 */
	dmfe_find_mac_address(dmfep);

	/*
	 * Allocate the TX and RX descriptors/buffers.
	 */
	dmfep->tx.n_desc = dmfe_tx_desc;
	dmfep->rx.n_desc = dmfe_rx_desc;
	err = dmfe_alloc_bufs(dmfep);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handler
	 */
	dmfep->link_poll_tix = factotum_start_tix;
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_SOFTINT;

	/*
	 * Add the h/w interrupt handler & initialise mutexen
	 * (the h/w mutexen use the interrupt block cookie so they
	 * can be taken in the interrupt handler)
	 */
	if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	dmfep->progress |= PROGRESS_HWINT;

	/*
	 * Create & initialise named kstats
	 */
	dmfe_init_kstats(dmfep, instance);

	/*
	 * Reset & initialise the chip and the ring buffers
	 * Initialise the (internal) PHY
	 */
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	dmfe_reset(dmfep);

	/*
	 * Prepare the setup packet
	 */
	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
	dmfep->addr_set = B_FALSE;
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	dmfep->mac_state = DMFE_MAC_RESET;

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	dmfep->link_state = LINK_STATE_UNKNOWN;
	if (dmfe_init_phy(dmfep) != B_TRUE)
		goto attach_fail;
	dmfep->update_phy = B_TRUE;

	/*
	 * Send a reasonable setup frame.  This configures our starting
	 * address and the broadcast address.
	 */
	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dmfep;
	macp->m_dip = devinfo;
	macp->m_src_addr = dmfep->curr_addr;
	macp->m_callbacks = &dmfe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &dmfep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	/*
	 * Install the cyclic callback that we use to check for link
	 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
	 * is invoked in kernel context then.
	 */
	ASSERT(dmfep->cycid == NULL);
	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
	    dmfe_tick_us * 1000, DDI_IPL_0);
	return (DDI_SUCCESS);

attach_fail:
	/* dmfe_unattach() consults dmfep->progress to undo what was done */
	dmfe_unattach(dmfep);
	return (DDI_FAILURE);
}

/*
 * dmfe_suspend() -- suspend transmit/receive for powerdown
 */
static int
dmfe_suspend(dmfe_t *dmfep)
{
	/*
	 * Just stop processing ...
	 */
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);
	mutex_exit(dmfep->oplock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	dmfe_t *dmfep;

	dmfep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (dmfe_suspend(dmfep));

	case DDI_DETACH:
		break;
	}

	/*
	 * Unregister from the MAC subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	dmfe_unattach(dmfep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
	nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

static struct modldrv dmfe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	dmfe_ident,		/* short description */
	&dmfe_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&dmfe_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _init(9E): compute the global stall/poll timing tables (in units of
 * cyclic "ticks" of dmfe_tick_us microseconds, rounded up) before the
 * module can attach any instance, then install the module.
 */
int
_init(void)
{
	uint32_t tmp100;
	uint32_t tmp10;
	int i;
	int status;

	/* Calculate global timing parameters */
	tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
	tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us;

	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
		switch (i) {
		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
			/*
			 * The chip doesn't spontaneously recover from
			 * a stall in these states, so we reset early
			 */
			stall_100_tix[i] = tmp100;
			stall_10_tix[i] = tmp10;
			break;

		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
		default:
			/*
			 * The chip has been seen to spontaneously recover
			 * after an apparent stall in the SUSPEND state,
			 * so we'll allow it rather longer to do so.  As
			 * stalls in other states have not been observed,
			 * we'll use long timeouts for them too ...
			 */
			stall_100_tix[i] = tmp100 * 20;
			stall_10_tix[i] = tmp10 * 20;
			break;
		}
	}

	factotum_tix = (dmfe_link_poll_us+dmfe_tick_us-1)/dmfe_tick_us;
	factotum_fast_tix = 1+(factotum_tix/5);
	factotum_start_tix = 1+(factotum_tix*2);

	mac_init_ops(&dmfe_dev_ops, "dmfe");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		dmfe_log_init();

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&dmfe_dev_ops);
		dmfe_log_fini();
	}

	return (status);
}

#undef	DMFE_DBG