15748Sduboff /* 25748Sduboff * sfe_util.c: general ethernet mac driver framework version 2.6 35748Sduboff * 4*7116Sduboff * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved. 55748Sduboff * 65748Sduboff * Redistribution and use in source and binary forms, with or without 75748Sduboff * modification, are permitted provided that the following conditions are met: 85748Sduboff * 95748Sduboff * 1. Redistributions of source code must retain the above copyright notice, 105748Sduboff * this list of conditions and the following disclaimer. 115748Sduboff * 125748Sduboff * 2. Redistributions in binary form must reproduce the above copyright notice, 135748Sduboff * this list of conditions and the following disclaimer in the documentation 145748Sduboff * and/or other materials provided with the distribution. 155748Sduboff * 165748Sduboff * 3. Neither the name of the author nor the names of its contributors may be 175748Sduboff * used to endorse or promote products derived from this software without 185748Sduboff * specific prior written permission. 195748Sduboff * 205748Sduboff * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 215748Sduboff * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 225748Sduboff * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 235748Sduboff * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 245748Sduboff * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 255748Sduboff * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 265748Sduboff * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 275748Sduboff * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 285748Sduboff * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 295748Sduboff * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 305748Sduboff * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 315748Sduboff * DAMAGE. 325748Sduboff */ 335748Sduboff 345748Sduboff #pragma ident "%Z%%M% %I% %E% SMI" /* sfe device driver */ 355748Sduboff 365748Sduboff /* 375748Sduboff * System Header files. 385748Sduboff */ 395748Sduboff #include <sys/types.h> 405748Sduboff #include <sys/conf.h> 415748Sduboff #include <sys/debug.h> 425748Sduboff #include <sys/kmem.h> 435748Sduboff #include <sys/vtrace.h> 445748Sduboff #include <sys/ethernet.h> 455748Sduboff #include <sys/modctl.h> 465748Sduboff #include <sys/errno.h> 475748Sduboff #include <sys/ddi.h> 485748Sduboff #include <sys/sunddi.h> 495748Sduboff #include <sys/stream.h> /* required for MBLK* */ 505748Sduboff #include <sys/strsun.h> /* required for mionack() */ 515748Sduboff #include <sys/byteorder.h> 525748Sduboff #include <sys/pci.h> 535748Sduboff #include <inet/common.h> 545748Sduboff #include <inet/led.h> 555748Sduboff #include <inet/mi.h> 565748Sduboff #include <inet/nd.h> 575748Sduboff #include <sys/crc32.h> 585748Sduboff 595748Sduboff #include <sys/note.h> 605748Sduboff 615748Sduboff #include "sfe_mii.h" 625748Sduboff #include "sfe_util.h" 635748Sduboff 645748Sduboff 655748Sduboff 665748Sduboff extern char ident[]; 675748Sduboff 685748Sduboff /* Debugging support */ 695748Sduboff #ifdef GEM_DEBUG_LEVEL 705748Sduboff static int gem_debug = GEM_DEBUG_LEVEL; 715748Sduboff #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args 725748Sduboff #else 735748Sduboff #define DPRINTF(n, args) 745748Sduboff #undef ASSERT 755748Sduboff #define ASSERT(x) 765748Sduboff #endif 775748Sduboff 785748Sduboff #define IOC_LINESIZE 0x40 /* Is it right for amd64? */ 795748Sduboff 805748Sduboff /* 815748Sduboff * Useful macros and typedefs 825748Sduboff */ 835748Sduboff #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1)) 845748Sduboff 855748Sduboff #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1]) 865748Sduboff #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2) 875748Sduboff 885748Sduboff #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9]) 895748Sduboff #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6]) 905748Sduboff 915748Sduboff 925748Sduboff #ifndef INT32_MAX 935748Sduboff #define INT32_MAX 0x7fffffff 945748Sduboff #endif 955748Sduboff 965748Sduboff #define VTAG_OFF (ETHERADDRL*2) 975748Sduboff #ifndef VTAG_SIZE 985748Sduboff #define VTAG_SIZE 4 995748Sduboff #endif 1005748Sduboff #ifndef VTAG_TPID 1015748Sduboff #define VTAG_TPID 0x8100U 1025748Sduboff #endif 1035748Sduboff 1045748Sduboff #define GET_TXBUF(dp, sn) \ 1055748Sduboff &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)] 1065748Sduboff 1075748Sduboff #ifndef offsetof 1085748Sduboff #define offsetof(t, m) ((long)&(((t *) 0)->m)) 1095748Sduboff #endif 1105748Sduboff #define TXFLAG_VTAG(flag) \ 1115748Sduboff (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT) 1125748Sduboff 1135748Sduboff #define MAXPKTBUF(dp) \ 1145748Sduboff ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL) 1155748Sduboff 1165748Sduboff #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */ 117*7116Sduboff #define BOOLEAN(x) ((x) != 0) 1185748Sduboff 1195748Sduboff /* 1205748Sduboff * Macros to distinct chip generation. 1215748Sduboff */ 1225748Sduboff 1235748Sduboff /* 1245748Sduboff * Private functions 1255748Sduboff */ 1265748Sduboff static void gem_mii_start(struct gem_dev *); 1275748Sduboff static void gem_mii_stop(struct gem_dev *); 1285748Sduboff 1295748Sduboff /* local buffer management */ 1305748Sduboff static void gem_nd_setup(struct gem_dev *dp); 1315748Sduboff static void gem_nd_cleanup(struct gem_dev *dp); 1325748Sduboff static int gem_alloc_memory(struct gem_dev *); 1335748Sduboff static void gem_free_memory(struct gem_dev *); 1345748Sduboff static void gem_init_rx_ring(struct gem_dev *); 1355748Sduboff static void gem_init_tx_ring(struct gem_dev *); 1365748Sduboff __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *); 1375748Sduboff 1385748Sduboff static void gem_tx_timeout(struct gem_dev *); 1395748Sduboff static void gem_mii_link_watcher(struct gem_dev *dp); 1405748Sduboff static int gem_mac_init(struct gem_dev *dp); 1415748Sduboff static int gem_mac_start(struct gem_dev *dp); 1425748Sduboff static int gem_mac_stop(struct gem_dev *dp, uint_t flags); 1435748Sduboff static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp); 1445748Sduboff 1455748Sduboff static struct ether_addr gem_etherbroadcastaddr = { 1465748Sduboff 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 1475748Sduboff }; 1485748Sduboff 1495748Sduboff int gem_speed_value[] = {10, 100, 1000}; 1505748Sduboff 1515748Sduboff /* ============================================================== */ 1525748Sduboff /* 1535748Sduboff * Misc runtime routines 1545748Sduboff */ 1555748Sduboff /* ============================================================== */ 1565748Sduboff /* 1575748Sduboff * Ether CRC calculation according to 21143 data sheet 1585748Sduboff */ 1595748Sduboff uint32_t 1605748Sduboff gem_ether_crc_le(const uint8_t *addr, int len) 1615748Sduboff { 1625748Sduboff uint32_t crc; 1635748Sduboff 1645748Sduboff CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table); 1655748Sduboff return (crc); 1665748Sduboff } 1675748Sduboff 1685748Sduboff uint32_t 1695748Sduboff gem_ether_crc_be(const uint8_t *addr, int len) 1705748Sduboff { 1715748Sduboff int idx; 1725748Sduboff int bit; 1735748Sduboff uint_t data; 1745748Sduboff uint32_t crc; 1755748Sduboff #define CRC32_POLY_BE 0x04c11db7 1765748Sduboff 1775748Sduboff crc = 0xffffffff; 1785748Sduboff for (idx = 0; idx < len; idx++) { 1795748Sduboff for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { 1805748Sduboff crc = (crc << 1) 1815748Sduboff ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0); 1825748Sduboff } 1835748Sduboff } 1845748Sduboff return (crc); 1855748Sduboff #undef CRC32_POLY_BE 1865748Sduboff } 1875748Sduboff 1885748Sduboff int 1895748Sduboff gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val) 1905748Sduboff { 1915748Sduboff char propname[32]; 1925748Sduboff 1935748Sduboff (void) sprintf(propname, prop_template, dp->name); 1945748Sduboff 1955748Sduboff return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip, 1965748Sduboff DDI_PROP_DONTPASS, propname, def_val)); 1975748Sduboff } 1985748Sduboff 1995748Sduboff static int 2005748Sduboff gem_population(uint32_t x) 2015748Sduboff { 2025748Sduboff int i; 2035748Sduboff int cnt; 2045748Sduboff 2055748Sduboff cnt = 0; 2065748Sduboff for (i = 0; i < 32; i++) { 2075748Sduboff if (x & (1 << i)) { 2085748Sduboff cnt++; 2095748Sduboff } 2105748Sduboff } 2115748Sduboff return (cnt); 2125748Sduboff } 2135748Sduboff 214*7116Sduboff #ifdef GEM_DEBUG_LEVEL 215*7116Sduboff #ifdef GEM_DEBUG_VLAN 2165748Sduboff static void 217*7116Sduboff gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp, 218*7116Sduboff boolean_t check_cksum) 2195748Sduboff { 220*7116Sduboff char msg[180]; 221*7116Sduboff uint8_t buf[18+20+20]; 222*7116Sduboff uint8_t *p; 223*7116Sduboff size_t offset; 224*7116Sduboff uint_t ethertype; 225*7116Sduboff uint_t proto; 226*7116Sduboff uint_t ipproto = 0; 227*7116Sduboff uint_t iplen; 228*7116Sduboff uint_t iphlen; 229*7116Sduboff uint_t tcplen; 230*7116Sduboff uint_t udplen; 231*7116Sduboff uint_t cksum; 232*7116Sduboff int rest; 233*7116Sduboff int len; 234*7116Sduboff char *bp; 235*7116Sduboff mblk_t *tp; 236*7116Sduboff extern uint_t ip_cksum(mblk_t *, int, uint32_t); 237*7116Sduboff 238*7116Sduboff msg[0] = 0; 239*7116Sduboff bp = msg; 240*7116Sduboff 241*7116Sduboff rest = sizeof (buf); 242*7116Sduboff offset = 0; 243*7116Sduboff for (tp = mp; tp; tp = tp->b_cont) { 244*7116Sduboff len = tp->b_wptr - tp->b_rptr; 245*7116Sduboff len = min(rest, len); 246*7116Sduboff bcopy(tp->b_rptr, &buf[offset], len); 247*7116Sduboff rest -= len; 248*7116Sduboff offset += len; 249*7116Sduboff if (rest == 0) { 250*7116Sduboff break; 251*7116Sduboff } 252*7116Sduboff } 253*7116Sduboff 254*7116Sduboff offset = 0; 255*7116Sduboff p = &buf[offset]; 256*7116Sduboff 257*7116Sduboff /* ethernet address */ 258*7116Sduboff sprintf(bp, 259*7116Sduboff "ether: %02x:%02x:%02x:%02x:%02x:%02x" 260*7116Sduboff " -> %02x:%02x:%02x:%02x:%02x:%02x", 261*7116Sduboff p[6], p[7], p[8], p[9], p[10], p[11], 262*7116Sduboff p[0], p[1], p[2], p[3], p[4], p[5]); 263*7116Sduboff bp = &msg[strlen(msg)]; 264*7116Sduboff 265*7116Sduboff /* vlag tag and etherrtype */ 266*7116Sduboff ethertype = GET_ETHERTYPE(p); 267*7116Sduboff if (ethertype == VTAG_TPID) { 268*7116Sduboff sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14])); 269*7116Sduboff bp = &msg[strlen(msg)]; 270*7116Sduboff 271*7116Sduboff offset += VTAG_SIZE; 272*7116Sduboff p = &buf[offset]; 273*7116Sduboff ethertype = GET_ETHERTYPE(p); 274*7116Sduboff } 275*7116Sduboff sprintf(bp, " type:%04x", ethertype); 276*7116Sduboff bp = &msg[strlen(msg)]; 277*7116Sduboff 278*7116Sduboff /* ethernet packet length */ 279*7116Sduboff sprintf(bp, " mblklen:%d", msgdsize(mp)); 280*7116Sduboff bp = &msg[strlen(msg)]; 281*7116Sduboff if (mp->b_cont) { 282*7116Sduboff sprintf(bp, "("); 283*7116Sduboff bp = &msg[strlen(msg)]; 284*7116Sduboff for (tp = mp; tp; tp = tp->b_cont) { 285*7116Sduboff if (tp == mp) { 286*7116Sduboff sprintf(bp, "%d", tp->b_wptr - tp->b_rptr); 287*7116Sduboff } else { 288*7116Sduboff sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr); 289*7116Sduboff } 290*7116Sduboff bp = &msg[strlen(msg)]; 291*7116Sduboff } 292*7116Sduboff sprintf(bp, ")"); 293*7116Sduboff bp = &msg[strlen(msg)]; 294*7116Sduboff } 295*7116Sduboff 296*7116Sduboff if (ethertype != ETHERTYPE_IP) { 297*7116Sduboff goto x; 298*7116Sduboff } 299*7116Sduboff 300*7116Sduboff /* ip address */ 301*7116Sduboff offset += sizeof (struct ether_header); 302*7116Sduboff p = &buf[offset]; 303*7116Sduboff ipproto = p[9]; 304*7116Sduboff iplen = GET_NET16(&p[2]); 305*7116Sduboff sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d", 306*7116Sduboff p[12], p[13], p[14], p[15], 307*7116Sduboff p[16], p[17], p[18], p[19], 308*7116Sduboff ipproto, iplen); 309*7116Sduboff bp = (void *)&msg[strlen(msg)]; 310*7116Sduboff 311*7116Sduboff iphlen = (p[0] & 0xf) * 4; 312*7116Sduboff 313*7116Sduboff /* cksum for psuedo header */ 314*7116Sduboff cksum = *(uint16_t *)&p[12]; 315*7116Sduboff cksum += *(uint16_t *)&p[14]; 316*7116Sduboff cksum += *(uint16_t *)&p[16]; 317*7116Sduboff cksum += *(uint16_t *)&p[18]; 318*7116Sduboff cksum += BE_16(ipproto); 319*7116Sduboff 320*7116Sduboff /* tcp or udp protocol header */ 321*7116Sduboff offset += iphlen; 322*7116Sduboff p = &buf[offset]; 323*7116Sduboff if (ipproto == IPPROTO_TCP) { 324*7116Sduboff tcplen = iplen - iphlen; 325*7116Sduboff sprintf(bp, ", tcp: len:%d cksum:%x", 326*7116Sduboff tcplen, GET_NET16(&p[16])); 327*7116Sduboff bp = (void *)&msg[strlen(msg)]; 328*7116Sduboff 329*7116Sduboff if (check_cksum) { 330*7116Sduboff cksum += BE_16(tcplen); 331*7116Sduboff cksum = (uint16_t)ip_cksum(mp, offset, cksum); 332*7116Sduboff sprintf(bp, " (%s)", 333*7116Sduboff (cksum == 0 || cksum == 0xffff) ? "ok" : "ng"); 334*7116Sduboff bp = (void *)&msg[strlen(msg)]; 335*7116Sduboff } 336*7116Sduboff } else if (ipproto == IPPROTO_UDP) { 337*7116Sduboff udplen = GET_NET16(&p[4]); 338*7116Sduboff sprintf(bp, ", udp: len:%d cksum:%x", 339*7116Sduboff udplen, GET_NET16(&p[6])); 340*7116Sduboff bp = (void *)&msg[strlen(msg)]; 341*7116Sduboff 342*7116Sduboff if (GET_NET16(&p[6]) && check_cksum) { 343*7116Sduboff cksum += *(uint16_t *)&p[4]; 344*7116Sduboff cksum = (uint16_t)ip_cksum(mp, offset, cksum); 345*7116Sduboff sprintf(bp, " (%s)", 346*7116Sduboff (cksum == 0 || cksum == 0xffff) ? "ok" : "ng"); 347*7116Sduboff bp = (void *)&msg[strlen(msg)]; 348*7116Sduboff } 349*7116Sduboff } 350*7116Sduboff x: 351*7116Sduboff cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg); 3525748Sduboff } 353*7116Sduboff #endif /* GEM_DEBUG_VLAN */ 354*7116Sduboff #endif /* GEM_DEBUG_LEVEL */ 355*7116Sduboff 3565748Sduboff /* ============================================================== */ 3575748Sduboff /* 3585748Sduboff * IO cache flush 3595748Sduboff */ 3605748Sduboff /* ============================================================== */ 3615748Sduboff __INLINE__ void 3625748Sduboff gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how) 3635748Sduboff { 3645748Sduboff int n; 3655748Sduboff int m; 3665748Sduboff int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift; 3675748Sduboff 3685748Sduboff /* sync active descriptors */ 3695748Sduboff if (rx_desc_unit_shift < 0 || nslot == 0) { 3705748Sduboff /* no rx descriptor ring */ 3715748Sduboff return; 3725748Sduboff } 3735748Sduboff 3745748Sduboff n = dp->gc.gc_rx_ring_size - head; 3755748Sduboff if ((m = nslot - n) > 0) { 3765748Sduboff (void) ddi_dma_sync(dp->desc_dma_handle, 3775748Sduboff (off_t)0, 3785748Sduboff (size_t)(m << rx_desc_unit_shift), 3795748Sduboff how); 3805748Sduboff nslot = n; 3815748Sduboff } 3825748Sduboff 3835748Sduboff (void) ddi_dma_sync(dp->desc_dma_handle, 3845748Sduboff (off_t)(head << rx_desc_unit_shift), 3855748Sduboff (size_t)(nslot << rx_desc_unit_shift), 3865748Sduboff how); 3875748Sduboff } 3885748Sduboff 3895748Sduboff __INLINE__ void 3905748Sduboff gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how) 3915748Sduboff { 3925748Sduboff int n; 3935748Sduboff int m; 3945748Sduboff int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift; 3955748Sduboff 3965748Sduboff /* sync active descriptors */ 3975748Sduboff if (tx_desc_unit_shift < 0 || nslot == 0) { 3985748Sduboff /* no tx descriptor ring */ 3995748Sduboff return; 4005748Sduboff } 4015748Sduboff 4025748Sduboff n = dp->gc.gc_tx_ring_size - head; 4035748Sduboff if ((m = nslot - n) > 0) { 4045748Sduboff (void) ddi_dma_sync(dp->desc_dma_handle, 4055748Sduboff (off_t)(dp->tx_ring_dma - dp->rx_ring_dma), 4065748Sduboff (size_t)(m << tx_desc_unit_shift), 4075748Sduboff how); 4085748Sduboff nslot = n; 4095748Sduboff } 4105748Sduboff 4115748Sduboff (void) ddi_dma_sync(dp->desc_dma_handle, 4125748Sduboff (off_t)((head << tx_desc_unit_shift) 4135748Sduboff + (dp->tx_ring_dma - dp->rx_ring_dma)), 4145748Sduboff (size_t)(nslot << tx_desc_unit_shift), 4155748Sduboff how); 4165748Sduboff } 4175748Sduboff 4185748Sduboff static void 4195748Sduboff gem_rx_start_default(struct gem_dev *dp, int head, int nslot) 4205748Sduboff { 4215748Sduboff gem_rx_desc_dma_sync(dp, 4225748Sduboff SLOT(head, dp->gc.gc_rx_ring_size), nslot, 4235748Sduboff DDI_DMA_SYNC_FORDEV); 4245748Sduboff } 4255748Sduboff 4265748Sduboff /* ============================================================== */ 4275748Sduboff /* 4285748Sduboff * Buffer management 4295748Sduboff */ 4305748Sduboff /* ============================================================== */ 4315748Sduboff static void 4325748Sduboff gem_dump_txbuf(struct gem_dev *dp, int level, const char *title) 4335748Sduboff { 4345748Sduboff cmn_err(level, 4355748Sduboff "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), " 4365748Sduboff "tx_softq: %d[%d] %d[%d] (+%d), " 4375748Sduboff "tx_free: %d[%d] %d[%d] (+%d), " 4385748Sduboff "tx_desc: %d[%d] %d[%d] (+%d), " 439*7116Sduboff "intr: %d[%d] (+%d), ", 4405748Sduboff dp->name, title, 4415748Sduboff dp->tx_active_head, 4425748Sduboff SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size), 4435748Sduboff dp->tx_active_tail, 4445748Sduboff SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size), 4455748Sduboff dp->tx_active_tail - dp->tx_active_head, 4465748Sduboff dp->tx_softq_head, 4475748Sduboff SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size), 4485748Sduboff dp->tx_softq_tail, 4495748Sduboff SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size), 4505748Sduboff dp->tx_softq_tail - dp->tx_softq_head, 4515748Sduboff dp->tx_free_head, 4525748Sduboff SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size), 4535748Sduboff dp->tx_free_tail, 4545748Sduboff SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size), 4555748Sduboff dp->tx_free_tail - dp->tx_free_head, 4565748Sduboff dp->tx_desc_head, 4575748Sduboff SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size), 4585748Sduboff dp->tx_desc_tail, 4595748Sduboff SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size), 4605748Sduboff dp->tx_desc_tail - dp->tx_desc_head, 4615748Sduboff dp->tx_desc_intr, 4625748Sduboff SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size), 4635748Sduboff dp->tx_desc_intr - dp->tx_desc_head); 4645748Sduboff } 4655748Sduboff 4665748Sduboff static void 4675748Sduboff gem_free_rxbuf(struct rxbuf *rbp) 4685748Sduboff { 4695748Sduboff struct gem_dev *dp; 4705748Sduboff 4715748Sduboff dp = rbp->rxb_devp; 4725748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 4735748Sduboff rbp->rxb_next = dp->rx_buf_freelist; 4745748Sduboff dp->rx_buf_freelist = rbp; 4755748Sduboff dp->rx_buf_freecnt++; 4765748Sduboff } 4775748Sduboff 4785748Sduboff /* 4795748Sduboff * gem_get_rxbuf: supply a receive buffer which have been mapped into 4805748Sduboff * DMA space. 4815748Sduboff */ 4825748Sduboff struct rxbuf * 4835748Sduboff gem_get_rxbuf(struct gem_dev *dp, int cansleep) 4845748Sduboff { 4855748Sduboff struct rxbuf *rbp; 4865748Sduboff uint_t count = 0; 4875748Sduboff int i; 4885748Sduboff int err; 4895748Sduboff 4905748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 4915748Sduboff 4925748Sduboff DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d", 4935748Sduboff dp->rx_buf_freecnt)); 4945748Sduboff /* 4955748Sduboff * Get rx buffer management structure 4965748Sduboff */ 4975748Sduboff rbp = dp->rx_buf_freelist; 4985748Sduboff if (rbp) { 4995748Sduboff /* get one from the recycle list */ 5005748Sduboff ASSERT(dp->rx_buf_freecnt > 0); 5015748Sduboff 5025748Sduboff dp->rx_buf_freelist = rbp->rxb_next; 5035748Sduboff dp->rx_buf_freecnt--; 5045748Sduboff rbp->rxb_next = NULL; 5055748Sduboff return (rbp); 5065748Sduboff } 5075748Sduboff 5085748Sduboff /* 5095748Sduboff * Allocate a rx buffer management structure 5105748Sduboff */ 5115748Sduboff rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP); 5125748Sduboff if (rbp == NULL) { 5135748Sduboff /* no memory */ 5145748Sduboff return (NULL); 5155748Sduboff } 5165748Sduboff 5175748Sduboff /* 5185748Sduboff * Prepare a back pointer to the device structure which will be 5195748Sduboff * refered on freeing the buffer later. 5205748Sduboff */ 5215748Sduboff rbp->rxb_devp = dp; 5225748Sduboff 5235748Sduboff /* allocate a dma handle for rx data buffer */ 5245748Sduboff if ((err = ddi_dma_alloc_handle(dp->dip, 5255748Sduboff &dp->gc.gc_dma_attr_rxbuf, 5265748Sduboff (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT), 5275748Sduboff NULL, &rbp->rxb_dh)) != DDI_SUCCESS) { 5285748Sduboff 5295748Sduboff cmn_err(CE_WARN, 5305748Sduboff "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d", 5315748Sduboff dp->name, __func__, err); 5325748Sduboff 5335748Sduboff kmem_free(rbp, sizeof (struct rxbuf)); 5345748Sduboff return (NULL); 5355748Sduboff } 5365748Sduboff 5375748Sduboff /* allocate a bounce buffer for rx */ 5385748Sduboff if ((err = ddi_dma_mem_alloc(rbp->rxb_dh, 5395748Sduboff ROUNDUP(dp->rx_buf_len, IOC_LINESIZE), 5405748Sduboff &dp->gc.gc_buf_attr, 5415748Sduboff /* 5425748Sduboff * if the nic requires a header at the top of receive buffers, 5435748Sduboff * it may access the rx buffer randomly. 5445748Sduboff */ 5455748Sduboff (dp->gc.gc_rx_header_len > 0) 5465748Sduboff ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING, 5475748Sduboff cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, 5485748Sduboff NULL, 5495748Sduboff &rbp->rxb_buf, &rbp->rxb_buf_len, 5505748Sduboff &rbp->rxb_bah)) != DDI_SUCCESS) { 5515748Sduboff 5525748Sduboff cmn_err(CE_WARN, 5535748Sduboff "!%s: %s: ddi_dma_mem_alloc: failed, err=%d", 5545748Sduboff dp->name, __func__, err); 5555748Sduboff 5565748Sduboff ddi_dma_free_handle(&rbp->rxb_dh); 5575748Sduboff kmem_free(rbp, sizeof (struct rxbuf)); 5585748Sduboff return (NULL); 5595748Sduboff } 5605748Sduboff 5615748Sduboff /* Mapin the bounce buffer into the DMA space */ 5625748Sduboff if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh, 5635748Sduboff NULL, rbp->rxb_buf, dp->rx_buf_len, 5645748Sduboff ((dp->gc.gc_rx_header_len > 0) 5655748Sduboff ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT) 5665748Sduboff :(DDI_DMA_READ | DDI_DMA_STREAMING)), 5675748Sduboff cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, 5685748Sduboff NULL, 5695748Sduboff rbp->rxb_dmacookie, 5705748Sduboff &count)) != DDI_DMA_MAPPED) { 5715748Sduboff 5725748Sduboff ASSERT(err != DDI_DMA_INUSE); 5735748Sduboff DPRINTF(0, (CE_WARN, 5745748Sduboff "!%s: ddi_dma_addr_bind_handle: failed, err=%d", 5755748Sduboff dp->name, __func__, err)); 5765748Sduboff 5775748Sduboff /* 5785748Sduboff * we failed to allocate a dma resource 5795748Sduboff * for the rx bounce buffer. 5805748Sduboff */ 5815748Sduboff ddi_dma_mem_free(&rbp->rxb_bah); 5825748Sduboff ddi_dma_free_handle(&rbp->rxb_dh); 5835748Sduboff kmem_free(rbp, sizeof (struct rxbuf)); 5845748Sduboff return (NULL); 5855748Sduboff } 5865748Sduboff 5875748Sduboff /* correct the rest of the DMA mapping */ 5885748Sduboff for (i = 1; i < count; i++) { 5895748Sduboff ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]); 5905748Sduboff } 5915748Sduboff rbp->rxb_nfrags = count; 5925748Sduboff 5935748Sduboff /* Now we successfully prepared an rx buffer */ 5945748Sduboff dp->rx_buf_allocated++; 5955748Sduboff 5965748Sduboff return (rbp); 5975748Sduboff } 5985748Sduboff 5995748Sduboff /* ============================================================== */ 6005748Sduboff /* 6015748Sduboff * memory resource management 6025748Sduboff */ 6035748Sduboff /* ============================================================== */ 6045748Sduboff static int 6055748Sduboff gem_alloc_memory(struct gem_dev *dp) 6065748Sduboff { 6075748Sduboff caddr_t ring; 6085748Sduboff caddr_t buf; 6095748Sduboff size_t req_size; 6105748Sduboff size_t ring_len; 6115748Sduboff size_t buf_len; 6125748Sduboff ddi_dma_cookie_t ring_cookie; 6135748Sduboff ddi_dma_cookie_t buf_cookie; 6145748Sduboff uint_t count; 6155748Sduboff int i; 6165748Sduboff int err; 6175748Sduboff struct txbuf *tbp; 6185748Sduboff int tx_buf_len; 6195748Sduboff ddi_dma_attr_t dma_attr_txbounce; 6205748Sduboff 6215748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 6225748Sduboff 6235748Sduboff dp->desc_dma_handle = NULL; 6245748Sduboff req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size; 6255748Sduboff 6265748Sduboff if (req_size > 0) { 6275748Sduboff /* 6285748Sduboff * Alloc RX/TX descriptors and a io area. 6295748Sduboff */ 6305748Sduboff if ((err = ddi_dma_alloc_handle(dp->dip, 6315748Sduboff &dp->gc.gc_dma_attr_desc, 6325748Sduboff DDI_DMA_SLEEP, NULL, 6335748Sduboff &dp->desc_dma_handle)) != DDI_SUCCESS) { 6345748Sduboff cmn_err(CE_WARN, 6355748Sduboff "!%s: %s: ddi_dma_alloc_handle failed: %d", 6365748Sduboff dp->name, __func__, err); 6375748Sduboff return (ENOMEM); 6385748Sduboff } 6395748Sduboff 6405748Sduboff if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle, 6415748Sduboff req_size, &dp->gc.gc_desc_attr, 6425748Sduboff DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 6435748Sduboff &ring, &ring_len, 6445748Sduboff &dp->desc_acc_handle)) != DDI_SUCCESS) { 6455748Sduboff cmn_err(CE_WARN, 6465748Sduboff "!%s: %s: ddi_dma_mem_alloc failed: " 6475748Sduboff "ret %d, request size: %d", 6485748Sduboff dp->name, __func__, err, (int)req_size); 6495748Sduboff ddi_dma_free_handle(&dp->desc_dma_handle); 6505748Sduboff return (ENOMEM); 6515748Sduboff } 6525748Sduboff 6535748Sduboff if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle, 6545748Sduboff NULL, ring, ring_len, 6555748Sduboff DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 6565748Sduboff DDI_DMA_SLEEP, NULL, 6575748Sduboff &ring_cookie, &count)) != DDI_SUCCESS) { 6585748Sduboff ASSERT(err != DDI_DMA_INUSE); 6595748Sduboff cmn_err(CE_WARN, 6605748Sduboff "!%s: %s: ddi_dma_addr_bind_handle failed: %d", 6615748Sduboff dp->name, __func__, err); 6625748Sduboff ddi_dma_mem_free(&dp->desc_acc_handle); 6635748Sduboff ddi_dma_free_handle(&dp->desc_dma_handle); 6645748Sduboff return (ENOMEM); 6655748Sduboff } 6665748Sduboff ASSERT(count == 1); 6675748Sduboff 6685748Sduboff /* set base of rx descriptor ring */ 6695748Sduboff dp->rx_ring = ring; 6705748Sduboff dp->rx_ring_dma = ring_cookie.dmac_laddress; 6715748Sduboff 6725748Sduboff /* set base of tx descriptor ring */ 6735748Sduboff dp->tx_ring = dp->rx_ring + dp->rx_desc_size; 6745748Sduboff dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size; 6755748Sduboff 6765748Sduboff /* set base of io area */ 6775748Sduboff dp->io_area = dp->tx_ring + dp->tx_desc_size; 6785748Sduboff dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size; 6795748Sduboff } 6805748Sduboff 6815748Sduboff /* 6825748Sduboff * Prepare DMA resources for tx packets 6835748Sduboff */ 6845748Sduboff ASSERT(dp->gc.gc_tx_buf_size > 0); 6855748Sduboff 6865748Sduboff /* Special dma attribute for tx bounce buffers */ 6875748Sduboff dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf; 6885748Sduboff dma_attr_txbounce.dma_attr_sgllen = 1; 6895748Sduboff dma_attr_txbounce.dma_attr_align = 6905748Sduboff max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE); 6915748Sduboff 6925748Sduboff /* Size for tx bounce buffers must be max tx packet size. */ 6935748Sduboff tx_buf_len = MAXPKTBUF(dp); 6945748Sduboff tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE); 6955748Sduboff 6965748Sduboff ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL); 6975748Sduboff 6985748Sduboff for (i = 0, tbp = dp->tx_buf; 6995748Sduboff i < dp->gc.gc_tx_buf_size; i++, tbp++) { 7005748Sduboff 7015748Sduboff /* setup bounce buffers for tx packets */ 7025748Sduboff if ((err = ddi_dma_alloc_handle(dp->dip, 7035748Sduboff &dma_attr_txbounce, 7045748Sduboff DDI_DMA_SLEEP, NULL, 7055748Sduboff &tbp->txb_bdh)) != DDI_SUCCESS) { 7065748Sduboff 7075748Sduboff cmn_err(CE_WARN, 7085748Sduboff "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:" 7095748Sduboff " err=%d, i=%d", 7105748Sduboff dp->name, __func__, err, i); 7115748Sduboff goto err_alloc_dh; 7125748Sduboff } 7135748Sduboff 7145748Sduboff if ((err = ddi_dma_mem_alloc(tbp->txb_bdh, 7155748Sduboff tx_buf_len, 7165748Sduboff &dp->gc.gc_buf_attr, 7175748Sduboff DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 7185748Sduboff &buf, &buf_len, 7195748Sduboff &tbp->txb_bah)) != DDI_SUCCESS) { 7205748Sduboff cmn_err(CE_WARN, 7215748Sduboff "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed" 7225748Sduboff "ret %d, request size %d", 7235748Sduboff dp->name, __func__, err, tx_buf_len); 7245748Sduboff ddi_dma_free_handle(&tbp->txb_bdh); 7255748Sduboff goto err_alloc_dh; 7265748Sduboff } 7275748Sduboff 7285748Sduboff if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh, 7295748Sduboff NULL, buf, buf_len, 7305748Sduboff DDI_DMA_WRITE | DDI_DMA_STREAMING, 7315748Sduboff DDI_DMA_SLEEP, NULL, 7325748Sduboff &buf_cookie, &count)) != DDI_SUCCESS) { 7335748Sduboff ASSERT(err != DDI_DMA_INUSE); 7345748Sduboff cmn_err(CE_WARN, 7355748Sduboff "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d", 7365748Sduboff dp->name, __func__, err); 7375748Sduboff ddi_dma_mem_free(&tbp->txb_bah); 7385748Sduboff ddi_dma_free_handle(&tbp->txb_bdh); 7395748Sduboff goto err_alloc_dh; 7405748Sduboff } 7415748Sduboff ASSERT(count == 1); 7425748Sduboff tbp->txb_buf = buf; 7435748Sduboff tbp->txb_buf_dma = buf_cookie.dmac_laddress; 7445748Sduboff } 7455748Sduboff 7465748Sduboff return (0); 7475748Sduboff 7485748Sduboff err_alloc_dh: 7495748Sduboff if (dp->gc.gc_tx_buf_size > 0) { 7505748Sduboff while (i-- > 0) { 7515748Sduboff (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh); 7525748Sduboff ddi_dma_mem_free(&dp->tx_buf[i].txb_bah); 7535748Sduboff ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh); 7545748Sduboff } 7555748Sduboff } 7565748Sduboff 7575748Sduboff if (dp->desc_dma_handle) { 7585748Sduboff (void) ddi_dma_unbind_handle(dp->desc_dma_handle); 7595748Sduboff ddi_dma_mem_free(&dp->desc_acc_handle); 7605748Sduboff ddi_dma_free_handle(&dp->desc_dma_handle); 7615748Sduboff dp->desc_dma_handle = NULL; 7625748Sduboff } 7635748Sduboff 7645748Sduboff return (ENOMEM); 7655748Sduboff } 7665748Sduboff 7675748Sduboff static void 7685748Sduboff gem_free_memory(struct gem_dev *dp) 7695748Sduboff { 7705748Sduboff int i; 7715748Sduboff struct rxbuf *rbp; 7725748Sduboff struct txbuf *tbp; 7735748Sduboff 7745748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 7755748Sduboff 7765748Sduboff /* Free TX/RX descriptors and tx padding buffer */ 7775748Sduboff if (dp->desc_dma_handle) { 7785748Sduboff (void) ddi_dma_unbind_handle(dp->desc_dma_handle); 7795748Sduboff ddi_dma_mem_free(&dp->desc_acc_handle); 7805748Sduboff ddi_dma_free_handle(&dp->desc_dma_handle); 7815748Sduboff dp->desc_dma_handle = NULL; 7825748Sduboff } 7835748Sduboff 7845748Sduboff /* Free dma handles for Tx */ 7855748Sduboff for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) { 7865748Sduboff /* Free bounce buffer associated to each txbuf */ 7875748Sduboff (void) ddi_dma_unbind_handle(tbp->txb_bdh); 7885748Sduboff ddi_dma_mem_free(&tbp->txb_bah); 7895748Sduboff ddi_dma_free_handle(&tbp->txb_bdh); 7905748Sduboff } 7915748Sduboff 7925748Sduboff /* Free rx buffer */ 7935748Sduboff while ((rbp = dp->rx_buf_freelist) != NULL) { 7945748Sduboff 7955748Sduboff ASSERT(dp->rx_buf_freecnt > 0); 7965748Sduboff 7975748Sduboff dp->rx_buf_freelist = rbp->rxb_next; 7985748Sduboff dp->rx_buf_freecnt--; 7995748Sduboff 8005748Sduboff /* release DMA mapping */ 8015748Sduboff ASSERT(rbp->rxb_dh != NULL); 8025748Sduboff 8035748Sduboff /* free dma handles for rx bbuf */ 8045748Sduboff /* it has dma mapping always */ 8055748Sduboff ASSERT(rbp->rxb_nfrags > 0); 8065748Sduboff (void) ddi_dma_unbind_handle(rbp->rxb_dh); 8075748Sduboff 8085748Sduboff /* free the associated bounce buffer and dma handle */ 8095748Sduboff ASSERT(rbp->rxb_bah != NULL); 8105748Sduboff ddi_dma_mem_free(&rbp->rxb_bah); 8115748Sduboff /* free the associated dma handle */ 8125748Sduboff ddi_dma_free_handle(&rbp->rxb_dh); 8135748Sduboff 8145748Sduboff /* free the base memory of rx buffer management */ 8155748Sduboff kmem_free(rbp, sizeof (struct rxbuf)); 8165748Sduboff } 8175748Sduboff } 8185748Sduboff 8195748Sduboff /* ============================================================== */ 8205748Sduboff /* 8215748Sduboff * Rx/Tx descriptor slot management 8225748Sduboff */ 8235748Sduboff /* ============================================================== */ 8245748Sduboff /* 8255748Sduboff * Initialize an empty rx ring. 8265748Sduboff */ 8275748Sduboff static void 8285748Sduboff gem_init_rx_ring(struct gem_dev *dp) 8295748Sduboff { 8305748Sduboff int i; 8315748Sduboff int rx_ring_size = dp->gc.gc_rx_ring_size; 8325748Sduboff 8335748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d", 8345748Sduboff dp->name, __func__, 8355748Sduboff rx_ring_size, dp->gc.gc_rx_buf_max)); 8365748Sduboff 8375748Sduboff /* make a physical chain of rx descriptors */ 8385748Sduboff for (i = 0; i < rx_ring_size; i++) { 8395748Sduboff (*dp->gc.gc_rx_desc_init)(dp, i); 8405748Sduboff } 8415748Sduboff gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV); 8425748Sduboff 8435748Sduboff dp->rx_active_head = (seqnum_t)0; 8445748Sduboff dp->rx_active_tail = (seqnum_t)0; 8455748Sduboff 8465748Sduboff ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL); 8475748Sduboff ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL); 8485748Sduboff } 8495748Sduboff 8505748Sduboff /* 8515748Sduboff * Prepare rx buffers and put them into the rx buffer/descriptor ring. 8525748Sduboff */ 8535748Sduboff static void 8545748Sduboff gem_prepare_rx_buf(struct gem_dev *dp) 8555748Sduboff { 8565748Sduboff int i; 8575748Sduboff int nrbuf; 8585748Sduboff struct rxbuf *rbp; 8595748Sduboff 8605748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 8615748Sduboff 8625748Sduboff /* Now we have no active buffers in rx ring */ 8635748Sduboff 8645748Sduboff nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max); 8655748Sduboff for (i = 0; i < nrbuf; i++) { 8665748Sduboff if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) { 8675748Sduboff break; 8685748Sduboff } 8695748Sduboff gem_append_rxbuf(dp, rbp); 8705748Sduboff } 8715748Sduboff 8725748Sduboff gem_rx_desc_dma_sync(dp, 8735748Sduboff 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV); 8745748Sduboff } 8755748Sduboff 8765748Sduboff /* 8775748Sduboff * Reclaim active rx buffers in rx buffer ring. 8785748Sduboff */ 8795748Sduboff static void 8805748Sduboff gem_clean_rx_buf(struct gem_dev *dp) 8815748Sduboff { 8825748Sduboff int i; 8835748Sduboff struct rxbuf *rbp; 8845748Sduboff int rx_ring_size = dp->gc.gc_rx_ring_size; 8855748Sduboff #ifdef GEM_DEBUG_LEVEL 8865748Sduboff int total; 8875748Sduboff #endif 8885748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 8895748Sduboff 8905748Sduboff DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free", 8915748Sduboff dp->name, __func__, dp->rx_buf_freecnt)); 8925748Sduboff /* 8935748Sduboff * clean up HW descriptors 8945748Sduboff */ 8955748Sduboff for (i = 0; i < rx_ring_size; i++) { 8965748Sduboff (*dp->gc.gc_rx_desc_clean)(dp, i); 8975748Sduboff } 8985748Sduboff gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV); 8995748Sduboff 9005748Sduboff #ifdef GEM_DEBUG_LEVEL 9015748Sduboff total = 0; 9025748Sduboff #endif 9035748Sduboff /* 9045748Sduboff * Reclaim allocated rx buffers 9055748Sduboff */ 9065748Sduboff while ((rbp = dp->rx_buf_head) != NULL) { 9075748Sduboff #ifdef GEM_DEBUG_LEVEL 9085748Sduboff total++; 9095748Sduboff #endif 9105748Sduboff /* remove the first one from rx buffer list */ 9115748Sduboff dp->rx_buf_head = rbp->rxb_next; 9125748Sduboff 9135748Sduboff /* recycle the rxbuf */ 9145748Sduboff gem_free_rxbuf(rbp); 9155748Sduboff } 9165748Sduboff dp->rx_buf_tail = (struct rxbuf *)NULL; 9175748Sduboff 9185748Sduboff DPRINTF(2, (CE_CONT, 9195748Sduboff "!%s: %s: %d buffers freeed, total: %d free", 9205748Sduboff dp->name, __func__, total, dp->rx_buf_freecnt)); 9215748Sduboff } 9225748Sduboff 9235748Sduboff /* 9245748Sduboff * Initialize an empty transmit buffer/descriptor ring 9255748Sduboff */ 9265748Sduboff static void 9275748Sduboff gem_init_tx_ring(struct gem_dev *dp) 9285748Sduboff { 9295748Sduboff int i; 9305748Sduboff int tx_buf_size = dp->gc.gc_tx_buf_size; 9315748Sduboff int tx_ring_size = dp->gc.gc_tx_ring_size; 9325748Sduboff 9335748Sduboff DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d", 9345748Sduboff dp->name, __func__, 9355748Sduboff dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size)); 9365748Sduboff 9375748Sduboff ASSERT(!dp->mac_active); 9385748Sduboff 9395748Sduboff /* initialize active list and free list */ 9405748Sduboff dp->tx_slots_base = 9415748Sduboff SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size); 9425748Sduboff dp->tx_softq_tail -= dp->tx_softq_head; 9435748Sduboff dp->tx_softq_head = (seqnum_t)0; 9445748Sduboff 9455748Sduboff dp->tx_active_head = dp->tx_softq_head; 9465748Sduboff dp->tx_active_tail = dp->tx_softq_head; 9475748Sduboff 9485748Sduboff dp->tx_free_head = dp->tx_softq_tail; 9495748Sduboff dp->tx_free_tail = dp->gc.gc_tx_buf_limit; 9505748Sduboff 9515748Sduboff dp->tx_desc_head = (seqnum_t)0; 9525748Sduboff dp->tx_desc_tail = (seqnum_t)0; 9535748Sduboff dp->tx_desc_intr = (seqnum_t)0; 9545748Sduboff 9555748Sduboff for (i = 0; i < tx_ring_size; i++) { 9565748Sduboff (*dp->gc.gc_tx_desc_init)(dp, i); 9575748Sduboff } 9585748Sduboff gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV); 9595748Sduboff } 9605748Sduboff 9615748Sduboff __INLINE__ 9625748Sduboff static void 9635748Sduboff gem_txbuf_free_dma_resources(struct txbuf *tbp) 9645748Sduboff { 9655748Sduboff if (tbp->txb_mp) { 9665748Sduboff freemsg(tbp->txb_mp); 9675748Sduboff tbp->txb_mp = NULL; 9685748Sduboff } 9695748Sduboff tbp->txb_nfrags = 0; 970*7116Sduboff tbp->txb_flag = 0; 9715748Sduboff } 9725748Sduboff #pragma inline(gem_txbuf_free_dma_resources) 9735748Sduboff 9745748Sduboff /* 9755748Sduboff * reclaim active tx buffers and reset positions in tx rings. 9765748Sduboff */ 9775748Sduboff static void 9785748Sduboff gem_clean_tx_buf(struct gem_dev *dp) 9795748Sduboff { 9805748Sduboff int i; 9815748Sduboff seqnum_t head; 9825748Sduboff seqnum_t tail; 9835748Sduboff seqnum_t sn; 9845748Sduboff struct txbuf *tbp; 9855748Sduboff int tx_ring_size = dp->gc.gc_tx_ring_size; 9865748Sduboff #ifdef GEM_DEBUG_LEVEL 9875748Sduboff int err; 9885748Sduboff #endif 9895748Sduboff 9905748Sduboff ASSERT(!dp->mac_active); 9915748Sduboff ASSERT(dp->tx_busy == 0); 9925748Sduboff ASSERT(dp->tx_softq_tail == dp->tx_free_head); 9935748Sduboff 9945748Sduboff /* 9955748Sduboff * clean up all HW descriptors 9965748Sduboff */ 9975748Sduboff for (i = 0; i < tx_ring_size; i++) { 9985748Sduboff (*dp->gc.gc_tx_desc_clean)(dp, i); 9995748Sduboff } 10005748Sduboff gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV); 10015748Sduboff 10025748Sduboff /* dequeue all active and loaded buffers */ 10035748Sduboff head = dp->tx_active_head; 10045748Sduboff tail = dp->tx_softq_tail; 10055748Sduboff 10065748Sduboff ASSERT(dp->tx_free_head - head >= 0); 10075748Sduboff tbp = GET_TXBUF(dp, head); 10085748Sduboff for (sn = head; sn != tail; sn++) { 10095748Sduboff gem_txbuf_free_dma_resources(tbp); 10105748Sduboff ASSERT(tbp->txb_mp == NULL); 10115748Sduboff dp->stats.errxmt++; 10125748Sduboff tbp = tbp->txb_next; 10135748Sduboff } 10145748Sduboff 10155748Sduboff #ifdef GEM_DEBUG_LEVEL 10165748Sduboff /* ensure no dma resources for tx are not in use now */ 10175748Sduboff err = 0; 10185748Sduboff while (sn != head + dp->gc.gc_tx_buf_size) { 10195748Sduboff if (tbp->txb_mp || tbp->txb_nfrags) { 10205748Sduboff DPRINTF(0, (CE_CONT, 10215748Sduboff "%s: %s: sn:%d[%d] mp:%p nfrags:%d", 10225748Sduboff dp->name, __func__, 10235748Sduboff sn, SLOT(sn, dp->gc.gc_tx_buf_size), 10245748Sduboff tbp->txb_mp, tbp->txb_nfrags)); 10255748Sduboff err = 1; 10265748Sduboff } 10275748Sduboff sn++; 10285748Sduboff tbp = tbp->txb_next; 10295748Sduboff } 10305748Sduboff 10315748Sduboff if (err) { 10325748Sduboff gem_dump_txbuf(dp, CE_WARN, 10335748Sduboff "gem_clean_tx_buf: tbp->txb_mp != NULL"); 10345748Sduboff } 10355748Sduboff #endif 10365748Sduboff /* recycle buffers, now no active tx buffers in the ring */ 10375748Sduboff dp->tx_free_tail += tail - head; 10385748Sduboff ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit); 10395748Sduboff 10405748Sduboff /* fix positions in tx buffer rings */ 10415748Sduboff dp->tx_active_head = dp->tx_free_head; 10425748Sduboff dp->tx_active_tail = dp->tx_free_head; 10435748Sduboff dp->tx_softq_head = dp->tx_free_head; 10445748Sduboff dp->tx_softq_tail = dp->tx_free_head; 10455748Sduboff } 10465748Sduboff 10475748Sduboff /* 10485748Sduboff * Reclaim transmitted buffers from tx buffer/descriptor ring. 10495748Sduboff */ 10505748Sduboff __INLINE__ int 10515748Sduboff gem_reclaim_txbuf(struct gem_dev *dp) 10525748Sduboff { 10535748Sduboff struct txbuf *tbp; 10545748Sduboff uint_t txstat; 10555748Sduboff int err = GEM_SUCCESS; 10565748Sduboff seqnum_t head; 10575748Sduboff seqnum_t tail; 10585748Sduboff seqnum_t sn; 10595748Sduboff seqnum_t desc_head; 10605748Sduboff int tx_ring_size = dp->gc.gc_tx_ring_size; 10615748Sduboff uint_t (*tx_desc_stat)(struct gem_dev *dp, 10625748Sduboff int slot, int ndesc) = dp->gc.gc_tx_desc_stat; 1063*7116Sduboff clock_t now; 1064*7116Sduboff 1065*7116Sduboff now = ddi_get_lbolt(); 1066*7116Sduboff if (now == (clock_t)0) { 1067*7116Sduboff /* make non-zero timestamp */ 1068*7116Sduboff now--; 1069*7116Sduboff } 10705748Sduboff 10715748Sduboff mutex_enter(&dp->xmitlock); 10725748Sduboff 10735748Sduboff head = dp->tx_active_head; 10745748Sduboff tail = dp->tx_active_tail; 10755748Sduboff 10765748Sduboff #if GEM_DEBUG_LEVEL > 2 10775748Sduboff if (head != tail) { 10785748Sduboff cmn_err(CE_CONT, "!%s: %s: " 10795748Sduboff "testing active_head:%d[%d], active_tail:%d[%d]", 10805748Sduboff dp->name, __func__, 10815748Sduboff head, SLOT(head, dp->gc.gc_tx_buf_size), 10825748Sduboff tail, SLOT(tail, dp->gc.gc_tx_buf_size)); 10835748Sduboff } 10845748Sduboff #endif 10855748Sduboff #ifdef DEBUG 10865748Sduboff if (dp->tx_reclaim_busy == 0) { 10875748Sduboff /* check tx buffer management consistency */ 10885748Sduboff ASSERT(dp->tx_free_tail - dp->tx_active_head 10895748Sduboff == dp->gc.gc_tx_buf_limit); 10905748Sduboff /* EMPTY */ 10915748Sduboff } 10925748Sduboff #endif 10935748Sduboff dp->tx_reclaim_busy++; 10945748Sduboff 10955748Sduboff /* sync all active HW descriptors */ 10965748Sduboff gem_tx_desc_dma_sync(dp, 10975748Sduboff SLOT(dp->tx_desc_head, tx_ring_size), 10985748Sduboff dp->tx_desc_tail - dp->tx_desc_head, 10995748Sduboff DDI_DMA_SYNC_FORKERNEL); 11005748Sduboff 11015748Sduboff tbp = GET_TXBUF(dp, head); 11025748Sduboff desc_head = dp->tx_desc_head; 11035748Sduboff for (sn = head; sn != tail; 11045748Sduboff dp->tx_active_head = (++sn), tbp = tbp->txb_next) { 11055748Sduboff int ndescs; 11065748Sduboff 11075748Sduboff ASSERT(tbp->txb_desc == desc_head); 11085748Sduboff 11095748Sduboff ndescs = tbp->txb_ndescs; 1110*7116Sduboff if (ndescs == 0) { 1111*7116Sduboff /* skip errored descriptors */ 1112*7116Sduboff continue; 1113*7116Sduboff } 11145748Sduboff txstat = (*tx_desc_stat)(dp, 11155748Sduboff SLOT(tbp->txb_desc, tx_ring_size), ndescs); 11165748Sduboff 11175748Sduboff if (txstat == 0) { 11185748Sduboff /* not transmitted yet */ 11195748Sduboff break; 11205748Sduboff } 11215748Sduboff 1122*7116Sduboff if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) { 1123*7116Sduboff dp->tx_blocked = now; 1124*7116Sduboff } 1125*7116Sduboff 11265748Sduboff ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR)); 11275748Sduboff 11285748Sduboff if (txstat & GEM_TX_ERR) { 11295748Sduboff err = GEM_FAILURE; 11305748Sduboff cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]", 11315748Sduboff dp->name, sn, SLOT(sn, tx_ring_size)); 11325748Sduboff } 11335748Sduboff #if GEM_DEBUG_LEVEL > 4 11345748Sduboff if (now - tbp->txb_stime >= 50) { 11355748Sduboff cmn_err(CE_WARN, "!%s: tx delay while %d mS", 11365748Sduboff dp->name, (now - tbp->txb_stime)*10); 11375748Sduboff } 11385748Sduboff #endif 11395748Sduboff /* free transmitted descriptors */ 11405748Sduboff desc_head += ndescs; 11415748Sduboff } 11425748Sduboff 11435748Sduboff if (dp->tx_desc_head != desc_head) { 11445748Sduboff /* we have reclaimed one or more tx buffers */ 11455748Sduboff dp->tx_desc_head = desc_head; 11465748Sduboff 11475748Sduboff /* If we passed the next interrupt position, update it */ 1148*7116Sduboff if (desc_head - dp->tx_desc_intr > 0) { 11495748Sduboff dp->tx_desc_intr = desc_head; 11505748Sduboff } 11515748Sduboff } 11525748Sduboff mutex_exit(&dp->xmitlock); 11535748Sduboff 11545748Sduboff /* free dma mapping resources associated with transmitted tx buffers */ 11555748Sduboff tbp = GET_TXBUF(dp, head); 11565748Sduboff tail = sn; 11575748Sduboff #if GEM_DEBUG_LEVEL > 2 11585748Sduboff if (head != tail) { 11595748Sduboff cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]", 11605748Sduboff __func__, 11615748Sduboff head, SLOT(head, dp->gc.gc_tx_buf_size), 11625748Sduboff tail, SLOT(tail, dp->gc.gc_tx_buf_size)); 11635748Sduboff } 11645748Sduboff #endif 11655748Sduboff for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) { 11665748Sduboff gem_txbuf_free_dma_resources(tbp); 11675748Sduboff } 11685748Sduboff 11695748Sduboff /* recycle the tx buffers */ 11705748Sduboff mutex_enter(&dp->xmitlock); 11715748Sduboff if (--dp->tx_reclaim_busy == 0) { 11725748Sduboff /* we are the last thread who can update free tail */ 11735748Sduboff #if GEM_DEBUG_LEVEL > 4 11745748Sduboff /* check all resouces have been deallocated */ 11755748Sduboff sn = dp->tx_free_tail; 11765748Sduboff tbp = GET_TXBUF(dp, new_tail); 11775748Sduboff while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) { 11785748Sduboff if (tbp->txb_nfrags) { 11795748Sduboff /* in use */ 11805748Sduboff break; 11815748Sduboff } 11825748Sduboff ASSERT(tbp->txb_mp == NULL); 11835748Sduboff tbp = tbp->txb_next; 11845748Sduboff sn++; 11855748Sduboff } 11865748Sduboff ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn); 11875748Sduboff #endif 11885748Sduboff dp->tx_free_tail = 11895748Sduboff dp->tx_active_head + dp->gc.gc_tx_buf_limit; 11905748Sduboff } 11915748Sduboff if (!dp->mac_active) { 11925748Sduboff /* someone may be waiting for me. */ 11935748Sduboff cv_broadcast(&dp->tx_drain_cv); 11945748Sduboff } 11955748Sduboff #if GEM_DEBUG_LEVEL > 2 11965748Sduboff cmn_err(CE_CONT, "!%s: %s: called, " 11975748Sduboff "free_head:%d free_tail:%d(+%d) added:%d", 11985748Sduboff dp->name, __func__, 11995748Sduboff dp->tx_free_head, dp->tx_free_tail, 12005748Sduboff dp->tx_free_tail - dp->tx_free_head, tail - head); 12015748Sduboff #endif 12025748Sduboff mutex_exit(&dp->xmitlock); 12035748Sduboff 12045748Sduboff return (err); 12055748Sduboff } 12065748Sduboff #pragma inline(gem_reclaim_txbuf) 12075748Sduboff 12085748Sduboff 12095748Sduboff /* 12105748Sduboff * Make tx descriptors in out-of-order manner 12115748Sduboff */ 12125748Sduboff static void 12135748Sduboff gem_tx_load_descs_oo(struct gem_dev *dp, 1214*7116Sduboff seqnum_t start_slot, seqnum_t end_slot, uint64_t flags) 12155748Sduboff { 12165748Sduboff seqnum_t sn; 12175748Sduboff struct txbuf *tbp; 12185748Sduboff int tx_ring_size = dp->gc.gc_tx_ring_size; 12195748Sduboff int (*tx_desc_write) 12205748Sduboff (struct gem_dev *dp, int slot, 12215748Sduboff ddi_dma_cookie_t *dmacookie, 12225748Sduboff int frags, uint64_t flag) = dp->gc.gc_tx_desc_write; 12235748Sduboff clock_t now = ddi_get_lbolt(); 12245748Sduboff 12255748Sduboff sn = start_slot; 12265748Sduboff tbp = GET_TXBUF(dp, sn); 12275748Sduboff do { 12285748Sduboff #if GEM_DEBUG_LEVEL > 1 12295748Sduboff if (dp->tx_cnt < 100) { 12305748Sduboff dp->tx_cnt++; 12315748Sduboff flags |= GEM_TXFLAG_INTR; 12325748Sduboff } 12335748Sduboff #endif 12345748Sduboff /* write a tx descriptor */ 12355748Sduboff tbp->txb_desc = sn; 12365748Sduboff tbp->txb_ndescs = (*tx_desc_write)(dp, 12375748Sduboff SLOT(sn, tx_ring_size), 12385748Sduboff tbp->txb_dmacookie, 12395748Sduboff tbp->txb_nfrags, flags | tbp->txb_flag); 12405748Sduboff tbp->txb_stime = now; 12415748Sduboff ASSERT(tbp->txb_ndescs == 1); 12425748Sduboff 12435748Sduboff flags = 0; 12445748Sduboff sn++; 12455748Sduboff tbp = tbp->txb_next; 12465748Sduboff } while (sn != end_slot); 12475748Sduboff } 12485748Sduboff 12495748Sduboff __INLINE__ 1250*7116Sduboff static size_t 12515748Sduboff gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp) 12525748Sduboff { 12535748Sduboff size_t min_pkt; 12545748Sduboff caddr_t bp; 12555748Sduboff size_t off; 12565748Sduboff mblk_t *tp; 12575748Sduboff size_t len; 12585748Sduboff uint64_t flag; 12595748Sduboff 12605748Sduboff ASSERT(tbp->txb_mp == NULL); 12615748Sduboff 12625748Sduboff /* we use bounce buffer for the packet */ 12635748Sduboff min_pkt = ETHERMIN; 12645748Sduboff bp = tbp->txb_buf; 12655748Sduboff off = 0; 12665748Sduboff tp = mp; 12675748Sduboff 12685748Sduboff flag = tbp->txb_flag; 12695748Sduboff if (flag & GEM_TXFLAG_SWVTAG) { 12705748Sduboff /* need to increase min packet size */ 12715748Sduboff min_pkt += VTAG_SIZE; 12725748Sduboff ASSERT((flag & GEM_TXFLAG_VTAG) == 0); 12735748Sduboff } 12745748Sduboff 12755748Sduboff /* copy the rest */ 12765748Sduboff for (; tp; tp = tp->b_cont) { 12775748Sduboff if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) { 12785748Sduboff bcopy(tp->b_rptr, &bp[off], len); 12795748Sduboff off += len; 12805748Sduboff } 12815748Sduboff } 12825748Sduboff 12835748Sduboff if (off < min_pkt && 12845748Sduboff (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) { 12855748Sduboff /* 1286*7116Sduboff * Extend the packet to minimum packet size explicitly. 12875748Sduboff * For software vlan packets, we shouldn't use tx autopad 1288*7116Sduboff * function because nics may not be aware of vlan. 12895748Sduboff * we must keep 46 octet of payload even if we use vlan. 12905748Sduboff */ 12915748Sduboff bzero(&bp[off], min_pkt - off); 12925748Sduboff off = min_pkt; 12935748Sduboff } 12945748Sduboff 12955748Sduboff (void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV); 12965748Sduboff 12975748Sduboff tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma; 12985748Sduboff tbp->txb_dmacookie[0].dmac_size = off; 12995748Sduboff 13005748Sduboff DPRINTF(2, (CE_CONT, 13015748Sduboff "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d", 13025748Sduboff dp->name, __func__, 13035748Sduboff tbp->txb_dmacookie[0].dmac_laddress, 13045748Sduboff tbp->txb_dmacookie[0].dmac_size, 13055748Sduboff (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT, 13065748Sduboff min_pkt)); 13075748Sduboff 13085748Sduboff /* save misc info */ 13095748Sduboff tbp->txb_mp = mp; 13105748Sduboff tbp->txb_nfrags = 1; 13115748Sduboff #ifdef DEBUG_MULTIFRAGS 13125748Sduboff if (dp->gc.gc_tx_max_frags >= 3 && 13135748Sduboff tbp->txb_dmacookie[0].dmac_size > 16*3) { 13145748Sduboff tbp->txb_dmacookie[1].dmac_laddress = 13155748Sduboff tbp->txb_dmacookie[0].dmac_laddress + 16; 13165748Sduboff tbp->txb_dmacookie[2].dmac_laddress = 13175748Sduboff tbp->txb_dmacookie[1].dmac_laddress + 16; 13185748Sduboff 13195748Sduboff tbp->txb_dmacookie[2].dmac_size = 13205748Sduboff tbp->txb_dmacookie[0].dmac_size - 16*2; 13215748Sduboff tbp->txb_dmacookie[1].dmac_size = 16; 13225748Sduboff tbp->txb_dmacookie[0].dmac_size = 16; 13235748Sduboff tbp->txb_nfrags = 3; 13245748Sduboff } 13255748Sduboff #endif 1326*7116Sduboff return (off); 13275748Sduboff } 13285748Sduboff #pragma inline(gem_setup_txbuf_copy) 13295748Sduboff 13305748Sduboff __INLINE__ 13315748Sduboff static void 13325748Sduboff gem_tx_start_unit(struct gem_dev *dp) 13335748Sduboff { 13345748Sduboff seqnum_t head; 13355748Sduboff seqnum_t tail; 13365748Sduboff struct txbuf *tbp_head; 13375748Sduboff struct txbuf *tbp_tail; 13385748Sduboff 13395748Sduboff /* update HW descriptors from soft queue */ 13405748Sduboff ASSERT(mutex_owned(&dp->xmitlock)); 13415748Sduboff ASSERT(dp->tx_softq_head == dp->tx_active_tail); 13425748Sduboff 13435748Sduboff head = dp->tx_softq_head; 13445748Sduboff tail = dp->tx_softq_tail; 13455748Sduboff 13465748Sduboff DPRINTF(1, (CE_CONT, 13475748Sduboff "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]", 13485748Sduboff dp->name, __func__, head, tail, tail - head, 13495748Sduboff dp->tx_desc_head, dp->tx_desc_tail, 13505748Sduboff dp->tx_desc_tail - dp->tx_desc_head)); 13515748Sduboff 13525748Sduboff ASSERT(tail - head > 0); 13535748Sduboff 13545748Sduboff dp->tx_desc_tail = tail; 13555748Sduboff 13565748Sduboff tbp_head = GET_TXBUF(dp, head); 13575748Sduboff tbp_tail = GET_TXBUF(dp, tail - 1); 13585748Sduboff 13595748Sduboff ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail); 13605748Sduboff 13615748Sduboff dp->gc.gc_tx_start(dp, 13625748Sduboff SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size), 13635748Sduboff tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc); 13645748Sduboff 13655748Sduboff /* advance softq head and active tail */ 13665748Sduboff dp->tx_softq_head = dp->tx_active_tail = tail; 13675748Sduboff } 13685748Sduboff #pragma inline(gem_tx_start_unit) 13695748Sduboff 13705748Sduboff #ifdef GEM_DEBUG_LEVEL 13715748Sduboff static int gem_send_cnt[10]; 13725748Sduboff #endif 1373*7116Sduboff #define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE) 1374*7116Sduboff #define EHLEN (sizeof (struct ether_header)) 1375*7116Sduboff /* 1376*7116Sduboff * check ether packet type and ip protocol 1377*7116Sduboff */ 1378*7116Sduboff static uint64_t 1379*7116Sduboff gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp) 1380*7116Sduboff { 1381*7116Sduboff mblk_t *tp; 1382*7116Sduboff ssize_t len; 1383*7116Sduboff uint_t vtag; 1384*7116Sduboff int off; 1385*7116Sduboff uint64_t flag; 1386*7116Sduboff 1387*7116Sduboff flag = 0ULL; 1388*7116Sduboff 1389*7116Sduboff /* 1390*7116Sduboff * prepare continuous header of the packet for protocol analysis 1391*7116Sduboff */ 1392*7116Sduboff if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) { 1393*7116Sduboff /* we use work buffer to copy mblk */ 1394*7116Sduboff for (tp = mp, off = 0; 1395*7116Sduboff tp && (off < PKT_MIN_SIZE); 1396*7116Sduboff tp = tp->b_cont, off += len) { 1397*7116Sduboff len = (long)tp->b_wptr - (long)tp->b_rptr; 1398*7116Sduboff len = min(len, PKT_MIN_SIZE - off); 1399*7116Sduboff bcopy(tp->b_rptr, &bp[off], len); 1400*7116Sduboff } 1401*7116Sduboff } else { 1402*7116Sduboff /* we can use mblk without copy */ 1403*7116Sduboff bp = mp->b_rptr; 1404*7116Sduboff } 1405*7116Sduboff 1406*7116Sduboff /* process vlan tag for GLD v3 */ 1407*7116Sduboff if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) { 1408*7116Sduboff if (dp->misc_flag & GEM_VLAN_HARD) { 1409*7116Sduboff vtag = GET_NET16(&bp[VTAG_OFF + 2]); 1410*7116Sduboff ASSERT(vtag); 1411*7116Sduboff flag |= vtag << GEM_TXFLAG_VTAG_SHIFT; 1412*7116Sduboff } else { 1413*7116Sduboff flag |= GEM_TXFLAG_SWVTAG; 1414*7116Sduboff } 1415*7116Sduboff } 1416*7116Sduboff return (flag); 1417*7116Sduboff } 1418*7116Sduboff #undef EHLEN 1419*7116Sduboff #undef PKT_MIN_SIZE 14205748Sduboff /* 14215748Sduboff * gem_send_common is an exported function because hw depend routines may 14225748Sduboff * use it for sending control frames like setup frames for 2114x chipset. 14235748Sduboff */ 14245748Sduboff mblk_t * 14255748Sduboff gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags) 14265748Sduboff { 14275748Sduboff int nmblk; 14285748Sduboff int avail; 14295748Sduboff mblk_t *tp; 14305748Sduboff mblk_t *mp; 1431*7116Sduboff int i; 14325748Sduboff struct txbuf *tbp; 14335748Sduboff seqnum_t head; 14345748Sduboff uint64_t load_flags; 14355748Sduboff uint64_t len_total = 0; 1436*7116Sduboff uint32_t bcast = 0; 1437*7116Sduboff uint32_t mcast = 0; 14385748Sduboff 14395748Sduboff ASSERT(mp_head != NULL); 14405748Sduboff 14415748Sduboff mp = mp_head; 14425748Sduboff nmblk = 1; 14435748Sduboff while ((mp = mp->b_next) != NULL) { 14445748Sduboff nmblk++; 14455748Sduboff } 14465748Sduboff #ifdef GEM_DEBUG_LEVEL 14475748Sduboff gem_send_cnt[0]++; 14485748Sduboff gem_send_cnt[min(nmblk, 9)]++; 14495748Sduboff #endif 14505748Sduboff /* 14515748Sduboff * Aquire resources 14525748Sduboff */ 14535748Sduboff mutex_enter(&dp->xmitlock); 14545748Sduboff if (dp->mac_suspended) { 14555748Sduboff mutex_exit(&dp->xmitlock); 14565748Sduboff mp = mp_head; 14575748Sduboff while (mp) { 14585748Sduboff tp = mp->b_next; 14595748Sduboff freemsg(mp); 14605748Sduboff mp = tp; 14615748Sduboff } 14625748Sduboff return (NULL); 14635748Sduboff } 14645748Sduboff 14655748Sduboff if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) { 14665748Sduboff /* don't send data packets while mac isn't active */ 1467*7116Sduboff /* XXX - should we discard packets? */ 14685748Sduboff mutex_exit(&dp->xmitlock); 14695748Sduboff return (mp_head); 14705748Sduboff } 14715748Sduboff 14725748Sduboff /* allocate free slots */ 14735748Sduboff head = dp->tx_free_head; 14745748Sduboff avail = dp->tx_free_tail - head; 14755748Sduboff 14765748Sduboff DPRINTF(2, (CE_CONT, 14775748Sduboff "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d", 14785748Sduboff dp->name, __func__, 14795748Sduboff dp->tx_free_head, dp->tx_free_tail, avail, nmblk)); 14805748Sduboff 1481*7116Sduboff avail = min(avail, dp->tx_max_packets); 14825748Sduboff 14835748Sduboff if (nmblk > avail) { 14845748Sduboff if (avail == 0) { 14855748Sduboff /* no resources; short cut */ 14865748Sduboff DPRINTF(2, (CE_CONT, "!%s: no resources", __func__)); 1487*7116Sduboff dp->tx_max_packets = max(dp->tx_max_packets - 1, 1); 14885748Sduboff goto done; 14895748Sduboff } 14905748Sduboff nmblk = avail; 14915748Sduboff } 14925748Sduboff 14935748Sduboff dp->tx_free_head = head + nmblk; 14945748Sduboff load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0; 14955748Sduboff 1496*7116Sduboff /* update last interrupt position if tx buffers exhaust. */ 1497*7116Sduboff if (nmblk == avail) { 1498*7116Sduboff tbp = GET_TXBUF(dp, head + avail - 1); 1499*7116Sduboff tbp->txb_flag = GEM_TXFLAG_INTR; 1500*7116Sduboff dp->tx_desc_intr = head + avail; 15015748Sduboff } 15025748Sduboff mutex_exit(&dp->xmitlock); 15035748Sduboff 15045748Sduboff tbp = GET_TXBUF(dp, head); 15055748Sduboff 1506*7116Sduboff for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) { 15075748Sduboff uint8_t *bp; 1508*7116Sduboff uint64_t txflag; 15095748Sduboff 15105748Sduboff /* remove one from the mblk list */ 15115748Sduboff ASSERT(mp_head != NULL); 15125748Sduboff mp = mp_head; 15135748Sduboff mp_head = mp_head->b_next; 15145748Sduboff mp->b_next = NULL; 15155748Sduboff 1516*7116Sduboff /* statistics for non-unicast packets */ 1517*7116Sduboff bp = mp->b_rptr; 15185748Sduboff if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) { 15195748Sduboff if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet, 15205748Sduboff ETHERADDRL) == 0) { 1521*7116Sduboff bcast++; 15225748Sduboff } else { 1523*7116Sduboff mcast++; 15245748Sduboff } 15255748Sduboff } 15265748Sduboff 1527*7116Sduboff /* save misc info */ 1528*7116Sduboff txflag = tbp->txb_flag; 1529*7116Sduboff txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT; 1530*7116Sduboff txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf); 1531*7116Sduboff tbp->txb_flag = txflag; 1532*7116Sduboff 1533*7116Sduboff len_total += gem_setup_txbuf_copy(dp, mp, tbp); 1534*7116Sduboff } 1535*7116Sduboff 1536*7116Sduboff (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags); 15375748Sduboff 15385748Sduboff /* Append the tbp at the tail of the active tx buffer list */ 15395748Sduboff mutex_enter(&dp->xmitlock); 15405748Sduboff 15415748Sduboff if ((--dp->tx_busy) == 0) { 15425748Sduboff /* extend the tail of softq, as new packets have been ready. */ 15435748Sduboff dp->tx_softq_tail = dp->tx_free_head; 15445748Sduboff 15455748Sduboff if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) { 15465748Sduboff /* 15475748Sduboff * The device status has changed while we are 15485748Sduboff * preparing tx buf. 15495748Sduboff * As we are the last one that make tx non-busy. 15505748Sduboff * wake up someone who may wait for us. 15515748Sduboff */ 15525748Sduboff cv_broadcast(&dp->tx_drain_cv); 15535748Sduboff } else { 15545748Sduboff ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0); 15555748Sduboff gem_tx_start_unit(dp); 15565748Sduboff } 15575748Sduboff } 15585748Sduboff dp->stats.obytes += len_total; 1559*7116Sduboff dp->stats.opackets += nmblk; 1560*7116Sduboff dp->stats.obcast += bcast; 1561*7116Sduboff dp->stats.omcast += mcast; 15625748Sduboff done: 15635748Sduboff mutex_exit(&dp->xmitlock); 15645748Sduboff 15655748Sduboff return (mp_head); 15665748Sduboff } 15675748Sduboff 15685748Sduboff /* ========================================================== */ 15695748Sduboff /* 15705748Sduboff * error detection and restart routines 15715748Sduboff */ 15725748Sduboff /* ========================================================== */ 15735748Sduboff int 15745748Sduboff gem_restart_nic(struct gem_dev *dp, uint_t flags) 15755748Sduboff { 15765748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 15775748Sduboff 1578*7116Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 1579*7116Sduboff #ifdef GEM_DEBUG_LEVEL 1580*7116Sduboff #if GEM_DEBUG_LEVEL > 1 1581*7116Sduboff gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic"); 1582*7116Sduboff #endif 1583*7116Sduboff #endif 15845748Sduboff 15855748Sduboff if (dp->mac_suspended) { 15865748Sduboff /* should we return GEM_FAILURE ? */ 15875748Sduboff return (GEM_FAILURE); 15885748Sduboff } 15895748Sduboff 15905748Sduboff /* 15915748Sduboff * We should avoid calling any routines except xxx_chip_reset 15925748Sduboff * when we are resuming the system. 15935748Sduboff */ 15945748Sduboff if (dp->mac_active) { 15955748Sduboff if (flags & GEM_RESTART_KEEP_BUF) { 15965748Sduboff /* stop rx gracefully */ 15975748Sduboff dp->rxmode &= ~RXMODE_ENABLE; 15985748Sduboff (void) (*dp->gc.gc_set_rx_filter)(dp); 15995748Sduboff } 16005748Sduboff (void) gem_mac_stop(dp, flags); 16015748Sduboff } 16025748Sduboff 16035748Sduboff /* reset the chip. */ 16045748Sduboff if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) { 16055748Sduboff cmn_err(CE_WARN, "%s: %s: failed to reset chip", 16065748Sduboff dp->name, __func__); 16075748Sduboff goto err; 16085748Sduboff } 16095748Sduboff 16105748Sduboff if (gem_mac_init(dp) != GEM_SUCCESS) { 16115748Sduboff goto err; 16125748Sduboff } 16135748Sduboff 16145748Sduboff /* setup media mode if the link have been up */ 16155748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 16165748Sduboff if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) { 16175748Sduboff goto err; 16185748Sduboff } 16195748Sduboff } 16205748Sduboff 16215748Sduboff /* setup mac address and enable rx filter */ 16225748Sduboff dp->rxmode |= RXMODE_ENABLE; 16235748Sduboff if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) { 16245748Sduboff goto err; 16255748Sduboff } 16265748Sduboff 16275748Sduboff /* 1628*7116Sduboff * XXX - a panic happened because of linkdown. 16295748Sduboff * We must check mii_state here, because the link can be down just 16305748Sduboff * before the restart event happen. If the link is down now, 16315748Sduboff * gem_mac_start() will be called from gem_mii_link_check() when 16325748Sduboff * the link become up later. 16335748Sduboff */ 16345748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 16355748Sduboff /* restart the nic */ 16365748Sduboff ASSERT(!dp->mac_active); 16375748Sduboff (void) gem_mac_start(dp); 16385748Sduboff } 16395748Sduboff return (GEM_SUCCESS); 16405748Sduboff err: 16415748Sduboff return (GEM_FAILURE); 16425748Sduboff } 16435748Sduboff 16445748Sduboff 16455748Sduboff static void 16465748Sduboff gem_tx_timeout(struct gem_dev *dp) 16475748Sduboff { 16485748Sduboff clock_t now; 16495748Sduboff boolean_t tx_sched; 16505748Sduboff struct txbuf *tbp; 16515748Sduboff 16525748Sduboff mutex_enter(&dp->intrlock); 16535748Sduboff 16545748Sduboff tx_sched = B_FALSE; 16555748Sduboff now = ddi_get_lbolt(); 16565748Sduboff 16575748Sduboff mutex_enter(&dp->xmitlock); 16585748Sduboff if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) { 16595748Sduboff mutex_exit(&dp->xmitlock); 16605748Sduboff goto schedule_next; 16615748Sduboff } 16625748Sduboff mutex_exit(&dp->xmitlock); 16635748Sduboff 16645748Sduboff /* reclaim transmitted buffers to check the trasmitter hangs or not. */ 16655748Sduboff if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) { 16665748Sduboff /* tx error happened, reset transmitter in the chip */ 16675748Sduboff (void) gem_restart_nic(dp, 0); 16685748Sduboff tx_sched = B_TRUE; 1669*7116Sduboff dp->tx_blocked = (clock_t)0; 16705748Sduboff 16715748Sduboff goto schedule_next; 16725748Sduboff } 16735748Sduboff 16745748Sduboff mutex_enter(&dp->xmitlock); 1675*7116Sduboff /* check if the transmitter thread is stuck */ 16765748Sduboff if (dp->tx_active_head == dp->tx_active_tail) { 16775748Sduboff /* no tx buffer is loaded to the nic */ 1678*7116Sduboff if (dp->tx_blocked && 1679*7116Sduboff now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) { 1680*7116Sduboff gem_dump_txbuf(dp, CE_WARN, 1681*7116Sduboff "gem_tx_timeout: tx blocked"); 1682*7116Sduboff tx_sched = B_TRUE; 1683*7116Sduboff dp->tx_blocked = (clock_t)0; 1684*7116Sduboff } 16855748Sduboff mutex_exit(&dp->xmitlock); 16865748Sduboff goto schedule_next; 16875748Sduboff } 16885748Sduboff 16895748Sduboff tbp = GET_TXBUF(dp, dp->tx_active_head); 16905748Sduboff if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) { 16915748Sduboff mutex_exit(&dp->xmitlock); 16925748Sduboff goto schedule_next; 16935748Sduboff } 16945748Sduboff mutex_exit(&dp->xmitlock); 16955748Sduboff 1696*7116Sduboff gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout"); 16975748Sduboff 16985748Sduboff /* discard untransmitted packet and restart tx. */ 1699*7116Sduboff (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT); 17005748Sduboff tx_sched = B_TRUE; 1701*7116Sduboff dp->tx_blocked = (clock_t)0; 17025748Sduboff 17035748Sduboff schedule_next: 17045748Sduboff mutex_exit(&dp->intrlock); 17055748Sduboff 17065748Sduboff /* restart the downstream if needed */ 17075748Sduboff if (tx_sched) { 17085748Sduboff mac_tx_update(dp->mh); 17095748Sduboff } 17105748Sduboff 17115748Sduboff DPRINTF(4, (CE_CONT, 1712*7116Sduboff "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d", 1713*7116Sduboff dp->name, BOOLEAN(dp->tx_blocked), 17145748Sduboff dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr)); 17155748Sduboff dp->timeout_id = 17165748Sduboff timeout((void (*)(void *))gem_tx_timeout, 17175748Sduboff (void *)dp, dp->gc.gc_tx_timeout_interval); 17185748Sduboff } 17195748Sduboff 17205748Sduboff /* ================================================================== */ 17215748Sduboff /* 17225748Sduboff * Interrupt handler 17235748Sduboff */ 17245748Sduboff /* ================================================================== */ 17255748Sduboff __INLINE__ 17265748Sduboff static void 17275748Sduboff gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head) 17285748Sduboff { 17295748Sduboff struct rxbuf *rbp; 17305748Sduboff seqnum_t tail; 17315748Sduboff int rx_ring_size = dp->gc.gc_rx_ring_size; 17325748Sduboff 17335748Sduboff ASSERT(rbp_head != NULL); 17345748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 17355748Sduboff 17365748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d", 17375748Sduboff dp->name, __func__, dp->rx_active_head, dp->rx_active_tail)); 17385748Sduboff 17395748Sduboff /* 17405748Sduboff * Add new buffers into active rx buffer list 17415748Sduboff */ 17425748Sduboff if (dp->rx_buf_head == NULL) { 17435748Sduboff dp->rx_buf_head = rbp_head; 17445748Sduboff ASSERT(dp->rx_buf_tail == NULL); 17455748Sduboff } else { 17465748Sduboff dp->rx_buf_tail->rxb_next = rbp_head; 17475748Sduboff } 17485748Sduboff 17495748Sduboff tail = dp->rx_active_tail; 17505748Sduboff for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) { 17515748Sduboff /* need to notify the tail for the lower layer */ 17525748Sduboff dp->rx_buf_tail = rbp; 17535748Sduboff 17545748Sduboff dp->gc.gc_rx_desc_write(dp, 17555748Sduboff SLOT(tail, rx_ring_size), 17565748Sduboff rbp->rxb_dmacookie, 17575748Sduboff rbp->rxb_nfrags); 17585748Sduboff 17595748Sduboff dp->rx_active_tail = tail = tail + 1; 17605748Sduboff } 17615748Sduboff } 17625748Sduboff #pragma inline(gem_append_rxbuf) 17635748Sduboff 17645748Sduboff mblk_t * 17655748Sduboff gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len) 17665748Sduboff { 17675748Sduboff int rx_header_len = dp->gc.gc_rx_header_len; 17685748Sduboff uint8_t *bp; 17695748Sduboff mblk_t *mp; 17705748Sduboff 17715748Sduboff /* allocate a new mblk */ 17725748Sduboff if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) { 17735748Sduboff ASSERT(mp->b_next == NULL); 17745748Sduboff ASSERT(mp->b_cont == NULL); 17755748Sduboff 17765748Sduboff mp->b_rptr += VTAG_SIZE; 17775748Sduboff bp = mp->b_rptr; 17785748Sduboff mp->b_wptr = bp + len; 17795748Sduboff 1780*7116Sduboff /* 1781*7116Sduboff * flush the range of the entire buffer to invalidate 1782*7116Sduboff * all of corresponding dirty entries in iocache. 1783*7116Sduboff */ 17845748Sduboff (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len, 1785*7116Sduboff 0, DDI_DMA_SYNC_FORKERNEL); 17865748Sduboff 17875748Sduboff bcopy(rbp->rxb_buf + rx_header_len, bp, len); 17885748Sduboff } 17895748Sduboff return (mp); 17905748Sduboff } 17915748Sduboff 17925748Sduboff #ifdef GEM_DEBUG_LEVEL 17935748Sduboff uint_t gem_rx_pkts[17]; 17945748Sduboff #endif 17955748Sduboff 17965748Sduboff 17975748Sduboff int 17985748Sduboff gem_receive(struct gem_dev *dp) 17995748Sduboff { 18005748Sduboff uint64_t len_total = 0; 18015748Sduboff struct rxbuf *rbp; 18025748Sduboff mblk_t *mp; 18035748Sduboff int cnt = 0; 18045748Sduboff uint64_t rxstat; 18055748Sduboff struct rxbuf *newbufs; 18065748Sduboff struct rxbuf **newbufs_tailp; 18075748Sduboff mblk_t *rx_head; 18085748Sduboff mblk_t **rx_tailp; 18095748Sduboff int rx_ring_size = dp->gc.gc_rx_ring_size; 18105748Sduboff seqnum_t active_head; 18115748Sduboff uint64_t (*rx_desc_stat)(struct gem_dev *dp, 18125748Sduboff int slot, int ndesc); 18135748Sduboff int ethermin = ETHERMIN; 18145748Sduboff int ethermax = dp->mtu + sizeof (struct ether_header); 1815*7116Sduboff int rx_header_len = dp->gc.gc_rx_header_len; 18165748Sduboff 18175748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 18185748Sduboff 18195748Sduboff DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p", 18205748Sduboff dp->name, dp->rx_buf_head)); 18215748Sduboff 18225748Sduboff rx_desc_stat = dp->gc.gc_rx_desc_stat; 18235748Sduboff newbufs_tailp = &newbufs; 18245748Sduboff rx_tailp = &rx_head; 18255748Sduboff for (active_head = dp->rx_active_head; 18265748Sduboff (rbp = dp->rx_buf_head) != NULL; active_head++) { 18275748Sduboff int len; 18285748Sduboff if (cnt == 0) { 18295748Sduboff cnt = max(dp->poll_pkt_delay*2, 10); 18305748Sduboff cnt = min(cnt, 18315748Sduboff dp->rx_active_tail - active_head); 18325748Sduboff gem_rx_desc_dma_sync(dp, 18335748Sduboff SLOT(active_head, rx_ring_size), 18345748Sduboff cnt, 18355748Sduboff DDI_DMA_SYNC_FORKERNEL); 18365748Sduboff } 1837*7116Sduboff 1838*7116Sduboff if (rx_header_len > 0) { 1839*7116Sduboff (void) ddi_dma_sync(rbp->rxb_dh, 0, 1840*7116Sduboff rx_header_len, DDI_DMA_SYNC_FORKERNEL); 1841*7116Sduboff } 1842*7116Sduboff 18435748Sduboff if (((rxstat = (*rx_desc_stat)(dp, 18445748Sduboff SLOT(active_head, rx_ring_size), 18455748Sduboff rbp->rxb_nfrags)) 18465748Sduboff & (GEM_RX_DONE | GEM_RX_ERR)) == 0) { 18475748Sduboff /* not received yet */ 18485748Sduboff break; 18495748Sduboff } 18505748Sduboff 18515748Sduboff /* Remove the head of the rx buffer list */ 18525748Sduboff dp->rx_buf_head = rbp->rxb_next; 18535748Sduboff cnt--; 18545748Sduboff 18555748Sduboff 18565748Sduboff if (rxstat & GEM_RX_ERR) { 18575748Sduboff goto next; 18585748Sduboff } 18595748Sduboff 18605748Sduboff len = rxstat & GEM_RX_LEN; 18615748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x", 18625748Sduboff dp->name, __func__, rxstat, len)); 18635748Sduboff 18645748Sduboff /* 18655748Sduboff * Copy the packet 18665748Sduboff */ 18675748Sduboff if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) { 18685748Sduboff /* no memory, discard the packet */ 18695748Sduboff dp->stats.norcvbuf++; 18705748Sduboff goto next; 18715748Sduboff } 18725748Sduboff 18735748Sduboff /* 18745748Sduboff * Process VLAN tag 18755748Sduboff */ 18765748Sduboff ethermin = ETHERMIN; 18775748Sduboff ethermax = dp->mtu + sizeof (struct ether_header); 1878*7116Sduboff if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) { 18795748Sduboff ethermax += VTAG_SIZE; 18805748Sduboff } 18815748Sduboff 18825748Sduboff /* check packet size */ 18835748Sduboff if (len < ethermin) { 18845748Sduboff dp->stats.errrcv++; 18855748Sduboff dp->stats.runt++; 18865748Sduboff freemsg(mp); 18875748Sduboff goto next; 18885748Sduboff } 18895748Sduboff 18905748Sduboff if (len > ethermax) { 18915748Sduboff dp->stats.errrcv++; 18925748Sduboff dp->stats.frame_too_long++; 18935748Sduboff freemsg(mp); 18945748Sduboff goto next; 18955748Sduboff } 18965748Sduboff 18975748Sduboff len_total += len; 18985748Sduboff 1899*7116Sduboff #ifdef GEM_DEBUG_VLAN 1900*7116Sduboff if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) { 1901*7116Sduboff gem_dump_packet(dp, (char *)__func__, mp, B_TRUE); 1902*7116Sduboff } 1903*7116Sduboff #endif 19045748Sduboff /* append received packet to temporaly rx buffer list */ 19055748Sduboff *rx_tailp = mp; 19065748Sduboff rx_tailp = &mp->b_next; 19075748Sduboff 19085748Sduboff if (mp->b_rptr[0] & 1) { 19095748Sduboff if (bcmp(mp->b_rptr, 19105748Sduboff gem_etherbroadcastaddr.ether_addr_octet, 19115748Sduboff ETHERADDRL) == 0) { 19125748Sduboff dp->stats.rbcast++; 19135748Sduboff } else { 19145748Sduboff dp->stats.rmcast++; 19155748Sduboff } 19165748Sduboff } 19175748Sduboff next: 19185748Sduboff ASSERT(rbp != NULL); 19195748Sduboff 19205748Sduboff /* append new one to temporal new buffer list */ 19215748Sduboff *newbufs_tailp = rbp; 19225748Sduboff newbufs_tailp = &rbp->rxb_next; 19235748Sduboff } 19245748Sduboff 19255748Sduboff /* advance rx_active_head */ 19265748Sduboff if ((cnt = active_head - dp->rx_active_head) > 0) { 19275748Sduboff dp->stats.rbytes += len_total; 19285748Sduboff dp->stats.rpackets += cnt; 19295748Sduboff } 19305748Sduboff dp->rx_active_head = active_head; 19315748Sduboff 19325748Sduboff /* terminate the working list */ 19335748Sduboff *newbufs_tailp = NULL; 19345748Sduboff *rx_tailp = NULL; 19355748Sduboff 19365748Sduboff if (dp->rx_buf_head == NULL) { 19375748Sduboff dp->rx_buf_tail = NULL; 19385748Sduboff } 19395748Sduboff 19405748Sduboff DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p", 19415748Sduboff dp->name, __func__, cnt, rx_head)); 19425748Sduboff 19435748Sduboff if (newbufs) { 19445748Sduboff /* 19455748Sduboff * fillfull rx list with new buffers 19465748Sduboff */ 19475748Sduboff seqnum_t head; 19485748Sduboff 19495748Sduboff /* save current tail */ 19505748Sduboff head = dp->rx_active_tail; 19515748Sduboff gem_append_rxbuf(dp, newbufs); 19525748Sduboff 19535748Sduboff /* call hw depend start routine if we have. */ 19545748Sduboff dp->gc.gc_rx_start(dp, 19555748Sduboff SLOT(head, rx_ring_size), dp->rx_active_tail - head); 19565748Sduboff } 19575748Sduboff 19585748Sduboff if (rx_head) { 19595748Sduboff /* 19605748Sduboff * send up received packets 19615748Sduboff */ 19625748Sduboff mutex_exit(&dp->intrlock); 19635748Sduboff mac_rx(dp->mh, dp->mac_rx_ring_ha, rx_head); 19645748Sduboff mutex_enter(&dp->intrlock); 19655748Sduboff } 19665748Sduboff 19675748Sduboff #ifdef GEM_DEBUG_LEVEL 19685748Sduboff gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++; 19695748Sduboff #endif 19705748Sduboff return (cnt); 19715748Sduboff } 19725748Sduboff 19735748Sduboff boolean_t 19745748Sduboff gem_tx_done(struct gem_dev *dp) 19755748Sduboff { 1976*7116Sduboff boolean_t tx_sched = B_FALSE; 19775748Sduboff 19785748Sduboff if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) { 19795748Sduboff (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF); 19805748Sduboff DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d", 19815748Sduboff dp->name, dp->tx_active_head, dp->tx_active_tail)); 19825748Sduboff tx_sched = B_TRUE; 19835748Sduboff goto x; 19845748Sduboff } 19855748Sduboff 19865748Sduboff mutex_enter(&dp->xmitlock); 19875748Sduboff 1988*7116Sduboff /* XXX - we must not have any packets in soft queue */ 1989*7116Sduboff ASSERT(dp->tx_softq_head == dp->tx_softq_tail); 19905748Sduboff /* 1991*7116Sduboff * If we won't have chance to get more free tx buffers, and blocked, 19925748Sduboff * it is worth to reschedule the downstream i.e. tx side. 19935748Sduboff */ 1994*7116Sduboff ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0); 1995*7116Sduboff if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) { 19965748Sduboff /* 19975748Sduboff * As no further tx-done interrupts are scheduled, this 19985748Sduboff * is the last chance to kick tx side, which may be 19995748Sduboff * blocked now, otherwise the tx side never works again. 20005748Sduboff */ 20015748Sduboff tx_sched = B_TRUE; 2002*7116Sduboff dp->tx_blocked = (clock_t)0; 2003*7116Sduboff dp->tx_max_packets = 2004*7116Sduboff min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit); 20055748Sduboff } 20065748Sduboff 20075748Sduboff mutex_exit(&dp->xmitlock); 20085748Sduboff 2009*7116Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d", 2010*7116Sduboff dp->name, __func__, BOOLEAN(dp->tx_blocked))); 20115748Sduboff x: 20125748Sduboff return (tx_sched); 20135748Sduboff } 20145748Sduboff 20155748Sduboff static uint_t 20165748Sduboff gem_intr(struct gem_dev *dp) 20175748Sduboff { 20185748Sduboff uint_t ret; 20195748Sduboff 20205748Sduboff mutex_enter(&dp->intrlock); 20215748Sduboff if (dp->mac_suspended) { 20225748Sduboff mutex_exit(&dp->intrlock); 20235748Sduboff return (DDI_INTR_UNCLAIMED); 20245748Sduboff } 20255748Sduboff dp->intr_busy = B_TRUE; 20265748Sduboff 20275748Sduboff ret = (*dp->gc.gc_interrupt)(dp); 20285748Sduboff 20295748Sduboff if (ret == DDI_INTR_UNCLAIMED) { 20305748Sduboff dp->intr_busy = B_FALSE; 20315748Sduboff mutex_exit(&dp->intrlock); 20325748Sduboff return (ret); 20335748Sduboff } 20345748Sduboff 20355748Sduboff if (!dp->mac_active) { 20365748Sduboff cv_broadcast(&dp->tx_drain_cv); 20375748Sduboff } 20385748Sduboff 20395748Sduboff 20405748Sduboff dp->stats.intr++; 20415748Sduboff dp->intr_busy = B_FALSE; 20425748Sduboff 20435748Sduboff mutex_exit(&dp->intrlock); 20445748Sduboff 20455748Sduboff if (ret & INTR_RESTART_TX) { 20465748Sduboff DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name)); 20475748Sduboff mac_tx_update(dp->mh); 20485748Sduboff ret &= ~INTR_RESTART_TX; 20495748Sduboff } 20505748Sduboff return (ret); 20515748Sduboff } 20525748Sduboff 20535748Sduboff static void 20545748Sduboff gem_intr_watcher(struct gem_dev *dp) 20555748Sduboff { 20565748Sduboff (void) gem_intr(dp); 20575748Sduboff 20585748Sduboff /* schedule next call of tu_intr_watcher */ 20595748Sduboff dp->intr_watcher_id = 20605748Sduboff timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1); 20615748Sduboff } 20625748Sduboff 20635748Sduboff /* ======================================================================== */ 20645748Sduboff /* 20655748Sduboff * MII support routines 20665748Sduboff */ 20675748Sduboff /* ======================================================================== */ 20685748Sduboff static void 20695748Sduboff gem_choose_forcedmode(struct gem_dev *dp) 20705748Sduboff { 20715748Sduboff /* choose media mode */ 20725748Sduboff if (dp->anadv_1000fdx || dp->anadv_1000hdx) { 20735748Sduboff dp->speed = GEM_SPD_1000; 20745748Sduboff dp->full_duplex = dp->anadv_1000fdx; 20755748Sduboff } else if (dp->anadv_100fdx || dp->anadv_100t4) { 20765748Sduboff dp->speed = GEM_SPD_100; 20775748Sduboff dp->full_duplex = B_TRUE; 20785748Sduboff } else if (dp->anadv_100hdx) { 20795748Sduboff dp->speed = GEM_SPD_100; 20805748Sduboff dp->full_duplex = B_FALSE; 20815748Sduboff } else { 20825748Sduboff dp->speed = GEM_SPD_10; 20835748Sduboff dp->full_duplex = dp->anadv_10fdx; 20845748Sduboff } 20855748Sduboff } 20865748Sduboff 20875748Sduboff uint16_t 20885748Sduboff gem_mii_read(struct gem_dev *dp, uint_t reg) 20895748Sduboff { 20905748Sduboff if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) { 20915748Sduboff (*dp->gc.gc_mii_sync)(dp); 20925748Sduboff } 20935748Sduboff return ((*dp->gc.gc_mii_read)(dp, reg)); 20945748Sduboff } 20955748Sduboff 20965748Sduboff void 20975748Sduboff gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val) 20985748Sduboff { 20995748Sduboff if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) { 21005748Sduboff (*dp->gc.gc_mii_sync)(dp); 21015748Sduboff } 21025748Sduboff (*dp->gc.gc_mii_write)(dp, reg, val); 21035748Sduboff } 21045748Sduboff 21055748Sduboff #define fc_cap_decode(x) \ 21065748Sduboff ((((x) & MII_ABILITY_PAUSE) ? 1 : 0) | \ 21075748Sduboff (((x) & MII_ABILITY_ASM_DIR) ? 2 : 0)) 21085748Sduboff 21095748Sduboff int 21105748Sduboff gem_mii_config_default(struct gem_dev *dp) 21115748Sduboff { 21125748Sduboff uint16_t mii_stat; 21135748Sduboff uint16_t val; 21145748Sduboff static uint16_t fc_cap_encode[4] = { 21155748Sduboff /* none */ 0, 21165748Sduboff /* symmetric */ MII_ABILITY_PAUSE, 21175748Sduboff /* tx */ MII_ABILITY_ASM_DIR, 21185748Sduboff /* rx-symmetric */ MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR, 21195748Sduboff }; 21205748Sduboff 21215748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 21225748Sduboff 21235748Sduboff /* 21245748Sduboff * Configure bits in advertisement register 21255748Sduboff */ 21265748Sduboff mii_stat = dp->mii_status; 21275748Sduboff 21285748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b", 21295748Sduboff dp->name, __func__, mii_stat, MII_STATUS_BITS)); 21305748Sduboff 21315748Sduboff if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) { 21325748Sduboff /* it's funny */ 21335748Sduboff cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b", 21345748Sduboff dp->name, mii_stat, MII_STATUS_BITS); 21355748Sduboff return (GEM_FAILURE); 21365748Sduboff } 21375748Sduboff 21385748Sduboff /* Do not change the rest of the ability bits in the advert reg */ 21395748Sduboff val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL; 21405748Sduboff 21415748Sduboff DPRINTF(0, (CE_CONT, 21425748Sduboff "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d", 21435748Sduboff dp->name, __func__, 21445748Sduboff dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx, 21455748Sduboff dp->anadv_10fdx, dp->anadv_10hdx)); 21465748Sduboff 21475748Sduboff if (dp->anadv_100t4) { 21485748Sduboff val |= MII_ABILITY_100BASE_T4; 21495748Sduboff } 21505748Sduboff if (dp->anadv_100fdx) { 21515748Sduboff val |= MII_ABILITY_100BASE_TX_FD; 21525748Sduboff } 21535748Sduboff if (dp->anadv_100hdx) { 21545748Sduboff val |= MII_ABILITY_100BASE_TX; 21555748Sduboff } 21565748Sduboff if (dp->anadv_10fdx) { 21575748Sduboff val |= MII_ABILITY_10BASE_T_FD; 21585748Sduboff } 21595748Sduboff if (dp->anadv_10hdx) { 21605748Sduboff val |= MII_ABILITY_10BASE_T; 21615748Sduboff } 21625748Sduboff 21635748Sduboff /* set flow control capability */ 21645748Sduboff val |= fc_cap_encode[dp->anadv_flow_control]; 21655748Sduboff 21665748Sduboff DPRINTF(0, (CE_CONT, 21675748Sduboff "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d", 21685748Sduboff dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode, 21695748Sduboff dp->anadv_flow_control)); 21705748Sduboff 21715748Sduboff gem_mii_write(dp, MII_AN_ADVERT, val); 21725748Sduboff 21735748Sduboff if (mii_stat & MII_STATUS_XSTATUS) { 21745748Sduboff /* 21755748Sduboff * 1000Base-T GMII support 21765748Sduboff */ 21775748Sduboff if (!dp->anadv_autoneg) { 21785748Sduboff /* enable manual configuration */ 21795748Sduboff val = MII_1000TC_CFG_EN; 21805748Sduboff } else { 21815748Sduboff val = 0; 21825748Sduboff if (dp->anadv_1000fdx) { 21835748Sduboff val |= MII_1000TC_ADV_FULL; 21845748Sduboff } 21855748Sduboff if (dp->anadv_1000hdx) { 21865748Sduboff val |= MII_1000TC_ADV_HALF; 21875748Sduboff } 21885748Sduboff } 21895748Sduboff DPRINTF(0, (CE_CONT, 21905748Sduboff "!%s: %s: setting MII_1000TC reg:%b", 21915748Sduboff dp->name, __func__, val, MII_1000TC_BITS)); 21925748Sduboff 21935748Sduboff gem_mii_write(dp, MII_1000TC, val); 21945748Sduboff } 21955748Sduboff 21965748Sduboff return (GEM_SUCCESS); 21975748Sduboff } 21985748Sduboff 21995748Sduboff #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP) 22005748Sduboff #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN) 22015748Sduboff 22025748Sduboff static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = { 22035748Sduboff /* none symm tx rx/symm */ 22045748Sduboff /* none */ 22055748Sduboff {FLOW_CONTROL_NONE, 22065748Sduboff FLOW_CONTROL_NONE, 22075748Sduboff FLOW_CONTROL_NONE, 22085748Sduboff FLOW_CONTROL_NONE}, 22095748Sduboff /* sym */ 22105748Sduboff {FLOW_CONTROL_NONE, 22115748Sduboff FLOW_CONTROL_SYMMETRIC, 22125748Sduboff FLOW_CONTROL_NONE, 22135748Sduboff FLOW_CONTROL_SYMMETRIC}, 22145748Sduboff /* tx */ 22155748Sduboff {FLOW_CONTROL_NONE, 22165748Sduboff FLOW_CONTROL_NONE, 22175748Sduboff FLOW_CONTROL_NONE, 22185748Sduboff FLOW_CONTROL_TX_PAUSE}, 22195748Sduboff /* rx/symm */ 22205748Sduboff {FLOW_CONTROL_NONE, 22215748Sduboff FLOW_CONTROL_SYMMETRIC, 22225748Sduboff FLOW_CONTROL_RX_PAUSE, 22235748Sduboff FLOW_CONTROL_SYMMETRIC}, 22245748Sduboff }; 22255748Sduboff 22265748Sduboff static char *gem_fc_type[] = { 22275748Sduboff "without", 22285748Sduboff "with symmetric", 22295748Sduboff "with tx", 22305748Sduboff "with rx", 22315748Sduboff }; 22325748Sduboff 22335748Sduboff boolean_t 22345748Sduboff gem_mii_link_check(struct gem_dev *dp) 22355748Sduboff { 22365748Sduboff uint16_t old_mii_state; 22375748Sduboff boolean_t tx_sched = B_FALSE; 22385748Sduboff uint16_t status; 22395748Sduboff uint16_t advert; 22405748Sduboff uint16_t lpable; 22415748Sduboff uint16_t exp; 22425748Sduboff uint16_t ctl1000; 22435748Sduboff uint16_t stat1000; 22445748Sduboff uint16_t val; 22455748Sduboff clock_t now; 22465748Sduboff clock_t diff; 22475748Sduboff int linkdown_action; 22485748Sduboff boolean_t fix_phy = B_FALSE; 22495748Sduboff 22505748Sduboff now = ddi_get_lbolt(); 22515748Sduboff old_mii_state = dp->mii_state; 22525748Sduboff 22535748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d", 22545748Sduboff dp->name, __func__, now, dp->mii_state)); 22555748Sduboff 22565748Sduboff diff = now - dp->mii_last_check; 22575748Sduboff dp->mii_last_check = now; 22585748Sduboff 2259*7116Sduboff /* 2260*7116Sduboff * For NWAM, don't show linkdown state right 2261*7116Sduboff * after the system boots 2262*7116Sduboff */ 2263*7116Sduboff if (dp->linkup_delay > 0) { 2264*7116Sduboff if (dp->linkup_delay > diff) { 2265*7116Sduboff dp->linkup_delay -= diff; 2266*7116Sduboff } else { 2267*7116Sduboff /* link up timeout */ 2268*7116Sduboff dp->linkup_delay = -1; 2269*7116Sduboff } 2270*7116Sduboff } 2271*7116Sduboff 22725748Sduboff next_nowait: 22735748Sduboff switch (dp->mii_state) { 22745748Sduboff case MII_STATE_UNKNOWN: 22755748Sduboff /* power-up, DP83840 requires 32 sync bits */ 22765748Sduboff (*dp->gc.gc_mii_sync)(dp); 22775748Sduboff goto reset_phy; 22785748Sduboff 22795748Sduboff case MII_STATE_RESETTING: 22805748Sduboff dp->mii_timer -= diff; 22815748Sduboff if (dp->mii_timer > 0) { 22825748Sduboff /* don't read phy registers in resetting */ 22835748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST; 22845748Sduboff goto next; 22855748Sduboff } 22865748Sduboff 22875748Sduboff /* Timer expired, ensure reset bit is not set */ 22885748Sduboff 22895748Sduboff if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) { 22905748Sduboff /* some phys need sync bits after reset */ 22915748Sduboff (*dp->gc.gc_mii_sync)(dp); 22925748Sduboff } 22935748Sduboff val = gem_mii_read(dp, MII_CONTROL); 22945748Sduboff if (val & MII_CONTROL_RESET) { 22955748Sduboff cmn_err(CE_NOTE, 22965748Sduboff "!%s: time:%ld resetting phy not complete." 22975748Sduboff " mii_control:0x%b", 22985748Sduboff dp->name, ddi_get_lbolt(), 22995748Sduboff val, MII_CONTROL_BITS); 23005748Sduboff } 23015748Sduboff 23025748Sduboff /* ensure neither isolated nor pwrdown nor auto-nego mode */ 23035748Sduboff /* XXX -- this operation is required for NS DP83840A. */ 23045748Sduboff gem_mii_write(dp, MII_CONTROL, 0); 23055748Sduboff 23065748Sduboff /* As resetting PHY has completed, configure PHY registers */ 23075748Sduboff if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) { 23085748Sduboff /* we failed to configure PHY. */ 23095748Sduboff goto reset_phy; 23105748Sduboff } 23115748Sduboff 23125748Sduboff /* mii_config may disable autonegatiation */ 23135748Sduboff gem_choose_forcedmode(dp); 23145748Sduboff 23155748Sduboff dp->mii_lpable = 0; 23165748Sduboff dp->mii_advert = 0; 23175748Sduboff dp->mii_exp = 0; 23185748Sduboff dp->mii_ctl1000 = 0; 23195748Sduboff dp->mii_stat1000 = 0; 23205748Sduboff dp->flow_control = FLOW_CONTROL_NONE; 23215748Sduboff 23225748Sduboff if (!dp->anadv_autoneg) { 23235748Sduboff /* skip auto-negotiation phase */ 23245748Sduboff dp->mii_state = MII_STATE_MEDIA_SETUP; 23255748Sduboff dp->mii_timer = 0; 23265748Sduboff dp->mii_interval = 0; 23275748Sduboff goto next_nowait; 23285748Sduboff } 23295748Sduboff 23305748Sduboff /* Issue auto-negotiation command */ 23315748Sduboff goto autonego; 23325748Sduboff 23335748Sduboff case MII_STATE_AUTONEGOTIATING: 23345748Sduboff /* 23355748Sduboff * Autonegotiation is in progress 23365748Sduboff */ 23375748Sduboff dp->mii_timer -= diff; 23385748Sduboff if (dp->mii_timer - 23395748Sduboff (dp->gc.gc_mii_an_timeout 23405748Sduboff - dp->gc.gc_mii_an_wait) > 0) { 23415748Sduboff /* 23425748Sduboff * wait for a while, typically autonegotiation 23435748Sduboff * completes in 2.3 - 2.5 sec. 23445748Sduboff */ 23455748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST; 23465748Sduboff goto next; 23475748Sduboff } 23485748Sduboff 23495748Sduboff /* read PHY status */ 23505748Sduboff status = gem_mii_read(dp, MII_STATUS); 23515748Sduboff DPRINTF(4, (CE_CONT, 23525748Sduboff "!%s: %s: called: mii_state:%d MII_STATUS reg:%b", 23535748Sduboff dp->name, __func__, dp->mii_state, 23545748Sduboff status, MII_STATUS_BITS)); 23555748Sduboff 23565748Sduboff if (status & MII_STATUS_REMFAULT) { 23575748Sduboff /* 23585748Sduboff * The link parnert told me something wrong happend. 23595748Sduboff * What do we do ? 23605748Sduboff */ 23615748Sduboff cmn_err(CE_CONT, 23625748Sduboff "!%s: auto-negotiation failed: remote fault", 23635748Sduboff dp->name); 23645748Sduboff goto autonego; 23655748Sduboff } 23665748Sduboff 23675748Sduboff if ((status & MII_STATUS_ANDONE) == 0) { 23685748Sduboff if (dp->mii_timer <= 0) { 23695748Sduboff /* 23705748Sduboff * Auto-negotiation was timed out, 23715748Sduboff * try again w/o resetting phy. 23725748Sduboff */ 23735748Sduboff if (!dp->mii_supress_msg) { 23745748Sduboff cmn_err(CE_WARN, 23755748Sduboff "!%s: auto-negotiation failed: timeout", 23765748Sduboff dp->name); 23775748Sduboff dp->mii_supress_msg = B_TRUE; 23785748Sduboff } 23795748Sduboff goto autonego; 23805748Sduboff } 23815748Sduboff /* 23825748Sduboff * Auto-negotiation is in progress. Wait. 23835748Sduboff */ 23845748Sduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval; 23855748Sduboff goto next; 23865748Sduboff } 23875748Sduboff 23885748Sduboff /* 23895748Sduboff * Auto-negotiation have completed. 23905748Sduboff * Assume linkdown and fall through. 23915748Sduboff */ 23925748Sduboff dp->mii_supress_msg = B_FALSE; 23935748Sduboff dp->mii_state = MII_STATE_AN_DONE; 23945748Sduboff DPRINTF(0, (CE_CONT, 23955748Sduboff "!%s: auto-negotiation completed, MII_STATUS:%b", 23965748Sduboff dp->name, status, MII_STATUS_BITS)); 23975748Sduboff 23985748Sduboff if (dp->gc.gc_mii_an_delay > 0) { 23995748Sduboff dp->mii_timer = dp->gc.gc_mii_an_delay; 24005748Sduboff dp->mii_interval = drv_usectohz(20*1000); 24015748Sduboff goto next; 24025748Sduboff } 24035748Sduboff 24045748Sduboff dp->mii_timer = 0; 24055748Sduboff diff = 0; 24065748Sduboff goto next_nowait; 24075748Sduboff 24085748Sduboff case MII_STATE_AN_DONE: 24095748Sduboff /* 24105748Sduboff * Auto-negotiation have done. Now we can set up media. 24115748Sduboff */ 24125748Sduboff dp->mii_timer -= diff; 24135748Sduboff if (dp->mii_timer > 0) { 24145748Sduboff /* wait for a while */ 24155748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST; 24165748Sduboff goto next; 24175748Sduboff } 24185748Sduboff 24195748Sduboff /* 24205748Sduboff * set up the result of auto negotiation 24215748Sduboff */ 24225748Sduboff 24235748Sduboff /* 24245748Sduboff * Read registers required to determin current 24255748Sduboff * duplex mode and media speed. 24265748Sduboff */ 24275748Sduboff if (dp->gc.gc_mii_an_delay > 0) { 24285748Sduboff /* 24295748Sduboff * As the link watcher context has been suspended, 24305748Sduboff * 'status' is invalid. We must status register here 24315748Sduboff */ 24325748Sduboff status = gem_mii_read(dp, MII_STATUS); 24335748Sduboff } 24345748Sduboff advert = gem_mii_read(dp, MII_AN_ADVERT); 24355748Sduboff lpable = gem_mii_read(dp, MII_AN_LPABLE); 24365748Sduboff exp = gem_mii_read(dp, MII_AN_EXPANSION); 24375748Sduboff if (exp == 0xffff) { 24385748Sduboff /* some phys don't have exp register */ 24395748Sduboff exp = 0; 24405748Sduboff } 24415748Sduboff ctl1000 = 0; 24425748Sduboff stat1000 = 0; 24435748Sduboff if (dp->mii_status & MII_STATUS_XSTATUS) { 24445748Sduboff ctl1000 = gem_mii_read(dp, MII_1000TC); 24455748Sduboff stat1000 = gem_mii_read(dp, MII_1000TS); 24465748Sduboff } 24475748Sduboff dp->mii_lpable = lpable; 24485748Sduboff dp->mii_advert = advert; 24495748Sduboff dp->mii_exp = exp; 24505748Sduboff dp->mii_ctl1000 = ctl1000; 24515748Sduboff dp->mii_stat1000 = stat1000; 24525748Sduboff 24535748Sduboff cmn_err(CE_CONT, 24545748Sduboff "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b", 24555748Sduboff dp->name, 24565748Sduboff advert, MII_ABILITY_BITS, 24575748Sduboff lpable, MII_ABILITY_BITS, 24585748Sduboff exp, MII_AN_EXP_BITS); 24595748Sduboff 24605748Sduboff if (dp->mii_status & MII_STATUS_XSTATUS) { 24615748Sduboff cmn_err(CE_CONT, 24625748Sduboff "! MII_1000TC:%b, MII_1000TS:%b", 24635748Sduboff ctl1000, MII_1000TC_BITS, 24645748Sduboff stat1000, MII_1000TS_BITS); 24655748Sduboff } 24665748Sduboff 24675748Sduboff if (gem_population(lpable) <= 1 && 24685748Sduboff (exp & MII_AN_EXP_LPCANAN) == 0) { 24695748Sduboff if ((advert & MII_ABILITY_TECH) != lpable) { 24705748Sduboff cmn_err(CE_WARN, 24715748Sduboff "!%s: but the link partnar doesn't seem" 24725748Sduboff " to have auto-negotiation capability." 24735748Sduboff " please check the link configuration.", 24745748Sduboff dp->name); 24755748Sduboff } 24765748Sduboff /* 24775748Sduboff * it should be result of pararell detection, which 24785748Sduboff * cannot detect duplex mode. 24795748Sduboff */ 24805748Sduboff if (lpable & MII_ABILITY_100BASE_TX) { 24815748Sduboff /* 24825748Sduboff * we prefer full duplex mode for 100Mbps 24835748Sduboff * connection, if we can. 24845748Sduboff */ 24855748Sduboff lpable |= advert & MII_ABILITY_100BASE_TX_FD; 24865748Sduboff } 24875748Sduboff 24885748Sduboff if ((advert & lpable) == 0 && 24895748Sduboff lpable & MII_ABILITY_10BASE_T) { 24905748Sduboff lpable |= advert & MII_ABILITY_10BASE_T_FD; 24915748Sduboff } 24925748Sduboff /* 24935748Sduboff * as the link partnar isn't auto-negotiatable, use 24945748Sduboff * fixed mode temporally. 24955748Sduboff */ 24965748Sduboff fix_phy = B_TRUE; 24975748Sduboff } else if (lpable == 0) { 24985748Sduboff cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name); 24995748Sduboff goto reset_phy; 25005748Sduboff } 25015748Sduboff /* 25025748Sduboff * configure current link mode according to AN priority. 25035748Sduboff */ 25045748Sduboff val = advert & lpable; 25055748Sduboff if ((ctl1000 & MII_1000TC_ADV_FULL) && 25065748Sduboff (stat1000 & MII_1000TS_LP_FULL)) { 25075748Sduboff /* 1000BaseT & full duplex */ 25085748Sduboff dp->speed = GEM_SPD_1000; 25095748Sduboff dp->full_duplex = B_TRUE; 25105748Sduboff } else if ((ctl1000 & MII_1000TC_ADV_HALF) && 25115748Sduboff (stat1000 & MII_1000TS_LP_HALF)) { 25125748Sduboff /* 1000BaseT & half duplex */ 25135748Sduboff dp->speed = GEM_SPD_1000; 25145748Sduboff dp->full_duplex = B_FALSE; 25155748Sduboff } else if (val & MII_ABILITY_100BASE_TX_FD) { 25165748Sduboff /* 100BaseTx & full duplex */ 25175748Sduboff dp->speed = GEM_SPD_100; 25185748Sduboff dp->full_duplex = B_TRUE; 25195748Sduboff } else if (val & MII_ABILITY_100BASE_T4) { 25205748Sduboff /* 100BaseT4 & full duplex */ 25215748Sduboff dp->speed = GEM_SPD_100; 25225748Sduboff dp->full_duplex = B_TRUE; 25235748Sduboff } else if (val & MII_ABILITY_100BASE_TX) { 25245748Sduboff /* 100BaseTx & half duplex */ 25255748Sduboff dp->speed = GEM_SPD_100; 25265748Sduboff dp->full_duplex = B_FALSE; 25275748Sduboff } else if (val & MII_ABILITY_10BASE_T_FD) { 25285748Sduboff /* 10BaseT & full duplex */ 25295748Sduboff dp->speed = GEM_SPD_10; 25305748Sduboff dp->full_duplex = B_TRUE; 25315748Sduboff } else if (val & MII_ABILITY_10BASE_T) { 25325748Sduboff /* 10BaseT & half duplex */ 25335748Sduboff dp->speed = GEM_SPD_10; 25345748Sduboff dp->full_duplex = B_FALSE; 25355748Sduboff } else { 25365748Sduboff /* 25375748Sduboff * It seems that the link partnar doesn't have 25385748Sduboff * auto-negotiation capability and our PHY 25395748Sduboff * could not report the correct current mode. 25405748Sduboff * We guess current mode by mii_control register. 25415748Sduboff */ 25425748Sduboff val = gem_mii_read(dp, MII_CONTROL); 25435748Sduboff 25445748Sduboff /* select 100m full or 10m half */ 25455748Sduboff dp->speed = (val & MII_CONTROL_100MB) ? 25465748Sduboff GEM_SPD_100 : GEM_SPD_10; 25475748Sduboff dp->full_duplex = dp->speed != GEM_SPD_10; 25485748Sduboff fix_phy = B_TRUE; 25495748Sduboff 25505748Sduboff cmn_err(CE_NOTE, 25515748Sduboff "!%s: auto-negotiation done but " 25525748Sduboff "common ability not found.\n" 25535748Sduboff "PHY state: control:%b advert:%b lpable:%b\n" 25545748Sduboff "guessing %d Mbps %s duplex mode", 25555748Sduboff dp->name, 25565748Sduboff val, MII_CONTROL_BITS, 25575748Sduboff advert, MII_ABILITY_BITS, 25585748Sduboff lpable, MII_ABILITY_BITS, 25595748Sduboff gem_speed_value[dp->speed], 25605748Sduboff dp->full_duplex ? "full" : "half"); 25615748Sduboff } 25625748Sduboff 25635748Sduboff if (dp->full_duplex) { 25645748Sduboff dp->flow_control = 25655748Sduboff gem_fc_result[fc_cap_decode(advert)] 25665748Sduboff [fc_cap_decode(lpable)]; 25675748Sduboff } else { 25685748Sduboff dp->flow_control = FLOW_CONTROL_NONE; 25695748Sduboff } 25705748Sduboff dp->mii_state = MII_STATE_MEDIA_SETUP; 25715748Sduboff /* FALLTHROUGH */ 25725748Sduboff 25735748Sduboff case MII_STATE_MEDIA_SETUP: 25745748Sduboff dp->mii_state = MII_STATE_LINKDOWN; 25755748Sduboff dp->mii_timer = dp->gc.gc_mii_linkdown_timeout; 25765748Sduboff DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name)); 25775748Sduboff dp->mii_supress_msg = B_FALSE; 25785748Sduboff 25795748Sduboff /* use short interval */ 25805748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST; 25815748Sduboff 25825748Sduboff if ((!dp->anadv_autoneg) || 25835748Sduboff dp->gc.gc_mii_an_oneshot || fix_phy) { 25845748Sduboff 25855748Sduboff /* 25865748Sduboff * write specified mode to phy. 25875748Sduboff */ 25885748Sduboff val = gem_mii_read(dp, MII_CONTROL); 25895748Sduboff val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX | 25905748Sduboff MII_CONTROL_ANE | MII_CONTROL_RSAN); 25915748Sduboff 25925748Sduboff if (dp->full_duplex) { 25935748Sduboff val |= MII_CONTROL_FDUPLEX; 25945748Sduboff } 25955748Sduboff 25965748Sduboff switch (dp->speed) { 25975748Sduboff case GEM_SPD_1000: 25985748Sduboff val |= MII_CONTROL_1000MB; 25995748Sduboff break; 26005748Sduboff 26015748Sduboff case GEM_SPD_100: 26025748Sduboff val |= MII_CONTROL_100MB; 26035748Sduboff break; 26045748Sduboff 26055748Sduboff default: 26065748Sduboff cmn_err(CE_WARN, "%s: unknown speed:%d", 26075748Sduboff dp->name, dp->speed); 26085748Sduboff /* FALLTHROUGH */ 26095748Sduboff case GEM_SPD_10: 26105748Sduboff /* for GEM_SPD_10, do nothing */ 26115748Sduboff break; 26125748Sduboff } 26135748Sduboff 26145748Sduboff if (dp->mii_status & MII_STATUS_XSTATUS) { 26155748Sduboff gem_mii_write(dp, 26165748Sduboff MII_1000TC, MII_1000TC_CFG_EN); 26175748Sduboff } 26185748Sduboff gem_mii_write(dp, MII_CONTROL, val); 26195748Sduboff } 26205748Sduboff 26215748Sduboff if (dp->nic_state >= NIC_STATE_INITIALIZED) { 26225748Sduboff /* notify the result of auto-negotiation to mac */ 26235748Sduboff (*dp->gc.gc_set_media)(dp); 26245748Sduboff } 26255748Sduboff 26265748Sduboff if ((void *)dp->gc.gc_mii_tune_phy) { 26275748Sduboff /* for built-in sis900 */ 26285748Sduboff /* XXX - this code should be removed. */ 26295748Sduboff (*dp->gc.gc_mii_tune_phy)(dp); 26305748Sduboff } 26315748Sduboff 26325748Sduboff goto next_nowait; 26335748Sduboff 26345748Sduboff case MII_STATE_LINKDOWN: 26355748Sduboff status = gem_mii_read(dp, MII_STATUS); 26365748Sduboff if (status & MII_STATUS_LINKUP) { 26375748Sduboff /* 26385748Sduboff * Link going up 26395748Sduboff */ 26405748Sduboff dp->mii_state = MII_STATE_LINKUP; 26415748Sduboff dp->mii_supress_msg = B_FALSE; 26425748Sduboff 26435748Sduboff DPRINTF(0, (CE_CONT, 26445748Sduboff "!%s: link up detected: mii_stat:%b", 26455748Sduboff dp->name, status, MII_STATUS_BITS)); 26465748Sduboff 26475748Sduboff /* 26485748Sduboff * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are 26495748Sduboff * ignored when MII_CONTROL_ANE is set. 26505748Sduboff */ 26515748Sduboff cmn_err(CE_CONT, 26525748Sduboff "!%s: Link up: %d Mbps %s duplex %s flow control", 26535748Sduboff dp->name, 26545748Sduboff gem_speed_value[dp->speed], 26555748Sduboff dp->full_duplex ? "full" : "half", 26565748Sduboff gem_fc_type[dp->flow_control]); 26575748Sduboff 26585748Sduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval; 26595748Sduboff 26605748Sduboff /* XXX - we need other timer to watch statictics */ 26615748Sduboff if (dp->gc.gc_mii_hw_link_detection && 26625748Sduboff dp->nic_state == NIC_STATE_ONLINE) { 26635748Sduboff dp->mii_interval = 0; 26645748Sduboff } 26655748Sduboff 26665748Sduboff if (dp->nic_state == NIC_STATE_ONLINE) { 26675748Sduboff if (!dp->mac_active) { 26685748Sduboff (void) gem_mac_start(dp); 26695748Sduboff } 26705748Sduboff tx_sched = B_TRUE; 26715748Sduboff } 26725748Sduboff goto next; 26735748Sduboff } 26745748Sduboff 26755748Sduboff dp->mii_supress_msg = B_TRUE; 26765748Sduboff if (dp->anadv_autoneg) { 26775748Sduboff dp->mii_timer -= diff; 26785748Sduboff if (dp->mii_timer <= 0) { 26795748Sduboff /* 26805748Sduboff * link down timer expired. 26815748Sduboff * need to restart auto-negotiation. 26825748Sduboff */ 26835748Sduboff linkdown_action = 26845748Sduboff dp->gc.gc_mii_linkdown_timeout_action; 26855748Sduboff goto restart_autonego; 26865748Sduboff } 26875748Sduboff } 26885748Sduboff /* don't change mii_state */ 26895748Sduboff break; 26905748Sduboff 26915748Sduboff case MII_STATE_LINKUP: 26925748Sduboff status = gem_mii_read(dp, MII_STATUS); 26935748Sduboff if ((status & MII_STATUS_LINKUP) == 0) { 26945748Sduboff /* 26955748Sduboff * Link going down 26965748Sduboff */ 26975748Sduboff cmn_err(CE_NOTE, 26985748Sduboff "!%s: link down detected: mii_stat:%b", 26995748Sduboff dp->name, status, MII_STATUS_BITS); 27005748Sduboff 27015748Sduboff if (dp->nic_state == NIC_STATE_ONLINE && 27025748Sduboff dp->mac_active && 27035748Sduboff dp->gc.gc_mii_stop_mac_on_linkdown) { 27045748Sduboff (void) gem_mac_stop(dp, 0); 2705*7116Sduboff 2706*7116Sduboff if (dp->tx_blocked) { 2707*7116Sduboff /* drain tx */ 2708*7116Sduboff tx_sched = B_TRUE; 2709*7116Sduboff } 27105748Sduboff } 27115748Sduboff 27125748Sduboff if (dp->anadv_autoneg) { 27135748Sduboff /* need to restart auto-negotiation */ 27145748Sduboff linkdown_action = dp->gc.gc_mii_linkdown_action; 27155748Sduboff goto restart_autonego; 27165748Sduboff } 27175748Sduboff 27185748Sduboff dp->mii_state = MII_STATE_LINKDOWN; 27195748Sduboff dp->mii_timer = dp->gc.gc_mii_linkdown_timeout; 27205748Sduboff 27215748Sduboff if ((void *)dp->gc.gc_mii_tune_phy) { 27225748Sduboff /* for built-in sis900 */ 27235748Sduboff (*dp->gc.gc_mii_tune_phy)(dp); 27245748Sduboff } 27255748Sduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval; 27265748Sduboff goto next; 27275748Sduboff } 27285748Sduboff 27295748Sduboff /* don't change mii_state */ 27305748Sduboff if (dp->gc.gc_mii_hw_link_detection && 27315748Sduboff dp->nic_state == NIC_STATE_ONLINE) { 27325748Sduboff dp->mii_interval = 0; 27335748Sduboff goto next; 27345748Sduboff } 27355748Sduboff break; 27365748Sduboff } 27375748Sduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval; 27385748Sduboff goto next; 27395748Sduboff 27405748Sduboff /* Actions on the end of state routine */ 27415748Sduboff 27425748Sduboff restart_autonego: 27435748Sduboff switch (linkdown_action) { 27445748Sduboff case MII_ACTION_RESET: 27455748Sduboff if (!dp->mii_supress_msg) { 27465748Sduboff cmn_err(CE_CONT, "!%s: resetting PHY", dp->name); 27475748Sduboff } 27485748Sduboff dp->mii_supress_msg = B_TRUE; 27495748Sduboff goto reset_phy; 27505748Sduboff 27515748Sduboff case MII_ACTION_NONE: 27525748Sduboff dp->mii_supress_msg = B_TRUE; 27535748Sduboff if (dp->gc.gc_mii_an_oneshot) { 27545748Sduboff goto autonego; 27555748Sduboff } 27565748Sduboff /* PHY will restart autonego automatically */ 27575748Sduboff dp->mii_state = MII_STATE_AUTONEGOTIATING; 27585748Sduboff dp->mii_timer = dp->gc.gc_mii_an_timeout; 27595748Sduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval; 27605748Sduboff goto next; 27615748Sduboff 27625748Sduboff case MII_ACTION_RSA: 27635748Sduboff if (!dp->mii_supress_msg) { 27645748Sduboff cmn_err(CE_CONT, "!%s: restarting auto-negotiation", 27655748Sduboff dp->name); 27665748Sduboff } 27675748Sduboff dp->mii_supress_msg = B_TRUE; 27685748Sduboff goto autonego; 27695748Sduboff 27705748Sduboff default: 27715748Sduboff cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d", 27725748Sduboff dp->name, dp->gc.gc_mii_linkdown_action); 27735748Sduboff dp->mii_supress_msg = B_TRUE; 27745748Sduboff } 27755748Sduboff /* NOTREACHED */ 27765748Sduboff 27775748Sduboff reset_phy: 27785748Sduboff if (!dp->mii_supress_msg) { 27795748Sduboff cmn_err(CE_CONT, "!%s: resetting PHY", dp->name); 27805748Sduboff } 27815748Sduboff dp->mii_state = MII_STATE_RESETTING; 27825748Sduboff dp->mii_timer = dp->gc.gc_mii_reset_timeout; 27835748Sduboff if (!dp->gc.gc_mii_dont_reset) { 27845748Sduboff gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET); 27855748Sduboff } 27865748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST; 27875748Sduboff goto next; 27885748Sduboff 27895748Sduboff autonego: 27905748Sduboff if (!dp->mii_supress_msg) { 27915748Sduboff cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name); 27925748Sduboff } 27935748Sduboff dp->mii_state = MII_STATE_AUTONEGOTIATING; 27945748Sduboff dp->mii_timer = dp->gc.gc_mii_an_timeout; 27955748Sduboff 27965748Sduboff /* start/restart auto nego */ 27975748Sduboff val = gem_mii_read(dp, MII_CONTROL) & 27985748Sduboff ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET); 27995748Sduboff 2800*7116Sduboff gem_mii_write(dp, MII_CONTROL, 2801*7116Sduboff val | MII_CONTROL_RSAN | MII_CONTROL_ANE); 28025748Sduboff 28035748Sduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval; 28045748Sduboff 28055748Sduboff next: 28065748Sduboff if (dp->link_watcher_id == 0 && dp->mii_interval) { 28075748Sduboff /* we must schedule next mii_watcher */ 28085748Sduboff dp->link_watcher_id = 2809*7116Sduboff timeout((void (*)(void *))&gem_mii_link_watcher, 28105748Sduboff (void *)dp, dp->mii_interval); 28115748Sduboff } 28125748Sduboff 2813*7116Sduboff if (old_mii_state != dp->mii_state) { 28145748Sduboff /* notify new mii link state */ 28155748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 2816*7116Sduboff dp->linkup_delay = 0; 28175748Sduboff GEM_LINKUP(dp); 2818*7116Sduboff } else if (dp->linkup_delay <= 0) { 28195748Sduboff GEM_LINKDOWN(dp); 28205748Sduboff } 2821*7116Sduboff } else if (dp->linkup_delay < 0) { 2822*7116Sduboff /* first linkup timeout */ 2823*7116Sduboff dp->linkup_delay = 0; 2824*7116Sduboff GEM_LINKDOWN(dp); 2825*7116Sduboff } 2826*7116Sduboff 28275748Sduboff return (tx_sched); 28285748Sduboff } 28295748Sduboff 28305748Sduboff static void 28315748Sduboff gem_mii_link_watcher(struct gem_dev *dp) 28325748Sduboff { 28335748Sduboff boolean_t tx_sched; 28345748Sduboff 28355748Sduboff mutex_enter(&dp->intrlock); 28365748Sduboff 28375748Sduboff dp->link_watcher_id = 0; 28385748Sduboff tx_sched = gem_mii_link_check(dp); 28395748Sduboff #if GEM_DEBUG_LEVEL > 2 28405748Sduboff if (dp->link_watcher_id == 0) { 28415748Sduboff cmn_err(CE_CONT, "%s: link watcher stopped", dp->name); 28425748Sduboff } 28435748Sduboff #endif 28445748Sduboff mutex_exit(&dp->intrlock); 28455748Sduboff 28465748Sduboff if (tx_sched) { 28475748Sduboff /* kick potentially stopped downstream */ 28485748Sduboff mac_tx_update(dp->mh); 28495748Sduboff } 28505748Sduboff } 28515748Sduboff 28525748Sduboff int 28535748Sduboff gem_mii_probe_default(struct gem_dev *dp) 28545748Sduboff { 28555748Sduboff int8_t phy; 28565748Sduboff uint16_t status; 28575748Sduboff uint16_t adv; 28585748Sduboff uint16_t adv_org; 28595748Sduboff 28605748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 28615748Sduboff 28625748Sduboff /* 28635748Sduboff * Scan PHY 28645748Sduboff */ 28655748Sduboff /* ensure to send sync bits */ 28665748Sduboff dp->mii_status = 0; 28675748Sduboff 28685748Sduboff /* Try default phy first */ 28695748Sduboff if (dp->mii_phy_addr) { 28705748Sduboff status = gem_mii_read(dp, MII_STATUS); 28715748Sduboff if (status != 0xffff && status != 0) { 28725748Sduboff gem_mii_write(dp, MII_CONTROL, 0); 28735748Sduboff goto PHY_found; 28745748Sduboff } 28755748Sduboff 28765748Sduboff if (dp->mii_phy_addr < 0) { 28775748Sduboff cmn_err(CE_NOTE, 28785748Sduboff "!%s: failed to probe default internal and/or non-MII PHY", 28795748Sduboff dp->name); 28805748Sduboff return (GEM_FAILURE); 28815748Sduboff } 28825748Sduboff 28835748Sduboff cmn_err(CE_NOTE, 28845748Sduboff "!%s: failed to probe default MII PHY at %d", 28855748Sduboff dp->name, dp->mii_phy_addr); 28865748Sduboff } 28875748Sduboff 28885748Sduboff /* Try all possible address */ 28895748Sduboff for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) { 28905748Sduboff dp->mii_phy_addr = phy; 28915748Sduboff status = gem_mii_read(dp, MII_STATUS); 28925748Sduboff 28935748Sduboff if (status != 0xffff && status != 0) { 28945748Sduboff gem_mii_write(dp, MII_CONTROL, 0); 28955748Sduboff goto PHY_found; 28965748Sduboff } 28975748Sduboff } 28985748Sduboff 28995748Sduboff for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) { 29005748Sduboff dp->mii_phy_addr = phy; 29015748Sduboff gem_mii_write(dp, MII_CONTROL, 0); 29025748Sduboff status = gem_mii_read(dp, MII_STATUS); 29035748Sduboff 29045748Sduboff if (status != 0xffff && status != 0) { 29055748Sduboff goto PHY_found; 29065748Sduboff } 29075748Sduboff } 29085748Sduboff 29095748Sduboff cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name); 29105748Sduboff dp->mii_phy_addr = -1; 29115748Sduboff 29125748Sduboff return (GEM_FAILURE); 29135748Sduboff 29145748Sduboff PHY_found: 29155748Sduboff dp->mii_status = status; 29165748Sduboff dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) | 29175748Sduboff gem_mii_read(dp, MII_PHYIDL); 29185748Sduboff 29195748Sduboff if (dp->mii_phy_addr < 0) { 29205748Sduboff cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)", 29215748Sduboff dp->name, dp->mii_phy_id); 29225748Sduboff } else { 29235748Sduboff cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d", 29245748Sduboff dp->name, dp->mii_phy_id, dp->mii_phy_addr); 29255748Sduboff } 29265748Sduboff 29275748Sduboff cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b", 29285748Sduboff dp->name, 29295748Sduboff gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS, 29305748Sduboff status, MII_STATUS_BITS, 29315748Sduboff gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS, 29325748Sduboff gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS); 29335748Sduboff 29345748Sduboff dp->mii_xstatus = 0; 29355748Sduboff if (status & MII_STATUS_XSTATUS) { 29365748Sduboff dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS); 29375748Sduboff 29385748Sduboff cmn_err(CE_CONT, "!%s: xstatus:%b", 29395748Sduboff dp->name, dp->mii_xstatus, MII_XSTATUS_BITS); 29405748Sduboff } 29415748Sduboff 29425748Sduboff /* check if the phy can advertize pause abilities */ 29435748Sduboff adv_org = gem_mii_read(dp, MII_AN_ADVERT); 29445748Sduboff 29455748Sduboff gem_mii_write(dp, MII_AN_ADVERT, 29465748Sduboff MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR); 29475748Sduboff 29485748Sduboff adv = gem_mii_read(dp, MII_AN_ADVERT); 29495748Sduboff 29505748Sduboff if ((adv & MII_ABILITY_PAUSE) == 0) { 29515748Sduboff dp->gc.gc_flow_control &= ~1; 29525748Sduboff } 29535748Sduboff 29545748Sduboff if ((adv & MII_ABILITY_ASM_DIR) == 0) { 29555748Sduboff dp->gc.gc_flow_control &= ~2; 29565748Sduboff } 29575748Sduboff 29585748Sduboff gem_mii_write(dp, MII_AN_ADVERT, adv_org); 29595748Sduboff 29605748Sduboff return (GEM_SUCCESS); 29615748Sduboff } 29625748Sduboff 29635748Sduboff static void 29645748Sduboff gem_mii_start(struct gem_dev *dp) 29655748Sduboff { 29665748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 29675748Sduboff 29685748Sduboff /* make a first call of check link */ 29695748Sduboff dp->mii_state = MII_STATE_UNKNOWN; 29705748Sduboff dp->mii_last_check = ddi_get_lbolt(); 2971*7116Sduboff dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout; 29725748Sduboff (void) gem_mii_link_watcher(dp); 29735748Sduboff } 29745748Sduboff 29755748Sduboff static void 29765748Sduboff gem_mii_stop(struct gem_dev *dp) 29775748Sduboff { 29785748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 29795748Sduboff 29805748Sduboff /* Ensure timer routine stopped */ 29815748Sduboff mutex_enter(&dp->intrlock); 29825748Sduboff if (dp->link_watcher_id) { 29835748Sduboff while (untimeout(dp->link_watcher_id) == -1) 29845748Sduboff ; 29855748Sduboff dp->link_watcher_id = 0; 29865748Sduboff } 29875748Sduboff mutex_exit(&dp->intrlock); 29885748Sduboff } 29895748Sduboff 29905748Sduboff boolean_t 29915748Sduboff gem_get_mac_addr_conf(struct gem_dev *dp) 29925748Sduboff { 29935748Sduboff char propname[32]; 29945748Sduboff char *valstr; 29955748Sduboff uint8_t mac[ETHERADDRL]; 29965748Sduboff char *cp; 29975748Sduboff int c; 29985748Sduboff int i; 29995748Sduboff int j; 30005748Sduboff uint8_t v; 30015748Sduboff uint8_t d; 30025748Sduboff uint8_t ored; 30035748Sduboff 30045748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 30055748Sduboff /* 30065748Sduboff * Get ethernet address from .conf file 30075748Sduboff */ 30085748Sduboff (void) sprintf(propname, "mac-addr"); 30095748Sduboff if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip, 30105748Sduboff DDI_PROP_DONTPASS, propname, &valstr)) != 30115748Sduboff DDI_PROP_SUCCESS) { 30125748Sduboff return (B_FALSE); 30135748Sduboff } 30145748Sduboff 30155748Sduboff if (strlen(valstr) != ETHERADDRL*3-1) { 30165748Sduboff goto syntax_err; 30175748Sduboff } 30185748Sduboff 30195748Sduboff cp = valstr; 30205748Sduboff j = 0; 30215748Sduboff ored = 0; 30225748Sduboff for (;;) { 30235748Sduboff v = 0; 30245748Sduboff for (i = 0; i < 2; i++) { 30255748Sduboff c = *cp++; 30265748Sduboff 30275748Sduboff if (c >= 'a' && c <= 'f') { 30285748Sduboff d = c - 'a' + 10; 30295748Sduboff } else if (c >= 'A' && c <= 'F') { 30305748Sduboff d = c - 'A' + 10; 30315748Sduboff } else if (c >= '0' && c <= '9') { 30325748Sduboff d = c - '0'; 30335748Sduboff } else { 30345748Sduboff goto syntax_err; 30355748Sduboff } 30365748Sduboff v = (v << 4) | d; 30375748Sduboff } 30385748Sduboff 30395748Sduboff mac[j++] = v; 30405748Sduboff ored |= v; 30415748Sduboff if (j == ETHERADDRL) { 30425748Sduboff /* done */ 30435748Sduboff break; 30445748Sduboff } 30455748Sduboff 30465748Sduboff c = *cp++; 30475748Sduboff if (c != ':') { 30485748Sduboff goto syntax_err; 30495748Sduboff } 30505748Sduboff } 30515748Sduboff 30525748Sduboff if (ored == 0) { 30535748Sduboff goto err; 30545748Sduboff } 30555748Sduboff for (i = 0; i < ETHERADDRL; i++) { 30565748Sduboff dp->dev_addr.ether_addr_octet[i] = mac[i]; 30575748Sduboff } 30585748Sduboff ddi_prop_free(valstr); 30595748Sduboff return (B_TRUE); 30605748Sduboff 30615748Sduboff syntax_err: 30625748Sduboff cmn_err(CE_CONT, 30635748Sduboff "!%s: read mac addr: trying .conf: syntax err %s", 30645748Sduboff dp->name, valstr); 30655748Sduboff err: 30665748Sduboff ddi_prop_free(valstr); 30675748Sduboff 30685748Sduboff return (B_FALSE); 30695748Sduboff } 30705748Sduboff 30715748Sduboff 30725748Sduboff /* ============================================================== */ 30735748Sduboff /* 30745748Sduboff * internal start/stop interface 30755748Sduboff */ 30765748Sduboff /* ============================================================== */ 30775748Sduboff static int 30785748Sduboff gem_mac_set_rx_filter(struct gem_dev *dp) 30795748Sduboff { 30805748Sduboff return ((*dp->gc.gc_set_rx_filter)(dp)); 30815748Sduboff } 30825748Sduboff 30835748Sduboff /* 30845748Sduboff * gem_mac_init: cold start 30855748Sduboff */ 30865748Sduboff static int 30875748Sduboff gem_mac_init(struct gem_dev *dp) 30885748Sduboff { 30895748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 30905748Sduboff 30915748Sduboff if (dp->mac_suspended) { 30925748Sduboff return (GEM_FAILURE); 30935748Sduboff } 30945748Sduboff 30955748Sduboff dp->mac_active = B_FALSE; 30965748Sduboff 30975748Sduboff gem_init_rx_ring(dp); 30985748Sduboff gem_init_tx_ring(dp); 30995748Sduboff 31005748Sduboff /* reset transmitter state */ 3101*7116Sduboff dp->tx_blocked = (clock_t)0; 31025748Sduboff dp->tx_busy = 0; 31035748Sduboff dp->tx_reclaim_busy = 0; 3104*7116Sduboff dp->tx_max_packets = dp->gc.gc_tx_buf_limit; 31055748Sduboff 31065748Sduboff if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) { 31075748Sduboff return (GEM_FAILURE); 31085748Sduboff } 31095748Sduboff 31105748Sduboff gem_prepare_rx_buf(dp); 31115748Sduboff 31125748Sduboff return (GEM_SUCCESS); 31135748Sduboff } 31145748Sduboff /* 31155748Sduboff * gem_mac_start: warm start 31165748Sduboff */ 31175748Sduboff static int 31185748Sduboff gem_mac_start(struct gem_dev *dp) 31195748Sduboff { 31205748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 31215748Sduboff 31225748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 31235748Sduboff ASSERT(dp->nic_state == NIC_STATE_ONLINE); 31245748Sduboff ASSERT(dp->mii_state == MII_STATE_LINKUP); 31255748Sduboff 31265748Sduboff /* enable tx and rx */ 31275748Sduboff mutex_enter(&dp->xmitlock); 31285748Sduboff if (dp->mac_suspended) { 31295748Sduboff mutex_exit(&dp->xmitlock); 31305748Sduboff return (GEM_FAILURE); 31315748Sduboff } 31325748Sduboff dp->mac_active = B_TRUE; 31335748Sduboff mutex_exit(&dp->xmitlock); 31345748Sduboff 3135*7116Sduboff /* setup rx buffers */ 3136*7116Sduboff (*dp->gc.gc_rx_start)(dp, 3137*7116Sduboff SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size), 3138*7116Sduboff dp->rx_active_tail - dp->rx_active_head); 3139*7116Sduboff 31405748Sduboff if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) { 31415748Sduboff cmn_err(CE_WARN, "%s: %s: start_chip: failed", 31425748Sduboff dp->name, __func__); 31435748Sduboff return (GEM_FAILURE); 31445748Sduboff } 31455748Sduboff 31465748Sduboff mutex_enter(&dp->xmitlock); 31475748Sduboff 31485748Sduboff /* load untranmitted packets to the nic */ 31495748Sduboff ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0); 31505748Sduboff if (dp->tx_softq_tail - dp->tx_softq_head > 0) { 31515748Sduboff gem_tx_load_descs_oo(dp, 31525748Sduboff dp->tx_softq_head, dp->tx_softq_tail, 31535748Sduboff GEM_TXFLAG_HEAD); 31545748Sduboff /* issue preloaded tx buffers */ 31555748Sduboff gem_tx_start_unit(dp); 31565748Sduboff } 31575748Sduboff 31585748Sduboff mutex_exit(&dp->xmitlock); 31595748Sduboff 31605748Sduboff return (GEM_SUCCESS); 31615748Sduboff } 31625748Sduboff 31635748Sduboff static int 31645748Sduboff gem_mac_stop(struct gem_dev *dp, uint_t flags) 31655748Sduboff { 31665748Sduboff int i; 31675748Sduboff int wait_time; /* in uS */ 31685748Sduboff #ifdef GEM_DEBUG_LEVEL 31695748Sduboff clock_t now; 31705748Sduboff #endif 31715748Sduboff int ret = GEM_SUCCESS; 31725748Sduboff 31735748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d", 31745748Sduboff dp->name, __func__, dp->rx_buf_freecnt)); 31755748Sduboff 31765748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 31775748Sduboff ASSERT(!mutex_owned(&dp->xmitlock)); 31785748Sduboff 31795748Sduboff /* 31805748Sduboff * Block transmits 31815748Sduboff */ 31825748Sduboff mutex_enter(&dp->xmitlock); 31835748Sduboff if (dp->mac_suspended) { 31845748Sduboff mutex_exit(&dp->xmitlock); 31855748Sduboff return (GEM_SUCCESS); 31865748Sduboff } 31875748Sduboff dp->mac_active = B_FALSE; 31885748Sduboff 31895748Sduboff while (dp->tx_busy > 0) { 31905748Sduboff cv_wait(&dp->tx_drain_cv, &dp->xmitlock); 31915748Sduboff } 31925748Sduboff mutex_exit(&dp->xmitlock); 31935748Sduboff 31945748Sduboff if ((flags & GEM_RESTART_NOWAIT) == 0) { 31955748Sduboff /* 31965748Sduboff * Wait for all tx buffers sent. 31975748Sduboff */ 31985748Sduboff wait_time = 31995748Sduboff 2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) * 32005748Sduboff (dp->tx_active_tail - dp->tx_active_head); 32015748Sduboff 32025748Sduboff DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS", 32035748Sduboff dp->name, __func__, wait_time)); 32045748Sduboff i = 0; 32055748Sduboff #ifdef GEM_DEBUG_LEVEL 32065748Sduboff now = ddi_get_lbolt(); 32075748Sduboff #endif 32085748Sduboff while (dp->tx_active_tail != dp->tx_active_head) { 32095748Sduboff if (i > wait_time) { 32105748Sduboff /* timeout */ 32115748Sduboff cmn_err(CE_NOTE, "%s: %s timeout: tx drain", 32125748Sduboff dp->name, __func__); 32135748Sduboff break; 32145748Sduboff } 32155748Sduboff (void) gem_reclaim_txbuf(dp); 32165748Sduboff drv_usecwait(100); 32175748Sduboff i += 100; 32185748Sduboff } 32195748Sduboff DPRINTF(0, (CE_NOTE, 32205748Sduboff "!%s: %s: the nic have drained in %d uS, real %d mS", 32215748Sduboff dp->name, __func__, i, 32225748Sduboff 10*((int)(ddi_get_lbolt() - now)))); 32235748Sduboff } 32245748Sduboff 32255748Sduboff /* 32265748Sduboff * Now we can stop the nic safely. 32275748Sduboff */ 32285748Sduboff if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) { 32295748Sduboff cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it", 32305748Sduboff dp->name, __func__); 32315748Sduboff if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) { 32325748Sduboff cmn_err(CE_WARN, "%s: %s: failed to reset chip", 32335748Sduboff dp->name, __func__); 32345748Sduboff } 32355748Sduboff } 32365748Sduboff 32375748Sduboff /* 32385748Sduboff * Clear all rx buffers 32395748Sduboff */ 32405748Sduboff if (flags & GEM_RESTART_KEEP_BUF) { 32415748Sduboff (void) gem_receive(dp); 32425748Sduboff } 32435748Sduboff gem_clean_rx_buf(dp); 32445748Sduboff 32455748Sduboff /* 32465748Sduboff * Update final statistics 32475748Sduboff */ 32485748Sduboff (*dp->gc.gc_get_stats)(dp); 32495748Sduboff 32505748Sduboff /* 32515748Sduboff * Clear all pended tx packets 32525748Sduboff */ 32535748Sduboff ASSERT(dp->tx_active_tail == dp->tx_softq_head); 32545748Sduboff ASSERT(dp->tx_softq_tail == dp->tx_free_head); 32555748Sduboff if (flags & GEM_RESTART_KEEP_BUF) { 32565748Sduboff /* restore active tx buffers */ 32575748Sduboff dp->tx_active_tail = dp->tx_active_head; 32585748Sduboff dp->tx_softq_head = dp->tx_active_head; 32595748Sduboff } else { 32605748Sduboff gem_clean_tx_buf(dp); 32615748Sduboff } 32625748Sduboff 32635748Sduboff return (ret); 32645748Sduboff } 32655748Sduboff 32665748Sduboff static int 32675748Sduboff gem_add_multicast(struct gem_dev *dp, const uint8_t *ep) 32685748Sduboff { 32695748Sduboff int cnt; 32705748Sduboff int err; 32715748Sduboff 32725748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 32735748Sduboff 32745748Sduboff mutex_enter(&dp->intrlock); 32755748Sduboff if (dp->mac_suspended) { 32765748Sduboff mutex_exit(&dp->intrlock); 32775748Sduboff return (GEM_FAILURE); 32785748Sduboff } 32795748Sduboff 32805748Sduboff if (dp->mc_count_req++ < GEM_MAXMC) { 32815748Sduboff /* append the new address at the end of the mclist */ 32825748Sduboff cnt = dp->mc_count; 32835748Sduboff bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet, 32845748Sduboff ETHERADDRL); 32855748Sduboff if (dp->gc.gc_multicast_hash) { 32865748Sduboff dp->mc_list[cnt].hash = 32875748Sduboff (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep); 32885748Sduboff } 32895748Sduboff dp->mc_count = cnt + 1; 32905748Sduboff } 32915748Sduboff 32925748Sduboff if (dp->mc_count_req != dp->mc_count) { 32935748Sduboff /* multicast address list overflow */ 32945748Sduboff dp->rxmode |= RXMODE_MULTI_OVF; 32955748Sduboff } else { 32965748Sduboff dp->rxmode &= ~RXMODE_MULTI_OVF; 32975748Sduboff } 32985748Sduboff 3299*7116Sduboff /* tell new multicast list to the hardware */ 33005748Sduboff err = gem_mac_set_rx_filter(dp); 33015748Sduboff 33025748Sduboff mutex_exit(&dp->intrlock); 33035748Sduboff 33045748Sduboff return (err); 33055748Sduboff } 33065748Sduboff 33075748Sduboff static int 33085748Sduboff gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep) 33095748Sduboff { 33105748Sduboff size_t len; 33115748Sduboff int i; 33125748Sduboff int cnt; 33135748Sduboff int err; 33145748Sduboff 33155748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 33165748Sduboff 33175748Sduboff mutex_enter(&dp->intrlock); 33185748Sduboff if (dp->mac_suspended) { 33195748Sduboff mutex_exit(&dp->intrlock); 33205748Sduboff return (GEM_FAILURE); 33215748Sduboff } 33225748Sduboff 33235748Sduboff dp->mc_count_req--; 33245748Sduboff cnt = dp->mc_count; 33255748Sduboff for (i = 0; i < cnt; i++) { 33265748Sduboff if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) { 33275748Sduboff continue; 33285748Sduboff } 33295748Sduboff /* shrink the mclist by copying forward */ 33305748Sduboff len = (cnt - (i + 1)) * sizeof (*dp->mc_list); 33315748Sduboff if (len > 0) { 33325748Sduboff bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len); 33335748Sduboff } 33345748Sduboff dp->mc_count--; 33355748Sduboff break; 33365748Sduboff } 33375748Sduboff 33385748Sduboff if (dp->mc_count_req != dp->mc_count) { 33395748Sduboff /* multicast address list overflow */ 33405748Sduboff dp->rxmode |= RXMODE_MULTI_OVF; 33415748Sduboff } else { 33425748Sduboff dp->rxmode &= ~RXMODE_MULTI_OVF; 33435748Sduboff } 33445748Sduboff /* In gem v2, don't hold xmitlock on calling set_rx_filter */ 33455748Sduboff err = gem_mac_set_rx_filter(dp); 33465748Sduboff 33475748Sduboff mutex_exit(&dp->intrlock); 33485748Sduboff 33495748Sduboff return (err); 33505748Sduboff } 33515748Sduboff 33525748Sduboff /* ============================================================== */ 33535748Sduboff /* 33545748Sduboff * ND interface 33555748Sduboff */ 33565748Sduboff /* ============================================================== */ 33575748Sduboff enum { 33585748Sduboff PARAM_AUTONEG_CAP, 33595748Sduboff PARAM_PAUSE_CAP, 33605748Sduboff PARAM_ASYM_PAUSE_CAP, 33615748Sduboff PARAM_1000FDX_CAP, 33625748Sduboff PARAM_1000HDX_CAP, 33635748Sduboff PARAM_100T4_CAP, 33645748Sduboff PARAM_100FDX_CAP, 33655748Sduboff PARAM_100HDX_CAP, 33665748Sduboff PARAM_10FDX_CAP, 33675748Sduboff PARAM_10HDX_CAP, 33685748Sduboff 33695748Sduboff PARAM_ADV_AUTONEG_CAP, 33705748Sduboff PARAM_ADV_PAUSE_CAP, 33715748Sduboff PARAM_ADV_ASYM_PAUSE_CAP, 33725748Sduboff PARAM_ADV_1000FDX_CAP, 33735748Sduboff PARAM_ADV_1000HDX_CAP, 33745748Sduboff PARAM_ADV_100T4_CAP, 33755748Sduboff PARAM_ADV_100FDX_CAP, 33765748Sduboff PARAM_ADV_100HDX_CAP, 33775748Sduboff PARAM_ADV_10FDX_CAP, 33785748Sduboff PARAM_ADV_10HDX_CAP, 33795748Sduboff 33805748Sduboff PARAM_LP_AUTONEG_CAP, 33815748Sduboff PARAM_LP_PAUSE_CAP, 33825748Sduboff PARAM_LP_ASYM_PAUSE_CAP, 33835748Sduboff PARAM_LP_1000FDX_CAP, 33845748Sduboff PARAM_LP_1000HDX_CAP, 33855748Sduboff PARAM_LP_100T4_CAP, 33865748Sduboff PARAM_LP_100FDX_CAP, 33875748Sduboff PARAM_LP_100HDX_CAP, 33885748Sduboff PARAM_LP_10FDX_CAP, 33895748Sduboff PARAM_LP_10HDX_CAP, 33905748Sduboff 33915748Sduboff PARAM_LINK_STATUS, 33925748Sduboff PARAM_LINK_SPEED, 33935748Sduboff PARAM_LINK_DUPLEX, 33945748Sduboff 33955748Sduboff PARAM_LINK_AUTONEG, 33965748Sduboff PARAM_LINK_RX_PAUSE, 33975748Sduboff PARAM_LINK_TX_PAUSE, 33985748Sduboff 33995748Sduboff PARAM_LOOP_MODE, 34005748Sduboff PARAM_MSI_CNT, 34015748Sduboff 34025748Sduboff #ifdef DEBUG_RESUME 34035748Sduboff PARAM_RESUME_TEST, 34045748Sduboff #endif 34055748Sduboff PARAM_COUNT 34065748Sduboff }; 34075748Sduboff 34085748Sduboff enum ioc_reply { 34095748Sduboff IOC_INVAL = -1, /* bad, NAK with EINVAL */ 34105748Sduboff IOC_DONE, /* OK, reply sent */ 34115748Sduboff IOC_ACK, /* OK, just send ACK */ 34125748Sduboff IOC_REPLY, /* OK, just send reply */ 34135748Sduboff IOC_RESTART_ACK, /* OK, restart & ACK */ 34145748Sduboff IOC_RESTART_REPLY /* OK, restart & reply */ 34155748Sduboff }; 34165748Sduboff 34175748Sduboff struct gem_nd_arg { 34185748Sduboff struct gem_dev *dp; 34195748Sduboff int item; 34205748Sduboff }; 34215748Sduboff 34225748Sduboff static int 34235748Sduboff gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp) 34245748Sduboff { 34255748Sduboff struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp; 34265748Sduboff int item = ((struct gem_nd_arg *)(void *)arg)->item; 34275748Sduboff long val; 34285748Sduboff 34295748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d", 34305748Sduboff dp->name, __func__, item)); 34315748Sduboff 34325748Sduboff switch (item) { 34335748Sduboff case PARAM_AUTONEG_CAP: 34345748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG); 34355748Sduboff DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val)); 34365748Sduboff break; 34375748Sduboff 34385748Sduboff case PARAM_PAUSE_CAP: 34395748Sduboff val = BOOLEAN(dp->gc.gc_flow_control & 1); 34405748Sduboff break; 34415748Sduboff 34425748Sduboff case PARAM_ASYM_PAUSE_CAP: 34435748Sduboff val = BOOLEAN(dp->gc.gc_flow_control & 2); 34445748Sduboff break; 34455748Sduboff 34465748Sduboff case PARAM_1000FDX_CAP: 34475748Sduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) || 34485748Sduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD); 34495748Sduboff break; 34505748Sduboff 34515748Sduboff case PARAM_1000HDX_CAP: 34525748Sduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) || 34535748Sduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX); 34545748Sduboff break; 34555748Sduboff 34565748Sduboff case PARAM_100T4_CAP: 34575748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4); 34585748Sduboff break; 34595748Sduboff 34605748Sduboff case PARAM_100FDX_CAP: 34615748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD); 34625748Sduboff break; 34635748Sduboff 34645748Sduboff case PARAM_100HDX_CAP: 34655748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX); 34665748Sduboff break; 34675748Sduboff 34685748Sduboff case PARAM_10FDX_CAP: 34695748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD); 34705748Sduboff break; 34715748Sduboff 34725748Sduboff case PARAM_10HDX_CAP: 34735748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10); 34745748Sduboff break; 34755748Sduboff 34765748Sduboff case PARAM_ADV_AUTONEG_CAP: 34775748Sduboff val = dp->anadv_autoneg; 34785748Sduboff break; 34795748Sduboff 34805748Sduboff case PARAM_ADV_PAUSE_CAP: 34815748Sduboff val = BOOLEAN(dp->anadv_flow_control & 1); 34825748Sduboff break; 34835748Sduboff 34845748Sduboff case PARAM_ADV_ASYM_PAUSE_CAP: 34855748Sduboff val = BOOLEAN(dp->anadv_flow_control & 2); 34865748Sduboff break; 34875748Sduboff 34885748Sduboff case PARAM_ADV_1000FDX_CAP: 34895748Sduboff val = dp->anadv_1000fdx; 34905748Sduboff break; 34915748Sduboff 34925748Sduboff case PARAM_ADV_1000HDX_CAP: 34935748Sduboff val = dp->anadv_1000hdx; 34945748Sduboff break; 34955748Sduboff 34965748Sduboff case PARAM_ADV_100T4_CAP: 34975748Sduboff val = dp->anadv_100t4; 34985748Sduboff break; 34995748Sduboff 35005748Sduboff case PARAM_ADV_100FDX_CAP: 35015748Sduboff val = dp->anadv_100fdx; 35025748Sduboff break; 35035748Sduboff 35045748Sduboff case PARAM_ADV_100HDX_CAP: 35055748Sduboff val = dp->anadv_100hdx; 35065748Sduboff break; 35075748Sduboff 35085748Sduboff case PARAM_ADV_10FDX_CAP: 35095748Sduboff val = dp->anadv_10fdx; 35105748Sduboff break; 35115748Sduboff 35125748Sduboff case PARAM_ADV_10HDX_CAP: 35135748Sduboff val = dp->anadv_10hdx; 35145748Sduboff break; 35155748Sduboff 35165748Sduboff case PARAM_LP_AUTONEG_CAP: 35175748Sduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); 35185748Sduboff break; 35195748Sduboff 35205748Sduboff case PARAM_LP_PAUSE_CAP: 35215748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE); 35225748Sduboff break; 35235748Sduboff 35245748Sduboff case PARAM_LP_ASYM_PAUSE_CAP: 35255748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR); 35265748Sduboff break; 35275748Sduboff 35285748Sduboff case PARAM_LP_1000FDX_CAP: 35295748Sduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL); 35305748Sduboff break; 35315748Sduboff 35325748Sduboff case PARAM_LP_1000HDX_CAP: 35335748Sduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF); 35345748Sduboff break; 35355748Sduboff 35365748Sduboff case PARAM_LP_100T4_CAP: 35375748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4); 35385748Sduboff break; 35395748Sduboff 35405748Sduboff case PARAM_LP_100FDX_CAP: 35415748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD); 35425748Sduboff break; 35435748Sduboff 35445748Sduboff case PARAM_LP_100HDX_CAP: 35455748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX); 35465748Sduboff break; 35475748Sduboff 35485748Sduboff case PARAM_LP_10FDX_CAP: 35495748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD); 35505748Sduboff break; 35515748Sduboff 35525748Sduboff case PARAM_LP_10HDX_CAP: 35535748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T); 35545748Sduboff break; 35555748Sduboff 35565748Sduboff case PARAM_LINK_STATUS: 35575748Sduboff val = (dp->mii_state == MII_STATE_LINKUP); 35585748Sduboff break; 35595748Sduboff 35605748Sduboff case PARAM_LINK_SPEED: 35615748Sduboff val = gem_speed_value[dp->speed]; 35625748Sduboff break; 35635748Sduboff 35645748Sduboff case PARAM_LINK_DUPLEX: 35655748Sduboff val = 0; 35665748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 35675748Sduboff val = dp->full_duplex ? 2 : 1; 35685748Sduboff } 35695748Sduboff break; 35705748Sduboff 35715748Sduboff case PARAM_LINK_AUTONEG: 35725748Sduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); 35735748Sduboff break; 35745748Sduboff 35755748Sduboff case PARAM_LINK_RX_PAUSE: 35765748Sduboff val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) || 35775748Sduboff (dp->flow_control == FLOW_CONTROL_RX_PAUSE); 35785748Sduboff break; 35795748Sduboff 35805748Sduboff case PARAM_LINK_TX_PAUSE: 35815748Sduboff val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) || 35825748Sduboff (dp->flow_control == FLOW_CONTROL_TX_PAUSE); 35835748Sduboff break; 35845748Sduboff 35855748Sduboff #ifdef DEBUG_RESUME 35865748Sduboff case PARAM_RESUME_TEST: 35875748Sduboff val = 0; 35885748Sduboff break; 35895748Sduboff #endif 35905748Sduboff default: 35915748Sduboff cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)", 35925748Sduboff dp->name, item); 35935748Sduboff break; 35945748Sduboff } 35955748Sduboff 35965748Sduboff (void) mi_mpprintf(mp, "%ld", val); 35975748Sduboff 35985748Sduboff return (0); 35995748Sduboff } 36005748Sduboff 36015748Sduboff static int 36025748Sduboff gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp) 36035748Sduboff { 36045748Sduboff struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp; 36055748Sduboff int item = ((struct gem_nd_arg *)(void *)arg)->item; 36065748Sduboff long val; 36075748Sduboff char *end; 36085748Sduboff 36095748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 36105748Sduboff if (ddi_strtol(value, &end, 10, &val)) { 36115748Sduboff return (EINVAL); 36125748Sduboff } 36135748Sduboff if (end == value) { 36145748Sduboff return (EINVAL); 36155748Sduboff } 36165748Sduboff 36175748Sduboff switch (item) { 36185748Sduboff case PARAM_ADV_AUTONEG_CAP: 36195748Sduboff if (val != 0 && val != 1) { 36205748Sduboff goto err; 36215748Sduboff } 36225748Sduboff if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) { 36235748Sduboff goto err; 36245748Sduboff } 36255748Sduboff dp->anadv_autoneg = (int)val; 36265748Sduboff break; 36275748Sduboff 36285748Sduboff case PARAM_ADV_PAUSE_CAP: 36295748Sduboff if (val != 0 && val != 1) { 36305748Sduboff goto err; 36315748Sduboff } 36325748Sduboff if (val) { 36335748Sduboff dp->anadv_flow_control |= 1; 36345748Sduboff } else { 36355748Sduboff dp->anadv_flow_control &= ~1; 36365748Sduboff } 36375748Sduboff break; 36385748Sduboff 36395748Sduboff case PARAM_ADV_ASYM_PAUSE_CAP: 36405748Sduboff if (val != 0 && val != 1) { 36415748Sduboff goto err; 36425748Sduboff } 36435748Sduboff if (val) { 36445748Sduboff dp->anadv_flow_control |= 2; 36455748Sduboff } else { 36465748Sduboff dp->anadv_flow_control &= ~2; 36475748Sduboff } 36485748Sduboff break; 36495748Sduboff 36505748Sduboff case PARAM_ADV_1000FDX_CAP: 36515748Sduboff if (val != 0 && val != 1) { 36525748Sduboff goto err; 36535748Sduboff } 36545748Sduboff if (val && (dp->mii_xstatus & 36555748Sduboff (MII_XSTATUS_1000BASET_FD | 36565748Sduboff MII_XSTATUS_1000BASEX_FD)) == 0) { 36575748Sduboff goto err; 36585748Sduboff } 36595748Sduboff dp->anadv_1000fdx = (int)val; 36605748Sduboff break; 36615748Sduboff 36625748Sduboff case PARAM_ADV_1000HDX_CAP: 36635748Sduboff if (val != 0 && val != 1) { 36645748Sduboff goto err; 36655748Sduboff } 36665748Sduboff if (val && (dp->mii_xstatus & 36675748Sduboff (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) { 36685748Sduboff goto err; 36695748Sduboff } 36705748Sduboff dp->anadv_1000hdx = (int)val; 36715748Sduboff break; 36725748Sduboff 36735748Sduboff case PARAM_ADV_100T4_CAP: 36745748Sduboff if (val != 0 && val != 1) { 36755748Sduboff goto err; 36765748Sduboff } 36775748Sduboff if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) { 36785748Sduboff goto err; 36795748Sduboff } 36805748Sduboff dp->anadv_100t4 = (int)val; 36815748Sduboff break; 36825748Sduboff 36835748Sduboff case PARAM_ADV_100FDX_CAP: 36845748Sduboff if (val != 0 && val != 1) { 36855748Sduboff goto err; 36865748Sduboff } 36875748Sduboff if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) { 36885748Sduboff goto err; 36895748Sduboff } 36905748Sduboff dp->anadv_100fdx = (int)val; 36915748Sduboff break; 36925748Sduboff 36935748Sduboff case PARAM_ADV_100HDX_CAP: 36945748Sduboff if (val != 0 && val != 1) { 36955748Sduboff goto err; 36965748Sduboff } 36975748Sduboff if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) { 36985748Sduboff goto err; 36995748Sduboff } 37005748Sduboff dp->anadv_100hdx = (int)val; 37015748Sduboff break; 37025748Sduboff 37035748Sduboff case PARAM_ADV_10FDX_CAP: 37045748Sduboff if (val != 0 && val != 1) { 37055748Sduboff goto err; 37065748Sduboff } 37075748Sduboff if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) { 37085748Sduboff goto err; 37095748Sduboff } 37105748Sduboff dp->anadv_10fdx = (int)val; 37115748Sduboff break; 37125748Sduboff 37135748Sduboff case PARAM_ADV_10HDX_CAP: 37145748Sduboff if (val != 0 && val != 1) { 37155748Sduboff goto err; 37165748Sduboff } 37175748Sduboff if (val && (dp->mii_status & MII_STATUS_10) == 0) { 37185748Sduboff goto err; 37195748Sduboff } 37205748Sduboff dp->anadv_10hdx = (int)val; 37215748Sduboff break; 37225748Sduboff } 37235748Sduboff 37245748Sduboff /* sync with PHY */ 37255748Sduboff gem_choose_forcedmode(dp); 37265748Sduboff 37275748Sduboff dp->mii_state = MII_STATE_UNKNOWN; 37285748Sduboff if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) { 37295748Sduboff /* XXX - Can we ignore the return code ? */ 37305748Sduboff (void) gem_mii_link_check(dp); 37315748Sduboff } 37325748Sduboff 37335748Sduboff return (0); 37345748Sduboff err: 37355748Sduboff return (EINVAL); 37365748Sduboff } 37375748Sduboff 37385748Sduboff static void 37395748Sduboff gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item) 37405748Sduboff { 37415748Sduboff struct gem_nd_arg *arg; 37425748Sduboff 37435748Sduboff ASSERT(item >= 0); 37445748Sduboff ASSERT(item < PARAM_COUNT); 37455748Sduboff 37465748Sduboff arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item]; 37475748Sduboff arg->dp = dp; 37485748Sduboff arg->item = item; 37495748Sduboff 37505748Sduboff DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d", 37515748Sduboff dp->name, __func__, name, item)); 3752*7116Sduboff (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg); 37535748Sduboff } 37545748Sduboff 37555748Sduboff static void 37565748Sduboff gem_nd_setup(struct gem_dev *dp) 37575748Sduboff { 37585748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b", 37595748Sduboff dp->name, __func__, dp->mii_status, MII_STATUS_BITS)); 37605748Sduboff 37615748Sduboff ASSERT(dp->nd_arg_p == NULL); 37625748Sduboff 37635748Sduboff dp->nd_arg_p = 37645748Sduboff kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP); 37655748Sduboff 37665748Sduboff #define SETFUNC(x) ((x) ? gem_param_set : NULL) 37675748Sduboff 37685748Sduboff gem_nd_load(dp, "autoneg_cap", 37695748Sduboff gem_param_get, NULL, PARAM_AUTONEG_CAP); 37705748Sduboff gem_nd_load(dp, "pause_cap", 37715748Sduboff gem_param_get, NULL, PARAM_PAUSE_CAP); 37725748Sduboff gem_nd_load(dp, "asym_pause_cap", 37735748Sduboff gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP); 37745748Sduboff gem_nd_load(dp, "1000fdx_cap", 37755748Sduboff gem_param_get, NULL, PARAM_1000FDX_CAP); 37765748Sduboff gem_nd_load(dp, "1000hdx_cap", 37775748Sduboff gem_param_get, NULL, PARAM_1000HDX_CAP); 37785748Sduboff gem_nd_load(dp, "100T4_cap", 37795748Sduboff gem_param_get, NULL, PARAM_100T4_CAP); 37805748Sduboff gem_nd_load(dp, "100fdx_cap", 37815748Sduboff gem_param_get, NULL, PARAM_100FDX_CAP); 37825748Sduboff gem_nd_load(dp, "100hdx_cap", 37835748Sduboff gem_param_get, NULL, PARAM_100HDX_CAP); 37845748Sduboff gem_nd_load(dp, "10fdx_cap", 37855748Sduboff gem_param_get, NULL, PARAM_10FDX_CAP); 37865748Sduboff gem_nd_load(dp, "10hdx_cap", 37875748Sduboff gem_param_get, NULL, PARAM_10HDX_CAP); 37885748Sduboff 37895748Sduboff /* Our advertised capabilities */ 37905748Sduboff gem_nd_load(dp, "adv_autoneg_cap", gem_param_get, 37915748Sduboff SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG), 37925748Sduboff PARAM_ADV_AUTONEG_CAP); 37935748Sduboff gem_nd_load(dp, "adv_pause_cap", gem_param_get, 37945748Sduboff SETFUNC(dp->gc.gc_flow_control & 1), 37955748Sduboff PARAM_ADV_PAUSE_CAP); 37965748Sduboff gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get, 37975748Sduboff SETFUNC(dp->gc.gc_flow_control & 2), 37985748Sduboff PARAM_ADV_ASYM_PAUSE_CAP); 37995748Sduboff gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get, 38005748Sduboff SETFUNC(dp->mii_xstatus & 38015748Sduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)), 38025748Sduboff PARAM_ADV_1000FDX_CAP); 38035748Sduboff gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get, 38045748Sduboff SETFUNC(dp->mii_xstatus & 38055748Sduboff (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)), 38065748Sduboff PARAM_ADV_1000HDX_CAP); 38075748Sduboff gem_nd_load(dp, "adv_100T4_cap", gem_param_get, 38085748Sduboff SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) && 38095748Sduboff !dp->mii_advert_ro), 38105748Sduboff PARAM_ADV_100T4_CAP); 38115748Sduboff gem_nd_load(dp, "adv_100fdx_cap", gem_param_get, 38125748Sduboff SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) && 38135748Sduboff !dp->mii_advert_ro), 38145748Sduboff PARAM_ADV_100FDX_CAP); 38155748Sduboff gem_nd_load(dp, "adv_100hdx_cap", gem_param_get, 38165748Sduboff SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) && 38175748Sduboff !dp->mii_advert_ro), 38185748Sduboff PARAM_ADV_100HDX_CAP); 38195748Sduboff gem_nd_load(dp, "adv_10fdx_cap", gem_param_get, 38205748Sduboff SETFUNC((dp->mii_status & MII_STATUS_10_FD) && 38215748Sduboff !dp->mii_advert_ro), 38225748Sduboff PARAM_ADV_10FDX_CAP); 38235748Sduboff gem_nd_load(dp, "adv_10hdx_cap", gem_param_get, 38245748Sduboff SETFUNC((dp->mii_status & MII_STATUS_10) && 38255748Sduboff !dp->mii_advert_ro), 38265748Sduboff PARAM_ADV_10HDX_CAP); 38275748Sduboff 38285748Sduboff /* Partner's advertised capabilities */ 38295748Sduboff gem_nd_load(dp, "lp_autoneg_cap", 38305748Sduboff gem_param_get, NULL, PARAM_LP_AUTONEG_CAP); 38315748Sduboff gem_nd_load(dp, "lp_pause_cap", 38325748Sduboff gem_param_get, NULL, PARAM_LP_PAUSE_CAP); 38335748Sduboff gem_nd_load(dp, "lp_asym_pause_cap", 38345748Sduboff gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP); 38355748Sduboff gem_nd_load(dp, "lp_1000fdx_cap", 38365748Sduboff gem_param_get, NULL, PARAM_LP_1000FDX_CAP); 38375748Sduboff gem_nd_load(dp, "lp_1000hdx_cap", 38385748Sduboff gem_param_get, NULL, PARAM_LP_1000HDX_CAP); 38395748Sduboff gem_nd_load(dp, "lp_100T4_cap", 38405748Sduboff gem_param_get, NULL, PARAM_LP_100T4_CAP); 38415748Sduboff gem_nd_load(dp, "lp_100fdx_cap", 38425748Sduboff gem_param_get, NULL, PARAM_LP_100FDX_CAP); 38435748Sduboff gem_nd_load(dp, "lp_100hdx_cap", 38445748Sduboff gem_param_get, NULL, PARAM_LP_100HDX_CAP); 38455748Sduboff gem_nd_load(dp, "lp_10fdx_cap", 38465748Sduboff gem_param_get, NULL, PARAM_LP_10FDX_CAP); 38475748Sduboff gem_nd_load(dp, "lp_10hdx_cap", 38485748Sduboff gem_param_get, NULL, PARAM_LP_10HDX_CAP); 38495748Sduboff 38505748Sduboff /* Current operating modes */ 38515748Sduboff gem_nd_load(dp, "link_status", 38525748Sduboff gem_param_get, NULL, PARAM_LINK_STATUS); 38535748Sduboff gem_nd_load(dp, "link_speed", 38545748Sduboff gem_param_get, NULL, PARAM_LINK_SPEED); 38555748Sduboff gem_nd_load(dp, "link_duplex", 38565748Sduboff gem_param_get, NULL, PARAM_LINK_DUPLEX); 38575748Sduboff gem_nd_load(dp, "link_autoneg", 38585748Sduboff gem_param_get, NULL, PARAM_LINK_AUTONEG); 38595748Sduboff gem_nd_load(dp, "link_rx_pause", 38605748Sduboff gem_param_get, NULL, PARAM_LINK_RX_PAUSE); 38615748Sduboff gem_nd_load(dp, "link_tx_pause", 38625748Sduboff gem_param_get, NULL, PARAM_LINK_TX_PAUSE); 38635748Sduboff #ifdef DEBUG_RESUME 38645748Sduboff gem_nd_load(dp, "resume_test", 38655748Sduboff gem_param_get, NULL, PARAM_RESUME_TEST); 38665748Sduboff #endif 38675748Sduboff #undef SETFUNC 38685748Sduboff } 38695748Sduboff 38705748Sduboff static 38715748Sduboff enum ioc_reply 38725748Sduboff gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 38735748Sduboff { 38745748Sduboff boolean_t ok; 38755748Sduboff 38765748Sduboff ASSERT(mutex_owned(&dp->intrlock)); 38775748Sduboff 38785748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 38795748Sduboff 38805748Sduboff switch (iocp->ioc_cmd) { 38815748Sduboff case ND_GET: 38825748Sduboff ok = nd_getset(wq, dp->nd_data_p, mp); 38835748Sduboff DPRINTF(0, (CE_CONT, 38845748Sduboff "%s: get %s", dp->name, ok ? "OK" : "FAIL")); 38855748Sduboff return (ok ? IOC_REPLY : IOC_INVAL); 38865748Sduboff 38875748Sduboff case ND_SET: 38885748Sduboff ok = nd_getset(wq, dp->nd_data_p, mp); 38895748Sduboff 38905748Sduboff DPRINTF(0, (CE_CONT, "%s: set %s err %d", 38915748Sduboff dp->name, ok ? "OK" : "FAIL", iocp->ioc_error)); 38925748Sduboff 38935748Sduboff if (!ok) { 38945748Sduboff return (IOC_INVAL); 38955748Sduboff } 38965748Sduboff 38975748Sduboff if (iocp->ioc_error) { 38985748Sduboff return (IOC_REPLY); 38995748Sduboff } 39005748Sduboff 39015748Sduboff return (IOC_RESTART_REPLY); 39025748Sduboff } 39035748Sduboff 39045748Sduboff cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd); 39055748Sduboff 39065748Sduboff return (IOC_INVAL); 39075748Sduboff } 39085748Sduboff 39095748Sduboff static void 39105748Sduboff gem_nd_cleanup(struct gem_dev *dp) 39115748Sduboff { 39125748Sduboff ASSERT(dp->nd_data_p != NULL); 39135748Sduboff ASSERT(dp->nd_arg_p != NULL); 39145748Sduboff 39155748Sduboff nd_free(&dp->nd_data_p); 39165748Sduboff 39175748Sduboff kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT); 39185748Sduboff dp->nd_arg_p = NULL; 39195748Sduboff } 39205748Sduboff 39215748Sduboff static void 39225748Sduboff gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp) 39235748Sduboff { 39245748Sduboff struct iocblk *iocp; 39255748Sduboff enum ioc_reply status; 39265748Sduboff int cmd; 39275748Sduboff 39285748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 39295748Sduboff 39305748Sduboff /* 39315748Sduboff * Validate the command before bothering with the mutex ... 39325748Sduboff */ 39335748Sduboff iocp = (void *)mp->b_rptr; 39345748Sduboff iocp->ioc_error = 0; 39355748Sduboff cmd = iocp->ioc_cmd; 39365748Sduboff 39375748Sduboff DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd)); 39385748Sduboff 39395748Sduboff mutex_enter(&dp->intrlock); 39405748Sduboff mutex_enter(&dp->xmitlock); 39415748Sduboff 39425748Sduboff switch (cmd) { 39435748Sduboff default: 39445748Sduboff _NOTE(NOTREACHED) 39455748Sduboff status = IOC_INVAL; 39465748Sduboff break; 39475748Sduboff 39485748Sduboff case ND_GET: 39495748Sduboff case ND_SET: 39505748Sduboff status = gem_nd_ioctl(dp, wq, mp, iocp); 39515748Sduboff break; 39525748Sduboff } 39535748Sduboff 39545748Sduboff mutex_exit(&dp->xmitlock); 39555748Sduboff mutex_exit(&dp->intrlock); 39565748Sduboff 39575748Sduboff #ifdef DEBUG_RESUME 39585748Sduboff if (cmd == ND_GET) { 39595748Sduboff gem_suspend(dp->dip); 39605748Sduboff gem_resume(dp->dip); 39615748Sduboff } 39625748Sduboff #endif 39635748Sduboff /* 39645748Sduboff * Finally, decide how to reply 39655748Sduboff */ 39665748Sduboff switch (status) { 39675748Sduboff default: 39685748Sduboff case IOC_INVAL: 39695748Sduboff /* 39705748Sduboff * Error, reply with a NAK and EINVAL or the specified error 39715748Sduboff */ 39725748Sduboff miocnak(wq, mp, 0, iocp->ioc_error == 0 ? 39735748Sduboff EINVAL : iocp->ioc_error); 39745748Sduboff break; 39755748Sduboff 39765748Sduboff case IOC_DONE: 39775748Sduboff /* 39785748Sduboff * OK, reply already sent 39795748Sduboff */ 39805748Sduboff break; 39815748Sduboff 39825748Sduboff case IOC_RESTART_ACK: 39835748Sduboff case IOC_ACK: 39845748Sduboff /* 39855748Sduboff * OK, reply with an ACK 39865748Sduboff */ 39875748Sduboff miocack(wq, mp, 0, 0); 39885748Sduboff break; 39895748Sduboff 39905748Sduboff case IOC_RESTART_REPLY: 39915748Sduboff case IOC_REPLY: 39925748Sduboff /* 39935748Sduboff * OK, send prepared reply as ACK or NAK 39945748Sduboff */ 39955748Sduboff mp->b_datap->db_type = 39965748Sduboff iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK; 39975748Sduboff qreply(wq, mp); 39985748Sduboff break; 39995748Sduboff } 40005748Sduboff } 40015748Sduboff 40025748Sduboff #ifndef SYS_MAC_H 40035748Sduboff #define XCVR_UNDEFINED 0 40045748Sduboff #define XCVR_NONE 1 40055748Sduboff #define XCVR_10 2 40065748Sduboff #define XCVR_100T4 3 40075748Sduboff #define XCVR_100X 4 40085748Sduboff #define XCVR_100T2 5 40095748Sduboff #define XCVR_1000X 6 40105748Sduboff #define XCVR_1000T 7 40115748Sduboff #endif 40125748Sduboff static int 40135748Sduboff gem_mac_xcvr_inuse(struct gem_dev *dp) 40145748Sduboff { 40155748Sduboff int val = XCVR_UNDEFINED; 40165748Sduboff 40175748Sduboff if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) { 40185748Sduboff if (dp->mii_status & MII_STATUS_100_BASE_T4) { 40195748Sduboff val = XCVR_100T4; 40205748Sduboff } else if (dp->mii_status & 40215748Sduboff (MII_STATUS_100_BASEX_FD | 40225748Sduboff MII_STATUS_100_BASEX)) { 40235748Sduboff val = XCVR_100X; 40245748Sduboff } else if (dp->mii_status & 40255748Sduboff (MII_STATUS_100_BASE_T2_FD | 40265748Sduboff MII_STATUS_100_BASE_T2)) { 40275748Sduboff val = XCVR_100T2; 40285748Sduboff } else if (dp->mii_status & 40295748Sduboff (MII_STATUS_10_FD | MII_STATUS_10)) { 40305748Sduboff val = XCVR_10; 40315748Sduboff } 40325748Sduboff } else if (dp->mii_xstatus & 40335748Sduboff (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) { 40345748Sduboff val = XCVR_1000T; 40355748Sduboff } else if (dp->mii_xstatus & 40365748Sduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) { 40375748Sduboff val = XCVR_1000X; 40385748Sduboff } 40395748Sduboff 40405748Sduboff return (val); 40415748Sduboff } 40425748Sduboff 40435748Sduboff /* ============================================================== */ 40445748Sduboff /* 40455748Sduboff * GLDv3 interface 40465748Sduboff */ 40475748Sduboff /* ============================================================== */ 40485748Sduboff static int gem_m_getstat(void *, uint_t, uint64_t *); 40495748Sduboff static int gem_m_start(void *); 40505748Sduboff static void gem_m_stop(void *); 40515748Sduboff static int gem_m_setpromisc(void *, boolean_t); 40525748Sduboff static int gem_m_multicst(void *, boolean_t, const uint8_t *); 40535748Sduboff static int gem_m_unicst(void *, const uint8_t *); 40545748Sduboff static mblk_t *gem_m_tx(void *, mblk_t *); 40555748Sduboff static void gem_m_resources(void *); 40565748Sduboff static void gem_m_ioctl(void *, queue_t *, mblk_t *); 40575748Sduboff static boolean_t gem_m_getcapab(void *, mac_capab_t, void *); 40585748Sduboff 40595748Sduboff #define GEM_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB) 40605748Sduboff 40615748Sduboff static mac_callbacks_t gem_m_callbacks = { 40625748Sduboff GEM_M_CALLBACK_FLAGS, 40635748Sduboff gem_m_getstat, 40645748Sduboff gem_m_start, 40655748Sduboff gem_m_stop, 40665748Sduboff gem_m_setpromisc, 40675748Sduboff gem_m_multicst, 40685748Sduboff gem_m_unicst, 40695748Sduboff gem_m_tx, 40705748Sduboff gem_m_resources, 40715748Sduboff gem_m_ioctl, 40725748Sduboff gem_m_getcapab, 40735748Sduboff }; 40745748Sduboff 40755748Sduboff static int 40765748Sduboff gem_m_start(void *arg) 40775748Sduboff { 40785748Sduboff int err = 0; 40795748Sduboff struct gem_dev *dp = arg; 40805748Sduboff 40815748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 40825748Sduboff 40835748Sduboff mutex_enter(&dp->intrlock); 40845748Sduboff if (dp->mac_suspended) { 40855748Sduboff err = EIO; 40865748Sduboff goto x; 40875748Sduboff } 40885748Sduboff if (gem_mac_init(dp) != GEM_SUCCESS) { 40895748Sduboff err = EIO; 40905748Sduboff goto x; 40915748Sduboff } 40925748Sduboff dp->nic_state = NIC_STATE_INITIALIZED; 40935748Sduboff 40945748Sduboff /* reset rx filter state */ 40955748Sduboff dp->mc_count = 0; 40965748Sduboff dp->mc_count_req = 0; 40975748Sduboff 40985748Sduboff /* setup media mode if the link have been up */ 40995748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 41005748Sduboff (dp->gc.gc_set_media)(dp); 41015748Sduboff } 41025748Sduboff 41035748Sduboff /* setup initial rx filter */ 41045748Sduboff bcopy(dp->dev_addr.ether_addr_octet, 41055748Sduboff dp->cur_addr.ether_addr_octet, ETHERADDRL); 41065748Sduboff dp->rxmode |= RXMODE_ENABLE; 41075748Sduboff 41085748Sduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) { 41095748Sduboff err = EIO; 41105748Sduboff goto x; 41115748Sduboff } 41125748Sduboff 41135748Sduboff dp->nic_state = NIC_STATE_ONLINE; 41145748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 41155748Sduboff if (gem_mac_start(dp) != GEM_SUCCESS) { 41165748Sduboff err = EIO; 41175748Sduboff goto x; 41185748Sduboff } 41195748Sduboff } 41205748Sduboff 41215748Sduboff dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout, 41225748Sduboff (void *)dp, dp->gc.gc_tx_timeout_interval); 41235748Sduboff mutex_exit(&dp->intrlock); 41245748Sduboff 41255748Sduboff return (0); 41265748Sduboff x: 41275748Sduboff dp->nic_state = NIC_STATE_STOPPED; 41285748Sduboff mutex_exit(&dp->intrlock); 41295748Sduboff return (err); 41305748Sduboff } 41315748Sduboff 41325748Sduboff static void 41335748Sduboff gem_m_stop(void *arg) 41345748Sduboff { 41355748Sduboff struct gem_dev *dp = arg; 41365748Sduboff 41375748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 41385748Sduboff 41395748Sduboff /* stop rx */ 41405748Sduboff mutex_enter(&dp->intrlock); 41415748Sduboff if (dp->mac_suspended) { 41425748Sduboff mutex_exit(&dp->intrlock); 41435748Sduboff return; 41445748Sduboff } 41455748Sduboff dp->rxmode &= ~RXMODE_ENABLE; 41465748Sduboff (void) gem_mac_set_rx_filter(dp); 41475748Sduboff mutex_exit(&dp->intrlock); 41485748Sduboff 41495748Sduboff /* stop tx timeout watcher */ 41505748Sduboff if (dp->timeout_id) { 41515748Sduboff while (untimeout(dp->timeout_id) == -1) 41525748Sduboff ; 41535748Sduboff dp->timeout_id = 0; 41545748Sduboff } 41555748Sduboff 41565748Sduboff /* make the nic state inactive */ 41575748Sduboff mutex_enter(&dp->intrlock); 41585748Sduboff if (dp->mac_suspended) { 41595748Sduboff mutex_exit(&dp->intrlock); 41605748Sduboff return; 41615748Sduboff } 41625748Sduboff dp->nic_state = NIC_STATE_STOPPED; 41635748Sduboff 41645748Sduboff /* we need deassert mac_active due to block interrupt handler */ 41655748Sduboff mutex_enter(&dp->xmitlock); 41665748Sduboff dp->mac_active = B_FALSE; 41675748Sduboff mutex_exit(&dp->xmitlock); 41685748Sduboff 41695748Sduboff /* block interrupts */ 41705748Sduboff while (dp->intr_busy) { 41715748Sduboff cv_wait(&dp->tx_drain_cv, &dp->intrlock); 41725748Sduboff } 41735748Sduboff (void) gem_mac_stop(dp, 0); 41745748Sduboff mutex_exit(&dp->intrlock); 41755748Sduboff } 41765748Sduboff 41775748Sduboff static int 41785748Sduboff gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep) 41795748Sduboff { 41805748Sduboff int err; 41815748Sduboff int ret; 41825748Sduboff struct gem_dev *dp = arg; 41835748Sduboff 41845748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 41855748Sduboff 41865748Sduboff if (add) { 41875748Sduboff ret = gem_add_multicast(dp, ep); 41885748Sduboff } else { 41895748Sduboff ret = gem_remove_multicast(dp, ep); 41905748Sduboff } 41915748Sduboff 41925748Sduboff err = 0; 41935748Sduboff if (ret != GEM_SUCCESS) { 41945748Sduboff err = EIO; 41955748Sduboff } 41965748Sduboff 41975748Sduboff return (err); 41985748Sduboff } 41995748Sduboff 42005748Sduboff static int 42015748Sduboff gem_m_setpromisc(void *arg, boolean_t on) 42025748Sduboff { 42035748Sduboff int err = 0; /* no error */ 42045748Sduboff struct gem_dev *dp = arg; 42055748Sduboff 42065748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 42075748Sduboff 42085748Sduboff mutex_enter(&dp->intrlock); 42095748Sduboff if (dp->mac_suspended) { 42105748Sduboff mutex_exit(&dp->intrlock); 42115748Sduboff return (EIO); 42125748Sduboff } 42135748Sduboff if (on) { 42145748Sduboff dp->rxmode |= RXMODE_PROMISC; 42155748Sduboff } else { 42165748Sduboff dp->rxmode &= ~RXMODE_PROMISC; 42175748Sduboff } 42185748Sduboff 42195748Sduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) { 42205748Sduboff err = EIO; 42215748Sduboff } 42225748Sduboff mutex_exit(&dp->intrlock); 42235748Sduboff 42245748Sduboff return (err); 42255748Sduboff } 42265748Sduboff 42275748Sduboff int 42285748Sduboff gem_m_getstat(void *arg, uint_t stat, uint64_t *valp) 42295748Sduboff { 42305748Sduboff struct gem_dev *dp = arg; 42315748Sduboff struct gem_stats *gstp = &dp->stats; 42325748Sduboff uint64_t val = 0; 42335748Sduboff 42345748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 42355748Sduboff 4236*7116Sduboff if (mutex_owned(&dp->intrlock)) { 4237*7116Sduboff if (dp->mac_suspended) { 4238*7116Sduboff return (EIO); 4239*7116Sduboff } 4240*7116Sduboff } else { 4241*7116Sduboff mutex_enter(&dp->intrlock); 4242*7116Sduboff if (dp->mac_suspended) { 4243*7116Sduboff mutex_exit(&dp->intrlock); 4244*7116Sduboff return (EIO); 4245*7116Sduboff } 42465748Sduboff mutex_exit(&dp->intrlock); 4247*7116Sduboff } 42485748Sduboff 42495748Sduboff if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) { 42505748Sduboff return (EIO); 42515748Sduboff } 42525748Sduboff 42535748Sduboff switch (stat) { 42545748Sduboff case MAC_STAT_IFSPEED: 42555748Sduboff val = gem_speed_value[dp->speed] *1000000ull; 42565748Sduboff break; 42575748Sduboff 42585748Sduboff case MAC_STAT_MULTIRCV: 42595748Sduboff val = gstp->rmcast; 42605748Sduboff break; 42615748Sduboff 42625748Sduboff case MAC_STAT_BRDCSTRCV: 42635748Sduboff val = gstp->rbcast; 42645748Sduboff break; 42655748Sduboff 42665748Sduboff case MAC_STAT_MULTIXMT: 42675748Sduboff val = gstp->omcast; 42685748Sduboff break; 42695748Sduboff 42705748Sduboff case MAC_STAT_BRDCSTXMT: 42715748Sduboff val = gstp->obcast; 42725748Sduboff break; 42735748Sduboff 42745748Sduboff case MAC_STAT_NORCVBUF: 42755748Sduboff val = gstp->norcvbuf + gstp->missed; 42765748Sduboff break; 42775748Sduboff 42785748Sduboff case MAC_STAT_IERRORS: 42795748Sduboff val = gstp->errrcv; 42805748Sduboff break; 42815748Sduboff 42825748Sduboff case MAC_STAT_NOXMTBUF: 42835748Sduboff val = gstp->noxmtbuf; 42845748Sduboff break; 42855748Sduboff 42865748Sduboff case MAC_STAT_OERRORS: 42875748Sduboff val = gstp->errxmt; 42885748Sduboff break; 42895748Sduboff 42905748Sduboff case MAC_STAT_COLLISIONS: 42915748Sduboff val = gstp->collisions; 42925748Sduboff break; 42935748Sduboff 42945748Sduboff case MAC_STAT_RBYTES: 42955748Sduboff val = gstp->rbytes; 42965748Sduboff break; 42975748Sduboff 42985748Sduboff case MAC_STAT_IPACKETS: 42995748Sduboff val = gstp->rpackets; 43005748Sduboff break; 43015748Sduboff 43025748Sduboff case MAC_STAT_OBYTES: 43035748Sduboff val = gstp->obytes; 43045748Sduboff break; 43055748Sduboff 43065748Sduboff case MAC_STAT_OPACKETS: 43075748Sduboff val = gstp->opackets; 43085748Sduboff break; 43095748Sduboff 43105748Sduboff case MAC_STAT_UNDERFLOWS: 43115748Sduboff val = gstp->underflow; 43125748Sduboff break; 43135748Sduboff 43145748Sduboff case MAC_STAT_OVERFLOWS: 43155748Sduboff val = gstp->overflow; 43165748Sduboff break; 43175748Sduboff 43185748Sduboff case ETHER_STAT_ALIGN_ERRORS: 43195748Sduboff val = gstp->frame; 43205748Sduboff break; 43215748Sduboff 43225748Sduboff case ETHER_STAT_FCS_ERRORS: 43235748Sduboff val = gstp->crc; 43245748Sduboff break; 43255748Sduboff 43265748Sduboff case ETHER_STAT_FIRST_COLLISIONS: 43275748Sduboff val = gstp->first_coll; 43285748Sduboff break; 43295748Sduboff 43305748Sduboff case ETHER_STAT_MULTI_COLLISIONS: 43315748Sduboff val = gstp->multi_coll; 43325748Sduboff break; 43335748Sduboff 43345748Sduboff case ETHER_STAT_SQE_ERRORS: 43355748Sduboff val = gstp->sqe; 43365748Sduboff break; 43375748Sduboff 43385748Sduboff case ETHER_STAT_DEFER_XMTS: 43395748Sduboff val = gstp->defer; 43405748Sduboff break; 43415748Sduboff 43425748Sduboff case ETHER_STAT_TX_LATE_COLLISIONS: 43435748Sduboff val = gstp->xmtlatecoll; 43445748Sduboff break; 43455748Sduboff 43465748Sduboff case ETHER_STAT_EX_COLLISIONS: 43475748Sduboff val = gstp->excoll; 43485748Sduboff break; 43495748Sduboff 43505748Sduboff case ETHER_STAT_MACXMT_ERRORS: 43515748Sduboff val = gstp->xmit_internal_err; 43525748Sduboff break; 43535748Sduboff 43545748Sduboff case ETHER_STAT_CARRIER_ERRORS: 43555748Sduboff val = gstp->nocarrier; 43565748Sduboff break; 43575748Sduboff 43585748Sduboff case ETHER_STAT_TOOLONG_ERRORS: 43595748Sduboff val = gstp->frame_too_long; 43605748Sduboff break; 43615748Sduboff 43625748Sduboff case ETHER_STAT_MACRCV_ERRORS: 43635748Sduboff val = gstp->rcv_internal_err; 43645748Sduboff break; 43655748Sduboff 43665748Sduboff case ETHER_STAT_XCVR_ADDR: 43675748Sduboff val = dp->mii_phy_addr; 43685748Sduboff break; 43695748Sduboff 43705748Sduboff case ETHER_STAT_XCVR_ID: 43715748Sduboff val = dp->mii_phy_id; 43725748Sduboff break; 43735748Sduboff 43745748Sduboff case ETHER_STAT_XCVR_INUSE: 43755748Sduboff val = gem_mac_xcvr_inuse(dp); 43765748Sduboff break; 43775748Sduboff 43785748Sduboff case ETHER_STAT_CAP_1000FDX: 43795748Sduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) || 43805748Sduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD); 43815748Sduboff break; 43825748Sduboff 43835748Sduboff case ETHER_STAT_CAP_1000HDX: 43845748Sduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) || 43855748Sduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX); 43865748Sduboff break; 43875748Sduboff 43885748Sduboff case ETHER_STAT_CAP_100FDX: 43895748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD); 43905748Sduboff break; 43915748Sduboff 43925748Sduboff case ETHER_STAT_CAP_100HDX: 43935748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX); 43945748Sduboff break; 43955748Sduboff 43965748Sduboff case ETHER_STAT_CAP_10FDX: 43975748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD); 43985748Sduboff break; 43995748Sduboff 44005748Sduboff case ETHER_STAT_CAP_10HDX: 44015748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10); 44025748Sduboff break; 44035748Sduboff 44045748Sduboff case ETHER_STAT_CAP_ASMPAUSE: 44055748Sduboff val = BOOLEAN(dp->gc.gc_flow_control & 2); 44065748Sduboff break; 44075748Sduboff 44085748Sduboff case ETHER_STAT_CAP_PAUSE: 44095748Sduboff val = BOOLEAN(dp->gc.gc_flow_control & 1); 44105748Sduboff break; 44115748Sduboff 44125748Sduboff case ETHER_STAT_CAP_AUTONEG: 4413*7116Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG); 44145748Sduboff break; 44155748Sduboff 44165748Sduboff case ETHER_STAT_ADV_CAP_1000FDX: 44175748Sduboff val = dp->anadv_1000fdx; 44185748Sduboff break; 44195748Sduboff 44205748Sduboff case ETHER_STAT_ADV_CAP_1000HDX: 44215748Sduboff val = dp->anadv_1000hdx; 44225748Sduboff break; 44235748Sduboff 44245748Sduboff case ETHER_STAT_ADV_CAP_100FDX: 44255748Sduboff val = dp->anadv_100fdx; 44265748Sduboff break; 44275748Sduboff 44285748Sduboff case ETHER_STAT_ADV_CAP_100HDX: 44295748Sduboff val = dp->anadv_100hdx; 44305748Sduboff break; 44315748Sduboff 44325748Sduboff case ETHER_STAT_ADV_CAP_10FDX: 44335748Sduboff val = dp->anadv_10fdx; 44345748Sduboff break; 44355748Sduboff 44365748Sduboff case ETHER_STAT_ADV_CAP_10HDX: 44375748Sduboff val = dp->anadv_10hdx; 44385748Sduboff break; 44395748Sduboff 44405748Sduboff case ETHER_STAT_ADV_CAP_ASMPAUSE: 44415748Sduboff val = BOOLEAN(dp->anadv_flow_control & 2); 44425748Sduboff break; 44435748Sduboff 44445748Sduboff case ETHER_STAT_ADV_CAP_PAUSE: 44455748Sduboff val = BOOLEAN(dp->anadv_flow_control & 1); 44465748Sduboff break; 44475748Sduboff 44485748Sduboff case ETHER_STAT_ADV_CAP_AUTONEG: 44495748Sduboff val = dp->anadv_autoneg; 44505748Sduboff break; 44515748Sduboff 44525748Sduboff case ETHER_STAT_LP_CAP_1000FDX: 44535748Sduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL); 44545748Sduboff break; 44555748Sduboff 44565748Sduboff case ETHER_STAT_LP_CAP_1000HDX: 44575748Sduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF); 44585748Sduboff break; 44595748Sduboff 44605748Sduboff case ETHER_STAT_LP_CAP_100FDX: 44615748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD); 44625748Sduboff break; 44635748Sduboff 44645748Sduboff case ETHER_STAT_LP_CAP_100HDX: 44655748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX); 44665748Sduboff break; 44675748Sduboff 44685748Sduboff case ETHER_STAT_LP_CAP_10FDX: 44695748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD); 44705748Sduboff break; 44715748Sduboff 44725748Sduboff case ETHER_STAT_LP_CAP_10HDX: 44735748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T); 44745748Sduboff break; 44755748Sduboff 44765748Sduboff case ETHER_STAT_LP_CAP_ASMPAUSE: 44775748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR); 44785748Sduboff break; 44795748Sduboff 44805748Sduboff case ETHER_STAT_LP_CAP_PAUSE: 44815748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE); 44825748Sduboff break; 44835748Sduboff 44845748Sduboff case ETHER_STAT_LP_CAP_AUTONEG: 44855748Sduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); 44865748Sduboff break; 44875748Sduboff 44885748Sduboff case ETHER_STAT_LINK_ASMPAUSE: 44895748Sduboff val = BOOLEAN(dp->flow_control & 2); 44905748Sduboff break; 44915748Sduboff 44925748Sduboff case ETHER_STAT_LINK_PAUSE: 44935748Sduboff val = BOOLEAN(dp->flow_control & 1); 44945748Sduboff break; 44955748Sduboff 44965748Sduboff case ETHER_STAT_LINK_AUTONEG: 44975748Sduboff val = dp->anadv_autoneg && 44985748Sduboff BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); 44995748Sduboff break; 45005748Sduboff 45015748Sduboff case ETHER_STAT_LINK_DUPLEX: 45025748Sduboff val = (dp->mii_state == MII_STATE_LINKUP) ? 45035748Sduboff (dp->full_duplex ? 2 : 1) : 0; 45045748Sduboff break; 45055748Sduboff 45065748Sduboff case ETHER_STAT_TOOSHORT_ERRORS: 45075748Sduboff val = gstp->runt; 45085748Sduboff break; 45095748Sduboff case ETHER_STAT_LP_REMFAULT: 45105748Sduboff val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT); 45115748Sduboff break; 45125748Sduboff 45135748Sduboff case ETHER_STAT_JABBER_ERRORS: 45145748Sduboff val = gstp->jabber; 45155748Sduboff break; 45165748Sduboff 45175748Sduboff case ETHER_STAT_CAP_100T4: 45185748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4); 45195748Sduboff break; 45205748Sduboff 45215748Sduboff case ETHER_STAT_ADV_CAP_100T4: 45225748Sduboff val = dp->anadv_100t4; 45235748Sduboff break; 45245748Sduboff 45255748Sduboff case ETHER_STAT_LP_CAP_100T4: 45265748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4); 45275748Sduboff break; 45285748Sduboff 45295748Sduboff default: 45305748Sduboff #if GEM_DEBUG_LEVEL > 2 45315748Sduboff cmn_err(CE_WARN, 45325748Sduboff "%s: unrecognized parameter value = %d", 45335748Sduboff __func__, stat); 45345748Sduboff #endif 45355748Sduboff return (ENOTSUP); 45365748Sduboff } 45375748Sduboff 45385748Sduboff *valp = val; 45395748Sduboff 45405748Sduboff return (0); 45415748Sduboff } 45425748Sduboff 45435748Sduboff static int 45445748Sduboff gem_m_unicst(void *arg, const uint8_t *mac) 45455748Sduboff { 45465748Sduboff int err = 0; 45475748Sduboff struct gem_dev *dp = arg; 45485748Sduboff 45495748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 45505748Sduboff 45515748Sduboff mutex_enter(&dp->intrlock); 45525748Sduboff if (dp->mac_suspended) { 45535748Sduboff mutex_exit(&dp->intrlock); 45545748Sduboff return (EIO); 45555748Sduboff } 45565748Sduboff bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL); 45575748Sduboff dp->rxmode |= RXMODE_ENABLE; 45585748Sduboff 45595748Sduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) { 45605748Sduboff err = EIO; 45615748Sduboff } 45625748Sduboff mutex_exit(&dp->intrlock); 45635748Sduboff 45645748Sduboff return (err); 45655748Sduboff } 45665748Sduboff 45675748Sduboff /* 45685748Sduboff * gem_m_tx is used only for sending data packets into ethernet wire. 45695748Sduboff */ 45705748Sduboff static mblk_t * 45715748Sduboff gem_m_tx(void *arg, mblk_t *mp) 45725748Sduboff { 45735748Sduboff uint32_t flags = 0; 45745748Sduboff struct gem_dev *dp = arg; 45755748Sduboff mblk_t *tp; 45765748Sduboff 45775748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 45785748Sduboff 45795748Sduboff ASSERT(dp->nic_state == NIC_STATE_ONLINE); 45805748Sduboff if (dp->mii_state != MII_STATE_LINKUP) { 45815748Sduboff /* Some nics hate to send packets when the link is down. */ 45825748Sduboff while (mp) { 45835748Sduboff tp = mp->b_next; 45845748Sduboff mp->b_next = NULL; 45855748Sduboff freemsg(mp); 45865748Sduboff mp = tp; 45875748Sduboff } 45885748Sduboff return (NULL); 45895748Sduboff } 45905748Sduboff 45915748Sduboff return (gem_send_common(dp, mp, flags)); 45925748Sduboff } 45935748Sduboff 45945748Sduboff static void 45955748Sduboff gem_set_coalease(void *arg, time_t ticks, uint_t count) 45965748Sduboff { 45975748Sduboff struct gem_dev *dp = arg; 45985748Sduboff DPRINTF(1, (CE_CONT, "%s: %s: ticks:%d count:%d", 45995748Sduboff dp->name, __func__, ticks, count)); 46005748Sduboff 46015748Sduboff mutex_enter(&dp->intrlock); 4602*7116Sduboff dp->poll_pkt_delay = min(count, dp->gc.gc_rx_ring_size/2); 46035748Sduboff mutex_exit(&dp->intrlock); 46045748Sduboff } 46055748Sduboff 46065748Sduboff static void 46075748Sduboff gem_m_resources(void *arg) 46085748Sduboff { 46095748Sduboff struct gem_dev *dp = arg; 46105748Sduboff mac_rx_fifo_t mrf; 46115748Sduboff 46125748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 46135748Sduboff 46145748Sduboff mutex_enter(&dp->intrlock); 46155748Sduboff mutex_enter(&dp->xmitlock); 46165748Sduboff 46175748Sduboff /* 46185748Sduboff * Register Rx rings as resources and save mac 46195748Sduboff * resource id for future reference 46205748Sduboff */ 46215748Sduboff mrf.mrf_type = MAC_RX_FIFO; 46225748Sduboff mrf.mrf_blank = gem_set_coalease; 46235748Sduboff mrf.mrf_arg = (void *)dp; 4624*7116Sduboff mrf.mrf_normal_blank_time = 1; /* in uS */ 46255748Sduboff mrf.mrf_normal_pkt_count = dp->poll_pkt_delay; 46265748Sduboff 46275748Sduboff dp->mac_rx_ring_ha = mac_resource_add(dp->mh, (mac_resource_t *)&mrf); 46285748Sduboff 46295748Sduboff mutex_exit(&dp->xmitlock); 46305748Sduboff mutex_exit(&dp->intrlock); 46315748Sduboff } 46325748Sduboff 46335748Sduboff static void 46345748Sduboff gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 46355748Sduboff { 46365748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", 46375748Sduboff ((struct gem_dev *)arg)->name, __func__)); 46385748Sduboff 46395748Sduboff gem_mac_ioctl((struct gem_dev *)arg, wq, mp); 46405748Sduboff } 46415748Sduboff 46425748Sduboff static boolean_t 46435748Sduboff gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 46445748Sduboff { 46455748Sduboff boolean_t ret; 46465748Sduboff 46475748Sduboff ret = B_FALSE; 46485748Sduboff switch (cap) { 46495748Sduboff case MAC_CAPAB_POLL: 46505748Sduboff ret = B_TRUE; 46515748Sduboff break; 46525748Sduboff } 46535748Sduboff return (ret); 46545748Sduboff } 46555748Sduboff 46565748Sduboff static void 46575748Sduboff gem_gld3_init(struct gem_dev *dp, mac_register_t *macp) 46585748Sduboff { 46595748Sduboff macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 46605748Sduboff macp->m_driver = dp; 46615748Sduboff macp->m_dip = dp->dip; 46625748Sduboff macp->m_src_addr = dp->dev_addr.ether_addr_octet; 46635748Sduboff macp->m_callbacks = &gem_m_callbacks; 46645748Sduboff macp->m_min_sdu = 0; 46655748Sduboff macp->m_max_sdu = dp->mtu; 4666*7116Sduboff 4667*7116Sduboff if (dp->misc_flag & GEM_VLAN) { 4668*7116Sduboff macp->m_margin = VTAG_SIZE; 4669*7116Sduboff } 46705748Sduboff } 46715748Sduboff 46725748Sduboff /* ======================================================================== */ 46735748Sduboff /* 46745748Sduboff * attach/detatch support 46755748Sduboff */ 46765748Sduboff /* ======================================================================== */ 46775748Sduboff static void 46785748Sduboff gem_read_conf(struct gem_dev *dp) 46795748Sduboff { 4680*7116Sduboff int val; 46815748Sduboff 46825748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 46835748Sduboff 46845748Sduboff /* 46855748Sduboff * Get media mode infomation from .conf file 46865748Sduboff */ 46875748Sduboff dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0; 46885748Sduboff dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0; 46895748Sduboff dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0; 46905748Sduboff dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0; 46915748Sduboff dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0; 46925748Sduboff dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0; 46935748Sduboff dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0; 46945748Sduboff dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0; 46955748Sduboff 46965748Sduboff if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip, 46975748Sduboff DDI_PROP_DONTPASS, "full-duplex"))) { 46985748Sduboff dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0; 46995748Sduboff dp->anadv_autoneg = B_FALSE; 4700*7116Sduboff if (dp->full_duplex) { 4701*7116Sduboff dp->anadv_1000hdx = B_FALSE; 4702*7116Sduboff dp->anadv_100hdx = B_FALSE; 4703*7116Sduboff dp->anadv_10hdx = B_FALSE; 4704*7116Sduboff } else { 4705*7116Sduboff dp->anadv_1000fdx = B_FALSE; 4706*7116Sduboff dp->anadv_100fdx = B_FALSE; 4707*7116Sduboff dp->anadv_10fdx = B_FALSE; 4708*7116Sduboff } 47095748Sduboff } 47105748Sduboff 47115748Sduboff if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) { 47125748Sduboff dp->anadv_autoneg = B_FALSE; 47135748Sduboff switch (val) { 47145748Sduboff case 1000: 47155748Sduboff dp->speed = GEM_SPD_1000; 47165748Sduboff dp->anadv_100t4 = B_FALSE; 47175748Sduboff dp->anadv_100fdx = B_FALSE; 47185748Sduboff dp->anadv_100hdx = B_FALSE; 47195748Sduboff dp->anadv_10fdx = B_FALSE; 47205748Sduboff dp->anadv_10hdx = B_FALSE; 47215748Sduboff break; 47225748Sduboff case 100: 47235748Sduboff dp->speed = GEM_SPD_100; 47245748Sduboff dp->anadv_1000fdx = B_FALSE; 47255748Sduboff dp->anadv_1000hdx = B_FALSE; 47265748Sduboff dp->anadv_10fdx = B_FALSE; 47275748Sduboff dp->anadv_10hdx = B_FALSE; 47285748Sduboff break; 47295748Sduboff case 10: 47305748Sduboff dp->speed = GEM_SPD_10; 47315748Sduboff dp->anadv_1000fdx = B_FALSE; 47325748Sduboff dp->anadv_1000hdx = B_FALSE; 47335748Sduboff dp->anadv_100t4 = B_FALSE; 47345748Sduboff dp->anadv_100fdx = B_FALSE; 47355748Sduboff dp->anadv_100hdx = B_FALSE; 47365748Sduboff break; 47375748Sduboff default: 47385748Sduboff cmn_err(CE_WARN, 47395748Sduboff "!%s: property %s: illegal value:%d", 4740*7116Sduboff dp->name, "speed", val); 47415748Sduboff dp->anadv_autoneg = B_TRUE; 47425748Sduboff break; 47435748Sduboff } 47445748Sduboff } 47455748Sduboff 47465748Sduboff val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control); 47475748Sduboff if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) { 47485748Sduboff cmn_err(CE_WARN, 47495748Sduboff "!%s: property %s: illegal value:%d", 4750*7116Sduboff dp->name, "flow-control", val); 47515748Sduboff } else { 47525748Sduboff val = min(val, dp->gc.gc_flow_control); 47535748Sduboff } 47545748Sduboff dp->anadv_flow_control = val; 47555748Sduboff 47565748Sduboff if (gem_prop_get_int(dp, "nointr", 0)) { 47575748Sduboff dp->misc_flag |= GEM_NOINTR; 47585748Sduboff cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name); 47595748Sduboff } 47605748Sduboff 47615748Sduboff dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu); 47625748Sduboff dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr); 47635748Sduboff dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr); 47645748Sduboff dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma); 47655748Sduboff dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma); 47665748Sduboff } 47675748Sduboff 47685748Sduboff 47695748Sduboff /* 47705748Sduboff * Gem kstat support 47715748Sduboff */ 47725748Sduboff 47735748Sduboff #define GEM_LOCAL_DATA_SIZE(gc) \ 47745748Sduboff (sizeof (struct gem_dev) + \ 47755748Sduboff sizeof (struct mcast_addr) * GEM_MAXMC + \ 47765748Sduboff sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \ 47775748Sduboff sizeof (void *) * ((gc)->gc_tx_buf_size)) 47785748Sduboff 47795748Sduboff struct gem_dev * 47805748Sduboff gem_do_attach(dev_info_t *dip, int port, 47815748Sduboff struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep, 47825748Sduboff void *lp, int lmsize) 47835748Sduboff { 47845748Sduboff struct gem_dev *dp; 47855748Sduboff int i; 47865748Sduboff ddi_iblock_cookie_t c; 47875748Sduboff mac_register_t *macp = NULL; 47885748Sduboff int ret; 47895748Sduboff int unit; 47905748Sduboff int nports; 47915748Sduboff 47925748Sduboff unit = ddi_get_instance(dip); 47935748Sduboff if ((nports = gc->gc_nports) == 0) { 47945748Sduboff nports = 1; 47955748Sduboff } 47965748Sduboff if (nports == 1) { 47975748Sduboff ddi_set_driver_private(dip, NULL); 47985748Sduboff } 47995748Sduboff 48005748Sduboff DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH", 48015748Sduboff unit)); 48025748Sduboff 48035748Sduboff /* 48045748Sduboff * Allocate soft data structure 48055748Sduboff */ 48065748Sduboff dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP); 48075748Sduboff 48085748Sduboff if ((macp = mac_alloc(MAC_VERSION)) == NULL) { 48095748Sduboff cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed", 48105748Sduboff unit, __func__); 48115748Sduboff return (NULL); 48125748Sduboff } 48135748Sduboff /* ddi_set_driver_private(dip, dp); */ 48145748Sduboff 48155748Sduboff /* link to private area */ 4816*7116Sduboff dp->private = lp; 48175748Sduboff dp->priv_size = lmsize; 48185748Sduboff dp->mc_list = (struct mcast_addr *)&dp[1]; 48195748Sduboff 48205748Sduboff dp->dip = dip; 48215748Sduboff (void) sprintf(dp->name, gc->gc_name, nports * unit + port); 48225748Sduboff 48235748Sduboff /* 48245748Sduboff * Get iblock cookie 48255748Sduboff */ 48265748Sduboff if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) { 48275748Sduboff cmn_err(CE_CONT, 48285748Sduboff "!%s: gem_do_attach: ddi_get_iblock_cookie: failed", 48295748Sduboff dp->name); 48305748Sduboff goto err_free_private; 48315748Sduboff } 48325748Sduboff dp->iblock_cookie = c; 48335748Sduboff 48345748Sduboff /* 48355748Sduboff * Initialize mutex's for this device. 48365748Sduboff */ 48375748Sduboff mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c); 48385748Sduboff mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c); 48395748Sduboff cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL); 48405748Sduboff 48415748Sduboff /* 48425748Sduboff * configure gem parameter 48435748Sduboff */ 4844*7116Sduboff dp->base_addr = base; 48455748Sduboff dp->regs_handle = *regs_handlep; 48465748Sduboff dp->gc = *gc; 48475748Sduboff gc = &dp->gc; 4848*7116Sduboff /* patch for simplify dma resource management */ 4849*7116Sduboff gc->gc_tx_max_frags = 1; 4850*7116Sduboff gc->gc_tx_max_descs_per_pkt = 1; 4851*7116Sduboff gc->gc_tx_ring_size = gc->gc_tx_buf_size; 4852*7116Sduboff gc->gc_tx_ring_limit = gc->gc_tx_buf_limit; 4853*7116Sduboff gc->gc_tx_desc_write_oo = B_TRUE; 48545748Sduboff 48555748Sduboff gc->gc_nports = nports; /* fix nports */ 48565748Sduboff 48575748Sduboff /* fix copy threadsholds */ 48585748Sduboff gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh); 48595748Sduboff gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh); 48605748Sduboff 48615748Sduboff /* fix rx buffer boundary for iocache line size */ 48625748Sduboff ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align); 48635748Sduboff ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align); 48645748Sduboff gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1); 48655748Sduboff gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1; 48665748Sduboff 4867*7116Sduboff /* fix descriptor boundary for cache line size */ 4868*7116Sduboff gc->gc_dma_attr_desc.dma_attr_align = 4869*7116Sduboff max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE); 4870*7116Sduboff 48715748Sduboff /* patch get_packet method */ 48725748Sduboff if (gc->gc_get_packet == NULL) { 48735748Sduboff gc->gc_get_packet = &gem_get_packet_default; 48745748Sduboff } 48755748Sduboff 48765748Sduboff /* patch get_rx_start method */ 48775748Sduboff if (gc->gc_rx_start == NULL) { 48785748Sduboff gc->gc_rx_start = &gem_rx_start_default; 48795748Sduboff } 48805748Sduboff 48815748Sduboff /* calculate descriptor area */ 48825748Sduboff if (gc->gc_rx_desc_unit_shift >= 0) { 48835748Sduboff dp->rx_desc_size = 48845748Sduboff ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift, 48855748Sduboff gc->gc_dma_attr_desc.dma_attr_align); 48865748Sduboff } 48875748Sduboff if (gc->gc_tx_desc_unit_shift >= 0) { 48885748Sduboff dp->tx_desc_size = 48895748Sduboff ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift, 48905748Sduboff gc->gc_dma_attr_desc.dma_attr_align); 48915748Sduboff } 48925748Sduboff 48935748Sduboff dp->mtu = ETHERMTU; 48945748Sduboff dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC]; 48955748Sduboff /* link tx buffers */ 48965748Sduboff for (i = 0; i < dp->gc.gc_tx_buf_size; i++) { 48975748Sduboff dp->tx_buf[i].txb_next = 48985748Sduboff &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)]; 48995748Sduboff } 49005748Sduboff 49015748Sduboff dp->rxmode = 0; 49025748Sduboff dp->speed = GEM_SPD_10; /* default is 10Mbps */ 49035748Sduboff dp->full_duplex = B_FALSE; /* default is half */ 49045748Sduboff dp->flow_control = FLOW_CONTROL_NONE; 4905*7116Sduboff dp->poll_pkt_delay = 8; /* typical coalease for rx packets */ 49065748Sduboff 49075748Sduboff /* performance tuning parameters */ 49085748Sduboff dp->txthr = ETHERMAX; /* tx fifo threshold */ 49095748Sduboff dp->txmaxdma = 16*4; /* tx max dma burst size */ 49105748Sduboff dp->rxthr = 128; /* rx fifo threshold */ 49115748Sduboff dp->rxmaxdma = 16*4; /* rx max dma burst size */ 49125748Sduboff 49135748Sduboff /* 49145748Sduboff * Get media mode information from .conf file 49155748Sduboff */ 49165748Sduboff gem_read_conf(dp); 49175748Sduboff 49185748Sduboff /* rx_buf_len is required buffer length without padding for alignment */ 49195748Sduboff dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len; 49205748Sduboff 49215748Sduboff /* 49225748Sduboff * Reset the chip 49235748Sduboff */ 49245748Sduboff mutex_enter(&dp->intrlock); 49255748Sduboff dp->nic_state = NIC_STATE_STOPPED; 49265748Sduboff ret = (*dp->gc.gc_reset_chip)(dp); 49275748Sduboff mutex_exit(&dp->intrlock); 49285748Sduboff if (ret != GEM_SUCCESS) { 49295748Sduboff goto err_free_regs; 49305748Sduboff } 49315748Sduboff 49325748Sduboff /* 49335748Sduboff * HW dependant paremeter initialization 49345748Sduboff */ 49355748Sduboff mutex_enter(&dp->intrlock); 49365748Sduboff ret = (*dp->gc.gc_attach_chip)(dp); 49375748Sduboff mutex_exit(&dp->intrlock); 49385748Sduboff if (ret != GEM_SUCCESS) { 49395748Sduboff goto err_free_regs; 49405748Sduboff } 49415748Sduboff 49425748Sduboff #ifdef DEBUG_MULTIFRAGS 49435748Sduboff dp->gc.gc_tx_copy_thresh = dp->mtu; 49445748Sduboff #endif 49455748Sduboff /* allocate tx and rx resources */ 49465748Sduboff if (gem_alloc_memory(dp)) { 49475748Sduboff goto err_free_regs; 49485748Sduboff } 49495748Sduboff 49505748Sduboff DPRINTF(0, (CE_CONT, 49515748Sduboff "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x", 49525748Sduboff dp->name, (long)dp->base_addr, 49535748Sduboff dp->dev_addr.ether_addr_octet[0], 49545748Sduboff dp->dev_addr.ether_addr_octet[1], 49555748Sduboff dp->dev_addr.ether_addr_octet[2], 49565748Sduboff dp->dev_addr.ether_addr_octet[3], 49575748Sduboff dp->dev_addr.ether_addr_octet[4], 49585748Sduboff dp->dev_addr.ether_addr_octet[5])); 49595748Sduboff 49605748Sduboff /* copy mac address */ 49615748Sduboff dp->cur_addr = dp->dev_addr; 49625748Sduboff 49635748Sduboff gem_gld3_init(dp, macp); 49645748Sduboff 49655748Sduboff /* Probe MII phy (scan phy) */ 49665748Sduboff dp->mii_lpable = 0; 49675748Sduboff dp->mii_advert = 0; 49685748Sduboff dp->mii_exp = 0; 49695748Sduboff dp->mii_ctl1000 = 0; 49705748Sduboff dp->mii_stat1000 = 0; 49715748Sduboff if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) { 49725748Sduboff goto err_free_ring; 49735748Sduboff } 49745748Sduboff 49755748Sduboff /* mask unsupported abilities */ 4976*7116Sduboff dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG); 49775748Sduboff dp->anadv_1000fdx &= 49785748Sduboff BOOLEAN(dp->mii_xstatus & 49795748Sduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)); 49805748Sduboff dp->anadv_1000hdx &= 49815748Sduboff BOOLEAN(dp->mii_xstatus & 49825748Sduboff (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)); 49835748Sduboff dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4); 49845748Sduboff dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD); 49855748Sduboff dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX); 49865748Sduboff dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD); 49875748Sduboff dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10); 49885748Sduboff 49895748Sduboff gem_choose_forcedmode(dp); 49905748Sduboff 49915748Sduboff /* initialize MII phy if required */ 49925748Sduboff if (dp->gc.gc_mii_init) { 49935748Sduboff if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) { 49945748Sduboff goto err_free_ring; 49955748Sduboff } 49965748Sduboff } 49975748Sduboff 49985748Sduboff /* 49995748Sduboff * initialize kstats including mii statistics 50005748Sduboff */ 50015748Sduboff gem_nd_setup(dp); 50025748Sduboff 50035748Sduboff /* 50045748Sduboff * Add interrupt to system. 50055748Sduboff */ 50065748Sduboff if (ret = mac_register(macp, &dp->mh)) { 50075748Sduboff cmn_err(CE_WARN, "!%s: mac_register failed, error:%d", 50085748Sduboff dp->name, ret); 50095748Sduboff goto err_release_stats; 50105748Sduboff } 50115748Sduboff mac_free(macp); 50125748Sduboff macp = NULL; 50135748Sduboff 50145748Sduboff if (dp->misc_flag & GEM_SOFTINTR) { 50155748Sduboff if (ddi_add_softintr(dip, 50165748Sduboff DDI_SOFTINT_LOW, &dp->soft_id, 50175748Sduboff NULL, NULL, 50185748Sduboff (uint_t (*)(caddr_t))gem_intr, 50195748Sduboff (caddr_t)dp) != DDI_SUCCESS) { 50205748Sduboff cmn_err(CE_WARN, "!%s: ddi_add_softintr failed", 50215748Sduboff dp->name); 50225748Sduboff goto err_unregister; 50235748Sduboff } 50245748Sduboff } else if ((dp->misc_flag & GEM_NOINTR) == 0) { 50255748Sduboff if (ddi_add_intr(dip, 0, NULL, NULL, 50265748Sduboff (uint_t (*)(caddr_t))gem_intr, 50275748Sduboff (caddr_t)dp) != DDI_SUCCESS) { 50285748Sduboff cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name); 50295748Sduboff goto err_unregister; 50305748Sduboff } 50315748Sduboff } else { 50325748Sduboff /* 50335748Sduboff * Dont use interrupt. 50345748Sduboff * schedule first call of gem_intr_watcher 50355748Sduboff */ 50365748Sduboff dp->intr_watcher_id = 50375748Sduboff timeout((void (*)(void *))gem_intr_watcher, 50385748Sduboff (void *)dp, drv_usectohz(3*1000000)); 50395748Sduboff } 50405748Sduboff 50415748Sduboff /* link this device to dev_info */ 50425748Sduboff dp->next = (struct gem_dev *)ddi_get_driver_private(dip); 5043*7116Sduboff dp->port = port; 50445748Sduboff ddi_set_driver_private(dip, (caddr_t)dp); 50455748Sduboff 5046*7116Sduboff /* reset mii phy and start mii link watcher */ 50475748Sduboff gem_mii_start(dp); 50485748Sduboff 50495748Sduboff DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success")); 50505748Sduboff return (dp); 50515748Sduboff 50525748Sduboff err_unregister: 50535748Sduboff (void) mac_unregister(dp->mh); 50545748Sduboff err_release_stats: 50555748Sduboff /* release NDD resources */ 50565748Sduboff gem_nd_cleanup(dp); 50575748Sduboff 50585748Sduboff err_free_ring: 50595748Sduboff gem_free_memory(dp); 50605748Sduboff err_free_regs: 50615748Sduboff ddi_regs_map_free(&dp->regs_handle); 50625748Sduboff err_free_locks: 50635748Sduboff mutex_destroy(&dp->xmitlock); 50645748Sduboff mutex_destroy(&dp->intrlock); 50655748Sduboff cv_destroy(&dp->tx_drain_cv); 50665748Sduboff err_free_private: 50675748Sduboff if (macp) { 50685748Sduboff mac_free(macp); 50695748Sduboff } 50705748Sduboff kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc)); 50715748Sduboff 50725748Sduboff return (NULL); 50735748Sduboff } 50745748Sduboff 50755748Sduboff int 50765748Sduboff gem_do_detach(dev_info_t *dip) 50775748Sduboff { 50785748Sduboff struct gem_dev *dp; 50795748Sduboff struct gem_dev *tmp; 50805748Sduboff caddr_t private; 50815748Sduboff int priv_size; 50825748Sduboff ddi_acc_handle_t rh; 50835748Sduboff 50845748Sduboff dp = GEM_GET_DEV(dip); 50855748Sduboff if (dp == NULL) { 50865748Sduboff return (DDI_SUCCESS); 50875748Sduboff } 50885748Sduboff 50895748Sduboff rh = dp->regs_handle; 50905748Sduboff private = dp->private; 50915748Sduboff priv_size = dp->priv_size; 50925748Sduboff 50935748Sduboff while (dp) { 5094*7116Sduboff /* unregister with gld v3 */ 5095*7116Sduboff if (mac_unregister(dp->mh) != 0) { 5096*7116Sduboff return (DDI_FAILURE); 5097*7116Sduboff } 5098*7116Sduboff 50995748Sduboff /* ensure any rx buffers are not used */ 51005748Sduboff if (dp->rx_buf_allocated != dp->rx_buf_freecnt) { 51015748Sduboff /* resource is busy */ 51025748Sduboff cmn_err(CE_PANIC, 51035748Sduboff "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d", 51045748Sduboff dp->name, __func__, 51055748Sduboff dp->rx_buf_allocated, dp->rx_buf_freecnt); 51065748Sduboff /* NOT REACHED */ 51075748Sduboff } 51085748Sduboff 51095748Sduboff /* stop mii link watcher */ 51105748Sduboff gem_mii_stop(dp); 51115748Sduboff 51125748Sduboff /* unregister interrupt handler */ 51135748Sduboff if (dp->misc_flag & GEM_SOFTINTR) { 51145748Sduboff ddi_remove_softintr(dp->soft_id); 51155748Sduboff } else if ((dp->misc_flag & GEM_NOINTR) == 0) { 51165748Sduboff ddi_remove_intr(dip, 0, dp->iblock_cookie); 51175748Sduboff } else { 51185748Sduboff /* stop interrupt watcher */ 51195748Sduboff if (dp->intr_watcher_id) { 51205748Sduboff while (untimeout(dp->intr_watcher_id) == -1) 51215748Sduboff ; 51225748Sduboff dp->intr_watcher_id = 0; 51235748Sduboff } 51245748Sduboff } 51255748Sduboff 51265748Sduboff /* release NDD resources */ 51275748Sduboff gem_nd_cleanup(dp); 51285748Sduboff /* release buffers, descriptors and dma resources */ 51295748Sduboff gem_free_memory(dp); 51305748Sduboff 51315748Sduboff /* release locks and condition variables */ 51325748Sduboff mutex_destroy(&dp->xmitlock); 51335748Sduboff mutex_destroy(&dp->intrlock); 51345748Sduboff cv_destroy(&dp->tx_drain_cv); 51355748Sduboff 51365748Sduboff /* release basic memory resources */ 51375748Sduboff tmp = dp->next; 51385748Sduboff kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc)); 51395748Sduboff dp = tmp; 51405748Sduboff } 51415748Sduboff 51425748Sduboff /* release common private memory for the nic */ 51435748Sduboff kmem_free(private, priv_size); 51445748Sduboff 51455748Sduboff /* release register mapping resources */ 51465748Sduboff ddi_regs_map_free(&rh); 51475748Sduboff 51485748Sduboff DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success", 51495748Sduboff ddi_driver_name(dip), ddi_get_instance(dip))); 51505748Sduboff 51515748Sduboff return (DDI_SUCCESS); 51525748Sduboff } 51535748Sduboff 51545748Sduboff int 51555748Sduboff gem_suspend(dev_info_t *dip) 51565748Sduboff { 51575748Sduboff struct gem_dev *dp; 51585748Sduboff 51595748Sduboff /* 51605748Sduboff * stop the device 51615748Sduboff */ 51625748Sduboff dp = GEM_GET_DEV(dip); 51635748Sduboff ASSERT(dp); 51645748Sduboff 51655748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 51665748Sduboff 51675748Sduboff for (; dp; dp = dp->next) { 51685748Sduboff 51695748Sduboff /* stop mii link watcher */ 51705748Sduboff gem_mii_stop(dp); 51715748Sduboff 51725748Sduboff /* stop interrupt watcher for no-intr mode */ 51735748Sduboff if (dp->misc_flag & GEM_NOINTR) { 51745748Sduboff if (dp->intr_watcher_id) { 51755748Sduboff while (untimeout(dp->intr_watcher_id) == -1) 51765748Sduboff ; 51775748Sduboff } 51785748Sduboff dp->intr_watcher_id = 0; 51795748Sduboff } 51805748Sduboff 51815748Sduboff /* stop tx timeout watcher */ 51825748Sduboff if (dp->timeout_id) { 51835748Sduboff while (untimeout(dp->timeout_id) == -1) 51845748Sduboff ; 51855748Sduboff dp->timeout_id = 0; 51865748Sduboff } 51875748Sduboff 51885748Sduboff /* make the nic state inactive */ 51895748Sduboff mutex_enter(&dp->intrlock); 51905748Sduboff (void) gem_mac_stop(dp, 0); 51915748Sduboff ASSERT(!dp->mac_active); 51925748Sduboff 51935748Sduboff /* no further register access */ 51945748Sduboff dp->mac_suspended = B_TRUE; 51955748Sduboff mutex_exit(&dp->intrlock); 51965748Sduboff } 51975748Sduboff 51985748Sduboff /* XXX - power down the nic */ 51995748Sduboff 52005748Sduboff return (DDI_SUCCESS); 52015748Sduboff } 52025748Sduboff 52035748Sduboff int 52045748Sduboff gem_resume(dev_info_t *dip) 52055748Sduboff { 52065748Sduboff struct gem_dev *dp; 52075748Sduboff 52085748Sduboff /* 52095748Sduboff * restart the device 52105748Sduboff */ 52115748Sduboff dp = GEM_GET_DEV(dip); 52125748Sduboff ASSERT(dp); 52135748Sduboff 52145748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); 52155748Sduboff 52165748Sduboff for (; dp; dp = dp->next) { 52175748Sduboff 52185748Sduboff /* 52195748Sduboff * Bring up the nic after power up 52205748Sduboff */ 52215748Sduboff 52225748Sduboff /* gem_xxx.c layer to setup power management state. */ 52235748Sduboff ASSERT(!dp->mac_active); 52245748Sduboff 52255748Sduboff /* reset the chip, because we are just after power up. */ 52265748Sduboff mutex_enter(&dp->intrlock); 52275748Sduboff 52285748Sduboff dp->mac_suspended = B_FALSE; 52295748Sduboff dp->nic_state = NIC_STATE_STOPPED; 52305748Sduboff 52315748Sduboff if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) { 52325748Sduboff cmn_err(CE_WARN, "%s: %s: failed to reset chip", 52335748Sduboff dp->name, __func__); 52345748Sduboff mutex_exit(&dp->intrlock); 52355748Sduboff goto err; 52365748Sduboff } 52375748Sduboff mutex_exit(&dp->intrlock); 52385748Sduboff 52395748Sduboff /* initialize mii phy because we are just after power up */ 52405748Sduboff if (dp->gc.gc_mii_init) { 52415748Sduboff (void) (*dp->gc.gc_mii_init)(dp); 52425748Sduboff } 52435748Sduboff 52445748Sduboff if (dp->misc_flag & GEM_NOINTR) { 52455748Sduboff /* 52465748Sduboff * schedule first call of gem_intr_watcher 52475748Sduboff * instead of interrupts. 52485748Sduboff */ 52495748Sduboff dp->intr_watcher_id = 52505748Sduboff timeout((void (*)(void *))gem_intr_watcher, 52515748Sduboff (void *)dp, drv_usectohz(3*1000000)); 52525748Sduboff } 52535748Sduboff 52545748Sduboff /* restart mii link watcher */ 52555748Sduboff gem_mii_start(dp); 52565748Sduboff 52575748Sduboff /* restart mac */ 52585748Sduboff mutex_enter(&dp->intrlock); 52595748Sduboff 52605748Sduboff if (gem_mac_init(dp) != GEM_SUCCESS) { 52615748Sduboff mutex_exit(&dp->intrlock); 52625748Sduboff goto err_reset; 52635748Sduboff } 52645748Sduboff dp->nic_state = NIC_STATE_INITIALIZED; 52655748Sduboff 52665748Sduboff /* setup media mode if the link have been up */ 52675748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 52685748Sduboff if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) { 52695748Sduboff mutex_exit(&dp->intrlock); 52705748Sduboff goto err_reset; 52715748Sduboff } 52725748Sduboff } 52735748Sduboff 52745748Sduboff /* enable mac address and rx filter */ 52755748Sduboff dp->rxmode |= RXMODE_ENABLE; 52765748Sduboff if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) { 52775748Sduboff mutex_exit(&dp->intrlock); 52785748Sduboff goto err_reset; 52795748Sduboff } 52805748Sduboff dp->nic_state = NIC_STATE_ONLINE; 52815748Sduboff 52825748Sduboff /* restart tx timeout watcher */ 52835748Sduboff dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout, 52845748Sduboff (void *)dp, 52855748Sduboff dp->gc.gc_tx_timeout_interval); 52865748Sduboff 52875748Sduboff /* now the nic is fully functional */ 52885748Sduboff if (dp->mii_state == MII_STATE_LINKUP) { 52895748Sduboff if (gem_mac_start(dp) != GEM_SUCCESS) { 52905748Sduboff mutex_exit(&dp->intrlock); 52915748Sduboff goto err_reset; 52925748Sduboff } 52935748Sduboff } 52945748Sduboff mutex_exit(&dp->intrlock); 52955748Sduboff } 52965748Sduboff 52975748Sduboff return (DDI_SUCCESS); 52985748Sduboff 52995748Sduboff err_reset: 53005748Sduboff if (dp->intr_watcher_id) { 53015748Sduboff while (untimeout(dp->intr_watcher_id) == -1) 53025748Sduboff ; 53035748Sduboff dp->intr_watcher_id = 0; 53045748Sduboff } 53055748Sduboff mutex_enter(&dp->intrlock); 53065748Sduboff (*dp->gc.gc_reset_chip)(dp); 53075748Sduboff dp->nic_state = NIC_STATE_STOPPED; 53085748Sduboff mutex_exit(&dp->intrlock); 53095748Sduboff 53105748Sduboff err: 53115748Sduboff return (DDI_FAILURE); 53125748Sduboff } 53135748Sduboff 53145748Sduboff /* 53155748Sduboff * misc routines for PCI 53165748Sduboff */ 53175748Sduboff uint8_t 53185748Sduboff gem_search_pci_cap(dev_info_t *dip, 53195748Sduboff ddi_acc_handle_t conf_handle, uint8_t target) 53205748Sduboff { 53215748Sduboff uint8_t pci_cap_ptr; 53225748Sduboff uint32_t pci_cap; 53235748Sduboff 53245748Sduboff /* search power management capablities */ 53255748Sduboff pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR); 53265748Sduboff while (pci_cap_ptr) { 53275748Sduboff /* read pci capability header */ 53285748Sduboff pci_cap = pci_config_get32(conf_handle, pci_cap_ptr); 53295748Sduboff if ((pci_cap & 0xff) == target) { 53305748Sduboff /* found */ 53315748Sduboff break; 53325748Sduboff } 53335748Sduboff /* get next_ptr */ 53345748Sduboff pci_cap_ptr = (pci_cap >> 8) & 0xff; 53355748Sduboff } 53365748Sduboff return (pci_cap_ptr); 53375748Sduboff } 53385748Sduboff 53395748Sduboff int 53405748Sduboff gem_pci_set_power_state(dev_info_t *dip, 53415748Sduboff ddi_acc_handle_t conf_handle, uint_t new_mode) 53425748Sduboff { 53435748Sduboff uint8_t pci_cap_ptr; 53445748Sduboff uint32_t pmcsr; 53455748Sduboff uint_t unit; 53465748Sduboff const char *drv_name; 53475748Sduboff 53485748Sduboff ASSERT(new_mode < 4); 53495748Sduboff 53505748Sduboff unit = ddi_get_instance(dip); 53515748Sduboff drv_name = ddi_driver_name(dip); 53525748Sduboff 53535748Sduboff /* search power management capablities */ 53545748Sduboff pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM); 53555748Sduboff 53565748Sduboff if (pci_cap_ptr == 0) { 53575748Sduboff cmn_err(CE_CONT, 53585748Sduboff "!%s%d: doesn't have pci power management capability", 53595748Sduboff drv_name, unit); 53605748Sduboff return (DDI_FAILURE); 53615748Sduboff } 53625748Sduboff 53635748Sduboff /* read power management capabilities */ 53645748Sduboff pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR); 53655748Sduboff 53665748Sduboff DPRINTF(0, (CE_CONT, 53675748Sduboff "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x", 53685748Sduboff drv_name, unit, pci_cap_ptr, pmcsr)); 53695748Sduboff 53705748Sduboff /* 53715748Sduboff * Is the resuested power mode supported? 53725748Sduboff */ 53735748Sduboff /* not yet */ 53745748Sduboff 53755748Sduboff /* 53765748Sduboff * move to new mode 53775748Sduboff */ 53785748Sduboff pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode; 53795748Sduboff pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr); 53805748Sduboff 53815748Sduboff return (DDI_SUCCESS); 53825748Sduboff } 53835748Sduboff 53845748Sduboff /* 53855748Sduboff * select suitable register for by specified address space or register 53865748Sduboff * offset in PCI config space 53875748Sduboff */ 53885748Sduboff int 53895748Sduboff gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask, 53905748Sduboff struct ddi_device_acc_attr *attrp, 53915748Sduboff caddr_t *basep, ddi_acc_handle_t *hp) 53925748Sduboff { 53935748Sduboff struct pci_phys_spec *regs; 53945748Sduboff uint_t len; 53955748Sduboff uint_t unit; 53965748Sduboff uint_t n; 53975748Sduboff uint_t i; 53985748Sduboff int ret; 53995748Sduboff const char *drv_name; 54005748Sduboff 54015748Sduboff unit = ddi_get_instance(dip); 54025748Sduboff drv_name = ddi_driver_name(dip); 54035748Sduboff 54045748Sduboff /* Search IO-range or memory-range to be mapped */ 54055748Sduboff regs = NULL; 54065748Sduboff len = 0; 54075748Sduboff 54085748Sduboff if ((ret = ddi_prop_lookup_int_array( 54095748Sduboff DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 54105748Sduboff "reg", (void *)®s, &len)) != DDI_PROP_SUCCESS) { 54115748Sduboff cmn_err(CE_WARN, 54125748Sduboff "!%s%d: failed to get reg property (ret:%d)", 54135748Sduboff drv_name, unit, ret); 54145748Sduboff return (DDI_FAILURE); 54155748Sduboff } 54165748Sduboff n = len / (sizeof (struct pci_phys_spec) / sizeof (int)); 54175748Sduboff 54185748Sduboff ASSERT(regs != NULL && len > 0); 54195748Sduboff 54205748Sduboff #if GEM_DEBUG_LEVEL > 0 54215748Sduboff for (i = 0; i < n; i++) { 54225748Sduboff cmn_err(CE_CONT, 54235748Sduboff "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x", 54245748Sduboff drv_name, unit, i, 54255748Sduboff regs[i].pci_phys_hi, 54265748Sduboff regs[i].pci_phys_mid, 54275748Sduboff regs[i].pci_phys_low, 54285748Sduboff regs[i].pci_size_hi, 54295748Sduboff regs[i].pci_size_low); 54305748Sduboff } 54315748Sduboff #endif 54325748Sduboff for (i = 0; i < n; i++) { 54335748Sduboff if ((regs[i].pci_phys_hi & mask) == which) { 54345748Sduboff /* it's the requested space */ 54355748Sduboff ddi_prop_free(regs); 54365748Sduboff goto address_range_found; 54375748Sduboff } 54385748Sduboff } 54395748Sduboff ddi_prop_free(regs); 54405748Sduboff return (DDI_FAILURE); 54415748Sduboff 54425748Sduboff address_range_found: 54435748Sduboff if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp)) 54445748Sduboff != DDI_SUCCESS) { 54455748Sduboff cmn_err(CE_CONT, 54465748Sduboff "!%s%d: ddi_regs_map_setup failed (ret:%d)", 54475748Sduboff drv_name, unit, ret); 54485748Sduboff } 54495748Sduboff 54505748Sduboff return (ret); 54515748Sduboff } 54525748Sduboff 54535748Sduboff void 54545748Sduboff gem_mod_init(struct dev_ops *dop, char *name) 54555748Sduboff { 54565748Sduboff mac_init_ops(dop, name); 54575748Sduboff } 54585748Sduboff 54595748Sduboff void 54605748Sduboff gem_mod_fini(struct dev_ops *dop) 54615748Sduboff { 54625748Sduboff mac_fini_ops(dop); 54635748Sduboff } 5464