15748Sduboff /*
25748Sduboff * sfe_util.c: general ethernet mac driver framework version 2.6
35748Sduboff *
47116Sduboff * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
55748Sduboff *
65748Sduboff * Redistribution and use in source and binary forms, with or without
75748Sduboff * modification, are permitted provided that the following conditions are met:
85748Sduboff *
95748Sduboff * 1. Redistributions of source code must retain the above copyright notice,
105748Sduboff * this list of conditions and the following disclaimer.
115748Sduboff *
125748Sduboff * 2. Redistributions in binary form must reproduce the above copyright notice,
135748Sduboff * this list of conditions and the following disclaimer in the documentation
145748Sduboff * and/or other materials provided with the distribution.
155748Sduboff *
165748Sduboff * 3. Neither the name of the author nor the names of its contributors may be
175748Sduboff * used to endorse or promote products derived from this software without
185748Sduboff * specific prior written permission.
195748Sduboff *
205748Sduboff * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
215748Sduboff * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
225748Sduboff * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
235748Sduboff * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
245748Sduboff * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
255748Sduboff * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
265748Sduboff * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
275748Sduboff * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
285748Sduboff * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
295748Sduboff * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
305748Sduboff * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
315748Sduboff * DAMAGE.
325748Sduboff */
335748Sduboff
345748Sduboff /*
35*11878SVenu.Iyer@Sun.COM * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
368275SEric Cheng * Use is subject to license terms.
378275SEric Cheng */
388275SEric Cheng
398275SEric Cheng /*
405748Sduboff * System Header files.
415748Sduboff */
425748Sduboff #include <sys/types.h>
435748Sduboff #include <sys/conf.h>
445748Sduboff #include <sys/debug.h>
455748Sduboff #include <sys/kmem.h>
465748Sduboff #include <sys/vtrace.h>
475748Sduboff #include <sys/ethernet.h>
485748Sduboff #include <sys/modctl.h>
495748Sduboff #include <sys/errno.h>
505748Sduboff #include <sys/ddi.h>
515748Sduboff #include <sys/sunddi.h>
525748Sduboff #include <sys/stream.h> /* required for MBLK* */
535748Sduboff #include <sys/strsun.h> /* required for mionack() */
545748Sduboff #include <sys/byteorder.h>
555748Sduboff #include <sys/pci.h>
565748Sduboff #include <inet/common.h>
575748Sduboff #include <inet/led.h>
585748Sduboff #include <inet/mi.h>
595748Sduboff #include <inet/nd.h>
605748Sduboff #include <sys/crc32.h>
615748Sduboff
625748Sduboff #include <sys/note.h>
635748Sduboff
645748Sduboff #include "sfe_mii.h"
655748Sduboff #include "sfe_util.h"
665748Sduboff
675748Sduboff
685748Sduboff
695748Sduboff extern char ident[];
705748Sduboff
715748Sduboff /* Debugging support */
725748Sduboff #ifdef GEM_DEBUG_LEVEL
735748Sduboff static int gem_debug = GEM_DEBUG_LEVEL;
745748Sduboff #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args
755748Sduboff #else
765748Sduboff #define DPRINTF(n, args)
775748Sduboff #undef ASSERT
785748Sduboff #define ASSERT(x)
795748Sduboff #endif
805748Sduboff
815748Sduboff #define IOC_LINESIZE 0x40 /* Is it right for amd64? */
825748Sduboff
835748Sduboff /*
845748Sduboff * Useful macros and typedefs
855748Sduboff */
865748Sduboff #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
875748Sduboff
885748Sduboff #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
895748Sduboff #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
905748Sduboff
915748Sduboff #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9])
925748Sduboff #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6])
935748Sduboff
945748Sduboff
955748Sduboff #ifndef INT32_MAX
965748Sduboff #define INT32_MAX 0x7fffffff
975748Sduboff #endif
985748Sduboff
995748Sduboff #define VTAG_OFF (ETHERADDRL*2)
1005748Sduboff #ifndef VTAG_SIZE
1015748Sduboff #define VTAG_SIZE 4
1025748Sduboff #endif
1035748Sduboff #ifndef VTAG_TPID
1045748Sduboff #define VTAG_TPID 0x8100U
1055748Sduboff #endif
1065748Sduboff
1075748Sduboff #define GET_TXBUF(dp, sn) \
1085748Sduboff &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
1095748Sduboff
1105748Sduboff #ifndef offsetof
1115748Sduboff #define offsetof(t, m) ((long)&(((t *) 0)->m))
1125748Sduboff #endif
1135748Sduboff #define TXFLAG_VTAG(flag) \
1145748Sduboff (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
1155748Sduboff
1165748Sduboff #define MAXPKTBUF(dp) \
1175748Sduboff ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
1185748Sduboff
1195748Sduboff #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */
1207116Sduboff #define BOOLEAN(x) ((x) != 0)
1215748Sduboff
1225748Sduboff /*
 * Macros to distinguish chip generations.
1245748Sduboff */
1255748Sduboff
1265748Sduboff /*
1275748Sduboff * Private functions
1285748Sduboff */
1295748Sduboff static void gem_mii_start(struct gem_dev *);
1305748Sduboff static void gem_mii_stop(struct gem_dev *);
1315748Sduboff
1325748Sduboff /* local buffer management */
1335748Sduboff static void gem_nd_setup(struct gem_dev *dp);
1345748Sduboff static void gem_nd_cleanup(struct gem_dev *dp);
1355748Sduboff static int gem_alloc_memory(struct gem_dev *);
1365748Sduboff static void gem_free_memory(struct gem_dev *);
1375748Sduboff static void gem_init_rx_ring(struct gem_dev *);
1385748Sduboff static void gem_init_tx_ring(struct gem_dev *);
1395748Sduboff __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
1405748Sduboff
1415748Sduboff static void gem_tx_timeout(struct gem_dev *);
1425748Sduboff static void gem_mii_link_watcher(struct gem_dev *dp);
1435748Sduboff static int gem_mac_init(struct gem_dev *dp);
1445748Sduboff static int gem_mac_start(struct gem_dev *dp);
1455748Sduboff static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
1465748Sduboff static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
1475748Sduboff
/* Ethernet broadcast address (all ones), used for multicast/broadcast tests */
static struct ether_addr gem_etherbroadcastaddr = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/* Link speed in Mbps, indexed by the GEM link-speed code (10/100/1000) */
int gem_speed_value[] = {10, 100, 1000};
1535748Sduboff
1545748Sduboff /* ============================================================== */
1555748Sduboff /*
1565748Sduboff * Misc runtime routines
1575748Sduboff */
1585748Sduboff /* ============================================================== */
1595748Sduboff /*
1605748Sduboff * Ether CRC calculation according to 21143 data sheet
1615748Sduboff */
1625748Sduboff uint32_t
gem_ether_crc_le(const uint8_t * addr,int len)1635748Sduboff gem_ether_crc_le(const uint8_t *addr, int len)
1645748Sduboff {
1655748Sduboff uint32_t crc;
1665748Sduboff
1675748Sduboff CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
1685748Sduboff return (crc);
1695748Sduboff }
1705748Sduboff
/*
 * gem_ether_crc_be: compute the big-endian ethernet CRC32 over the first
 * len bytes at addr, bit-serially, with polynomial 0x04c11db7 and an
 * all-ones seed (per the DEC 21143 data sheet).  Returns the raw
 * (non-inverted) CRC register value.
 */
uint32_t
gem_ether_crc_be(const uint8_t *addr, int len)
{
	uint32_t	crc;
	uint32_t	feedback;
	unsigned int	octet;
	int		byte;
	int		nbits;
#define	CRC32_POLY_BE	0x04c11db7

	crc = 0xffffffff;
	for (byte = 0; byte < len; byte++) {
		octet = addr[byte];
		/* shift each data bit, LSB first, into the CRC register */
		for (nbits = 0; nbits < 8; nbits++) {
			feedback = ((crc >> 31) ^ octet) & 1;
			crc = crc << 1;
			if (feedback) {
				crc ^= CRC32_POLY_BE;
			}
			octet >>= 1;
		}
	}
	return (crc);
#undef	CRC32_POLY_BE
}
1905748Sduboff
1915748Sduboff int
gem_prop_get_int(struct gem_dev * dp,char * prop_template,int def_val)1925748Sduboff gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
1935748Sduboff {
1945748Sduboff char propname[32];
1955748Sduboff
1965748Sduboff (void) sprintf(propname, prop_template, dp->name);
1975748Sduboff
1985748Sduboff return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
1995748Sduboff DDI_PROP_DONTPASS, propname, def_val));
2005748Sduboff }
2015748Sduboff
/*
 * gem_population: return the number of 1-bits in x (population count).
 */
static int
gem_population(uint32_t x)
{
	int	bits;

	/* Kernighan's method: each pass clears the lowest set bit */
	for (bits = 0; x != 0; bits++) {
		x &= x - 1;
	}
	return (bits);
}
2165748Sduboff
2177116Sduboff #ifdef GEM_DEBUG_LEVEL
2187116Sduboff #ifdef GEM_DEBUG_VLAN
/*
 * gem_dump_packet: debug-only helper (GEM_DEBUG_LEVEL && GEM_DEBUG_VLAN).
 * Decodes the leading bytes of the packet in mblk chain mp (ethernet
 * header, optional VLAN tag, IPv4 header, TCP/UDP header) into a text
 * line and logs it with the given title.  When check_cksum is set, the
 * TCP/UDP checksum is verified with ip_cksum() and the result ("ok"/"ng")
 * is appended.  Only the first sizeof (buf) bytes of the packet are
 * examined.
 */
static void
gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
    boolean_t check_cksum)
{
	char	msg[180];
	uint8_t	buf[18+20+20];	/* max ether+vtag hdr, ip hdr, tcp hdr */
	uint8_t	*p;
	size_t	offset;
	uint_t	ethertype;
	uint_t	proto;
	uint_t	ipproto = 0;
	uint_t	iplen;
	uint_t	iphlen;
	uint_t	tcplen;
	uint_t	udplen;
	uint_t	cksum;
	int	rest;
	int	len;
	char	*bp;
	mblk_t	*tp;
	extern uint_t ip_cksum(mblk_t *, int, uint32_t);

	msg[0] = 0;
	bp = msg;

	/* linearize the head of the mblk chain into buf */
	rest = sizeof (buf);
	offset = 0;
	for (tp = mp; tp; tp = tp->b_cont) {
		len = tp->b_wptr - tp->b_rptr;
		len = min(rest, len);
		bcopy(tp->b_rptr, &buf[offset], len);
		rest -= len;
		offset += len;
		if (rest == 0) {
			break;
		}
	}

	offset = 0;
	p = &buf[offset];

	/* ethernet addresses (printed as src -> dst) */
	sprintf(bp,
	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
	    p[6], p[7], p[8], p[9], p[10], p[11],
	    p[0], p[1], p[2], p[3], p[4], p[5]);
	bp = &msg[strlen(msg)];

	/* vlan tag and ethertype */
	ethertype = GET_ETHERTYPE(p);
	if (ethertype == VTAG_TPID) {
		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
		bp = &msg[strlen(msg)];

		/* skip the tag and re-read the real ethertype */
		offset += VTAG_SIZE;
		p = &buf[offset];
		ethertype = GET_ETHERTYPE(p);
	}
	sprintf(bp, " type:%04x", ethertype);
	bp = &msg[strlen(msg)];

	/* ethernet packet length, plus per-fragment lengths if chained */
	sprintf(bp, " mblklen:%d", msgdsize(mp));
	bp = &msg[strlen(msg)];
	if (mp->b_cont) {
		sprintf(bp, "(");
		bp = &msg[strlen(msg)];
		for (tp = mp; tp; tp = tp->b_cont) {
			if (tp == mp) {
				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
			} else {
				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
			}
			bp = &msg[strlen(msg)];
		}
		sprintf(bp, ")");
		bp = &msg[strlen(msg)];
	}

	if (ethertype != ETHERTYPE_IP) {
		goto x;
	}

	/* IPv4 header: addresses, protocol, total length */
	offset += sizeof (struct ether_header);
	p = &buf[offset];
	ipproto = p[9];
	iplen = GET_NET16(&p[2]);
	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
	    p[12], p[13], p[14], p[15],
	    p[16], p[17], p[18], p[19],
	    ipproto, iplen);
	bp = (void *)&msg[strlen(msg)];

	iphlen = (p[0] & 0xf) * 4;

	/* partial checksum for the TCP/UDP pseudo header (src/dst/proto) */
	cksum = *(uint16_t *)&p[12];
	cksum += *(uint16_t *)&p[14];
	cksum += *(uint16_t *)&p[16];
	cksum += *(uint16_t *)&p[18];
	cksum += BE_16(ipproto);

	/* tcp or udp protocol header */
	offset += iphlen;
	p = &buf[offset];
	if (ipproto == IPPROTO_TCP) {
		tcplen = iplen - iphlen;
		sprintf(bp, ", tcp: len:%d cksum:%x",
		    tcplen, GET_NET16(&p[16]));
		bp = (void *)&msg[strlen(msg)];

		if (check_cksum) {
			/* add pseudo-header length, then fold in payload */
			cksum += BE_16(tcplen);
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	} else if (ipproto == IPPROTO_UDP) {
		udplen = GET_NET16(&p[4]);
		sprintf(bp, ", udp: len:%d cksum:%x",
		    udplen, GET_NET16(&p[6]));
		bp = (void *)&msg[strlen(msg)];

		/* a zero UDP checksum means "not computed"; skip it */
		if (GET_NET16(&p[6]) && check_cksum) {
			cksum += *(uint16_t *)&p[4];
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	}
x:
	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
}
3567116Sduboff #endif /* GEM_DEBUG_VLAN */
3577116Sduboff #endif /* GEM_DEBUG_LEVEL */
3587116Sduboff
3595748Sduboff /* ============================================================== */
3605748Sduboff /*
3615748Sduboff * IO cache flush
3625748Sduboff */
3635748Sduboff /* ============================================================== */
/*
 * gem_rx_desc_dma_sync: sync nslot rx descriptors starting at physical
 * ring index head for CPU or device access (how is a DDI_DMA_SYNC_*
 * flag).  The range may wrap past the end of the ring, in which case
 * two ddi_dma_sync() calls are issued: one for the wrapped portion at
 * the start of the ring, one for the tail portion.  The rx descriptor
 * area starts at offset 0 of desc_dma_handle.
 */
__INLINE__ void
gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
{
	int	n;
	int	m;
	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;

	/* sync active descriptors */
	if (rx_desc_unit_shift < 0 || nslot == 0) {
		/* no rx descriptor ring */
		return;
	}

	/* n: slots from head to the end of the ring; m: wrapped slots */
	n = dp->gc.gc_rx_ring_size - head;
	if ((m = nslot - n) > 0) {
		/* sync the portion that wrapped to the ring start */
		(void) ddi_dma_sync(dp->desc_dma_handle,
		    (off_t)0,
		    (size_t)(m << rx_desc_unit_shift),
		    how);
		nslot = n;
	}

	(void) ddi_dma_sync(dp->desc_dma_handle,
	    (off_t)(head << rx_desc_unit_shift),
	    (size_t)(nslot << rx_desc_unit_shift),
	    how);
}
3915748Sduboff
/*
 * gem_tx_desc_dma_sync: sync nslot tx descriptors starting at physical
 * ring index head (how is a DDI_DMA_SYNC_* flag).  The tx descriptor
 * area lives in the same DMA allocation as the rx ring, at offset
 * (tx_ring_dma - rx_ring_dma) from the start of desc_dma_handle.
 * As in gem_rx_desc_dma_sync(), a wrapped range is split into two
 * ddi_dma_sync() calls.
 */
__INLINE__ void
gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
{
	int	n;
	int	m;
	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;

	/* sync active descriptors */
	if (tx_desc_unit_shift < 0 || nslot == 0) {
		/* no tx descriptor ring */
		return;
	}

	/* n: slots from head to the end of the ring; m: wrapped slots */
	n = dp->gc.gc_tx_ring_size - head;
	if ((m = nslot - n) > 0) {
		/* sync the portion that wrapped to the ring start */
		(void) ddi_dma_sync(dp->desc_dma_handle,
		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
		    (size_t)(m << tx_desc_unit_shift),
		    how);
		nslot = n;
	}

	(void) ddi_dma_sync(dp->desc_dma_handle,
	    (off_t)((head << tx_desc_unit_shift)
	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
	    (size_t)(nslot << tx_desc_unit_shift),
	    how);
}
4205748Sduboff
4215748Sduboff static void
gem_rx_start_default(struct gem_dev * dp,int head,int nslot)4225748Sduboff gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
4235748Sduboff {
4245748Sduboff gem_rx_desc_dma_sync(dp,
4255748Sduboff SLOT(head, dp->gc.gc_rx_ring_size), nslot,
4265748Sduboff DDI_DMA_SYNC_FORDEV);
4275748Sduboff }
4285748Sduboff
4295748Sduboff /* ============================================================== */
4305748Sduboff /*
4315748Sduboff * Buffer management
4325748Sduboff */
4335748Sduboff /* ============================================================== */
/*
 * gem_dump_txbuf: log a snapshot of the tx bookkeeping state at the
 * given cmn_err() level.  For each tx region (active, softq, free,
 * desc, intr) it prints the head/tail sequence numbers, the physical
 * buffer/ring slots they map to (in brackets), and the number of
 * outstanding entries (+N).  Used for tx diagnostics such as tx
 * timeout reports.
 */
static void
gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
{
	cmn_err(level,
	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
	    "tx_softq: %d[%d] %d[%d] (+%d), "
	    "tx_free: %d[%d] %d[%d] (+%d), "
	    "tx_desc: %d[%d] %d[%d] (+%d), "
	    "intr: %d[%d] (+%d), ",
	    dp->name, title,
	    dp->tx_active_head,
	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail,
	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail - dp->tx_active_head,
	    dp->tx_softq_head,
	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail,
	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail - dp->tx_softq_head,
	    dp->tx_free_head,
	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail,
	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail - dp->tx_free_head,
	    dp->tx_desc_head,
	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail,
	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail - dp->tx_desc_head,
	    dp->tx_desc_intr,
	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_intr - dp->tx_desc_head);
}
4685748Sduboff
4695748Sduboff static void
gem_free_rxbuf(struct rxbuf * rbp)4705748Sduboff gem_free_rxbuf(struct rxbuf *rbp)
4715748Sduboff {
4725748Sduboff struct gem_dev *dp;
4735748Sduboff
4745748Sduboff dp = rbp->rxb_devp;
4755748Sduboff ASSERT(mutex_owned(&dp->intrlock));
4765748Sduboff rbp->rxb_next = dp->rx_buf_freelist;
4775748Sduboff dp->rx_buf_freelist = rbp;
4785748Sduboff dp->rx_buf_freecnt++;
4795748Sduboff }
4805748Sduboff
4815748Sduboff /*
4825748Sduboff * gem_get_rxbuf: supply a receive buffer which have been mapped into
4835748Sduboff * DMA space.
4845748Sduboff */
4855748Sduboff struct rxbuf *
gem_get_rxbuf(struct gem_dev * dp,int cansleep)4865748Sduboff gem_get_rxbuf(struct gem_dev *dp, int cansleep)
4875748Sduboff {
4885748Sduboff struct rxbuf *rbp;
4895748Sduboff uint_t count = 0;
4905748Sduboff int i;
4915748Sduboff int err;
4925748Sduboff
4935748Sduboff ASSERT(mutex_owned(&dp->intrlock));
4945748Sduboff
4955748Sduboff DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
4965748Sduboff dp->rx_buf_freecnt));
4975748Sduboff /*
4985748Sduboff * Get rx buffer management structure
4995748Sduboff */
5005748Sduboff rbp = dp->rx_buf_freelist;
5015748Sduboff if (rbp) {
5025748Sduboff /* get one from the recycle list */
5035748Sduboff ASSERT(dp->rx_buf_freecnt > 0);
5045748Sduboff
5055748Sduboff dp->rx_buf_freelist = rbp->rxb_next;
5065748Sduboff dp->rx_buf_freecnt--;
5075748Sduboff rbp->rxb_next = NULL;
5085748Sduboff return (rbp);
5095748Sduboff }
5105748Sduboff
5115748Sduboff /*
5125748Sduboff * Allocate a rx buffer management structure
5135748Sduboff */
5145748Sduboff rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
5155748Sduboff if (rbp == NULL) {
5165748Sduboff /* no memory */
5175748Sduboff return (NULL);
5185748Sduboff }
5195748Sduboff
5205748Sduboff /*
5215748Sduboff * Prepare a back pointer to the device structure which will be
5225748Sduboff * refered on freeing the buffer later.
5235748Sduboff */
5245748Sduboff rbp->rxb_devp = dp;
5255748Sduboff
5265748Sduboff /* allocate a dma handle for rx data buffer */
5275748Sduboff if ((err = ddi_dma_alloc_handle(dp->dip,
5285748Sduboff &dp->gc.gc_dma_attr_rxbuf,
5295748Sduboff (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
5305748Sduboff NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
5315748Sduboff
5325748Sduboff cmn_err(CE_WARN,
5335748Sduboff "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
5345748Sduboff dp->name, __func__, err);
5355748Sduboff
5365748Sduboff kmem_free(rbp, sizeof (struct rxbuf));
5375748Sduboff return (NULL);
5385748Sduboff }
5395748Sduboff
5405748Sduboff /* allocate a bounce buffer for rx */
5415748Sduboff if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
5425748Sduboff ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
5435748Sduboff &dp->gc.gc_buf_attr,
5445748Sduboff /*
5455748Sduboff * if the nic requires a header at the top of receive buffers,
5465748Sduboff * it may access the rx buffer randomly.
5475748Sduboff */
5485748Sduboff (dp->gc.gc_rx_header_len > 0)
5495748Sduboff ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
5505748Sduboff cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
5515748Sduboff NULL,
5525748Sduboff &rbp->rxb_buf, &rbp->rxb_buf_len,
5535748Sduboff &rbp->rxb_bah)) != DDI_SUCCESS) {
5545748Sduboff
5555748Sduboff cmn_err(CE_WARN,
5565748Sduboff "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
5575748Sduboff dp->name, __func__, err);
5585748Sduboff
5595748Sduboff ddi_dma_free_handle(&rbp->rxb_dh);
5605748Sduboff kmem_free(rbp, sizeof (struct rxbuf));
5615748Sduboff return (NULL);
5625748Sduboff }
5635748Sduboff
5645748Sduboff /* Mapin the bounce buffer into the DMA space */
5655748Sduboff if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
5665748Sduboff NULL, rbp->rxb_buf, dp->rx_buf_len,
5675748Sduboff ((dp->gc.gc_rx_header_len > 0)
5685748Sduboff ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
5695748Sduboff :(DDI_DMA_READ | DDI_DMA_STREAMING)),
5705748Sduboff cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
5715748Sduboff NULL,
5725748Sduboff rbp->rxb_dmacookie,
5735748Sduboff &count)) != DDI_DMA_MAPPED) {
5745748Sduboff
5755748Sduboff ASSERT(err != DDI_DMA_INUSE);
5765748Sduboff DPRINTF(0, (CE_WARN,
5775748Sduboff "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
5785748Sduboff dp->name, __func__, err));
5795748Sduboff
5805748Sduboff /*
5815748Sduboff * we failed to allocate a dma resource
5825748Sduboff * for the rx bounce buffer.
5835748Sduboff */
5845748Sduboff ddi_dma_mem_free(&rbp->rxb_bah);
5855748Sduboff ddi_dma_free_handle(&rbp->rxb_dh);
5865748Sduboff kmem_free(rbp, sizeof (struct rxbuf));
5875748Sduboff return (NULL);
5885748Sduboff }
5895748Sduboff
5905748Sduboff /* correct the rest of the DMA mapping */
5915748Sduboff for (i = 1; i < count; i++) {
5925748Sduboff ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
5935748Sduboff }
5945748Sduboff rbp->rxb_nfrags = count;
5955748Sduboff
5965748Sduboff /* Now we successfully prepared an rx buffer */
5975748Sduboff dp->rx_buf_allocated++;
5985748Sduboff
5995748Sduboff return (rbp);
6005748Sduboff }
6015748Sduboff
6025748Sduboff /* ============================================================== */
6035748Sduboff /*
6045748Sduboff * memory resource management
6055748Sduboff */
6065748Sduboff /* ============================================================== */
/*
 * gem_alloc_memory: allocate and DMA-map the device's shared memory:
 * a single contiguous allocation holding the rx descriptor ring, tx
 * descriptor ring and chip-specific io area (when any of them is
 * non-empty), plus one DMA-bound bounce buffer per tx buffer slot.
 * Returns 0 on success, ENOMEM on failure; on failure all resources
 * acquired so far are released.
 */
static int
gem_alloc_memory(struct gem_dev *dp)
{
	caddr_t			ring;
	caddr_t			buf;
	size_t			req_size;
	size_t			ring_len;
	size_t			buf_len;
	ddi_dma_cookie_t	ring_cookie;
	ddi_dma_cookie_t	buf_cookie;
	uint_t			count;
	int			i;
	int			err;
	struct txbuf		*tbp;
	int			tx_buf_len;
	ddi_dma_attr_t		dma_attr_txbounce;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	dp->desc_dma_handle = NULL;
	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;

	if (req_size > 0) {
		/*
		 * Alloc RX/TX descriptors and a io area.
		 */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dp->gc.gc_dma_attr_desc,
		    DDI_DMA_SLEEP, NULL,
		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
			    dp->name, __func__, err);
			return (ENOMEM);
		}

		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
		    req_size, &dp->gc.gc_desc_attr,
		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
		    &ring, &ring_len,
		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_mem_alloc failed: "
			    "ret %d, request size: %d",
			    dp->name, __func__, err, (int)req_size);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}

		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
		    NULL, ring, ring_len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL,
		    &ring_cookie, &count)) != DDI_SUCCESS) {
			ASSERT(err != DDI_DMA_INUSE);
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
			    dp->name, __func__, err);
			ddi_dma_mem_free(&dp->desc_acc_handle);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}
		/* gc_dma_attr_desc requires a single cookie */
		ASSERT(count == 1);

		/* set base of rx descriptor ring */
		dp->rx_ring = ring;
		dp->rx_ring_dma = ring_cookie.dmac_laddress;

		/* set base of tx descriptor ring (follows the rx ring) */
		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;

		/* set base of io area (follows the tx ring) */
		dp->io_area = dp->tx_ring + dp->tx_desc_size;
		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
	}

	/*
	 * Prepare DMA resources for tx packets
	 */
	ASSERT(dp->gc.gc_tx_buf_size > 0);

	/* Special dma attribute for tx bounce buffers */
	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
	dma_attr_txbounce.dma_attr_sgllen = 1;
	dma_attr_txbounce.dma_attr_align =
	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);

	/* Size for tx bounce buffers must be max tx packet size. */
	tx_buf_len = MAXPKTBUF(dp);
	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);

	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);

	for (i = 0, tbp = dp->tx_buf;
	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {

		/* setup bounce buffers for tx packets */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dma_attr_txbounce,
		    DDI_DMA_SLEEP, NULL,
		    &tbp->txb_bdh)) != DDI_SUCCESS) {

			cmn_err(CE_WARN,
		    "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
			    " err=%d, i=%d",
			    dp->name, __func__, err, i);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
		    tx_buf_len,
		    &dp->gc.gc_buf_attr,
		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &buf, &buf_len,
		    &tbp->txb_bah)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
			    "ret %d, request size %d",
			    dp->name, __func__, err, tx_buf_len);
			ddi_dma_free_handle(&tbp->txb_bdh);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
		    NULL, buf, buf_len,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    &buf_cookie, &count)) != DDI_SUCCESS) {
			ASSERT(err != DDI_DMA_INUSE);
			cmn_err(CE_WARN,
	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
			    dp->name, __func__, err);
			ddi_dma_mem_free(&tbp->txb_bah);
			ddi_dma_free_handle(&tbp->txb_bdh);
			goto err_alloc_dh;
		}
		/* dma_attr_sgllen == 1 guarantees a single cookie */
		ASSERT(count == 1);
		tbp->txb_buf = buf;
		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
	}

	return (0);

err_alloc_dh:
	/* unwind: release the tx bounce buffers allocated so far */
	if (dp->gc.gc_tx_buf_size > 0) {
		while (i-- > 0) {
			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
		}
	}

	/* unwind: release the descriptor/io area, if it was allocated */
	if (dp->desc_dma_handle) {
		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
		ddi_dma_mem_free(&dp->desc_acc_handle);
		ddi_dma_free_handle(&dp->desc_dma_handle);
		dp->desc_dma_handle = NULL;
	}

	return (ENOMEM);
}
7695748Sduboff
/*
 * gem_free_memory: release everything gem_alloc_memory() acquired:
 * the descriptor/io-area allocation, the per-slot tx bounce buffers,
 * and every rx buffer currently on the recycle list (each with its
 * DMA binding, bounce buffer and dma handle).  Rx buffers still held
 * elsewhere are not touched here.
 */
static void
gem_free_memory(struct gem_dev *dp)
{
	int		i;
	struct rxbuf	*rbp;
	struct txbuf	*tbp;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* Free TX/RX descriptors and tx padding buffer */
	if (dp->desc_dma_handle) {
		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
		ddi_dma_mem_free(&dp->desc_acc_handle);
		ddi_dma_free_handle(&dp->desc_dma_handle);
		dp->desc_dma_handle = NULL;
	}

	/* Free dma handles for Tx */
	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
		/* Free bounce buffer associated to each txbuf */
		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
		ddi_dma_mem_free(&tbp->txb_bah);
		ddi_dma_free_handle(&tbp->txb_bdh);
	}

	/* Free rx buffer */
	while ((rbp = dp->rx_buf_freelist) != NULL) {

		ASSERT(dp->rx_buf_freecnt > 0);

		dp->rx_buf_freelist = rbp->rxb_next;
		dp->rx_buf_freecnt--;

		/* release DMA mapping */
		ASSERT(rbp->rxb_dh != NULL);

		/* free dma handles for rx bbuf */
		/* it has dma mapping always */
		ASSERT(rbp->rxb_nfrags > 0);
		(void) ddi_dma_unbind_handle(rbp->rxb_dh);

		/* free the associated bounce buffer and dma handle */
		ASSERT(rbp->rxb_bah != NULL);
		ddi_dma_mem_free(&rbp->rxb_bah);
		/* free the associated dma handle */
		ddi_dma_free_handle(&rbp->rxb_dh);

		/* free the base memory of rx buffer management */
		kmem_free(rbp, sizeof (struct rxbuf));
	}
}
8215748Sduboff
8225748Sduboff /* ============================================================== */
8235748Sduboff /*
8245748Sduboff * Rx/Tx descriptor slot management
8255748Sduboff */
8265748Sduboff /* ============================================================== */
8275748Sduboff /*
8285748Sduboff * Initialize an empty rx ring.
8295748Sduboff */
8305748Sduboff static void
gem_init_rx_ring(struct gem_dev * dp)8315748Sduboff gem_init_rx_ring(struct gem_dev *dp)
8325748Sduboff {
8335748Sduboff int i;
8345748Sduboff int rx_ring_size = dp->gc.gc_rx_ring_size;
8355748Sduboff
8365748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
8375748Sduboff dp->name, __func__,
8385748Sduboff rx_ring_size, dp->gc.gc_rx_buf_max));
8395748Sduboff
8405748Sduboff /* make a physical chain of rx descriptors */
8415748Sduboff for (i = 0; i < rx_ring_size; i++) {
8425748Sduboff (*dp->gc.gc_rx_desc_init)(dp, i);
8435748Sduboff }
8445748Sduboff gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
8455748Sduboff
8465748Sduboff dp->rx_active_head = (seqnum_t)0;
8475748Sduboff dp->rx_active_tail = (seqnum_t)0;
8485748Sduboff
8495748Sduboff ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
8505748Sduboff ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
8515748Sduboff }
8525748Sduboff
8535748Sduboff /*
8545748Sduboff * Prepare rx buffers and put them into the rx buffer/descriptor ring.
8555748Sduboff */
8565748Sduboff static void
gem_prepare_rx_buf(struct gem_dev * dp)8575748Sduboff gem_prepare_rx_buf(struct gem_dev *dp)
8585748Sduboff {
8595748Sduboff int i;
8605748Sduboff int nrbuf;
8615748Sduboff struct rxbuf *rbp;
8625748Sduboff
8635748Sduboff ASSERT(mutex_owned(&dp->intrlock));
8645748Sduboff
8655748Sduboff /* Now we have no active buffers in rx ring */
8665748Sduboff
8675748Sduboff nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
8685748Sduboff for (i = 0; i < nrbuf; i++) {
8695748Sduboff if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
8705748Sduboff break;
8715748Sduboff }
8725748Sduboff gem_append_rxbuf(dp, rbp);
8735748Sduboff }
8745748Sduboff
8755748Sduboff gem_rx_desc_dma_sync(dp,
8765748Sduboff 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
8775748Sduboff }
8785748Sduboff
8795748Sduboff /*
8805748Sduboff * Reclaim active rx buffers in rx buffer ring.
8815748Sduboff */
8825748Sduboff static void
gem_clean_rx_buf(struct gem_dev * dp)8835748Sduboff gem_clean_rx_buf(struct gem_dev *dp)
8845748Sduboff {
8855748Sduboff int i;
8865748Sduboff struct rxbuf *rbp;
8875748Sduboff int rx_ring_size = dp->gc.gc_rx_ring_size;
8885748Sduboff #ifdef GEM_DEBUG_LEVEL
8895748Sduboff int total;
8905748Sduboff #endif
8915748Sduboff ASSERT(mutex_owned(&dp->intrlock));
8925748Sduboff
8935748Sduboff DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
8945748Sduboff dp->name, __func__, dp->rx_buf_freecnt));
8955748Sduboff /*
8965748Sduboff * clean up HW descriptors
8975748Sduboff */
8985748Sduboff for (i = 0; i < rx_ring_size; i++) {
8995748Sduboff (*dp->gc.gc_rx_desc_clean)(dp, i);
9005748Sduboff }
9015748Sduboff gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
9025748Sduboff
9035748Sduboff #ifdef GEM_DEBUG_LEVEL
9045748Sduboff total = 0;
9055748Sduboff #endif
9065748Sduboff /*
9075748Sduboff * Reclaim allocated rx buffers
9085748Sduboff */
9095748Sduboff while ((rbp = dp->rx_buf_head) != NULL) {
9105748Sduboff #ifdef GEM_DEBUG_LEVEL
9115748Sduboff total++;
9125748Sduboff #endif
9135748Sduboff /* remove the first one from rx buffer list */
9145748Sduboff dp->rx_buf_head = rbp->rxb_next;
9155748Sduboff
9165748Sduboff /* recycle the rxbuf */
9175748Sduboff gem_free_rxbuf(rbp);
9185748Sduboff }
9195748Sduboff dp->rx_buf_tail = (struct rxbuf *)NULL;
9205748Sduboff
9215748Sduboff DPRINTF(2, (CE_CONT,
9225748Sduboff "!%s: %s: %d buffers freeed, total: %d free",
9235748Sduboff dp->name, __func__, total, dp->rx_buf_freecnt));
9245748Sduboff }
9255748Sduboff
/*
 * Initialize an empty transmit buffer/descriptor ring.
 *
 * Sequence numbers (seqnum_t) grow monotonically and are mapped to
 * physical ring slots via SLOT().  This routine rebases the soft queue
 * so its head becomes sequence number zero, folding the old head into
 * tx_slots_base so that sequence numbers keep addressing the same
 * physical tx buffers across the reset.  Must be called with the mac
 * stopped.
 */
static void
gem_init_tx_ring(struct gem_dev *dp)
{
	int i;
	int tx_buf_size = dp->gc.gc_tx_buf_size;
	int tx_ring_size = dp->gc.gc_tx_ring_size;

	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
	    dp->name, __func__,
	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));

	ASSERT(!dp->mac_active);

	/* initialize active list and free list */
	/* rebase: fold the current softq head into the slot base */
	dp->tx_slots_base =
	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
	dp->tx_softq_tail -= dp->tx_softq_head;
	dp->tx_softq_head = (seqnum_t)0;

	/* nothing is in flight: active window collapses to softq head */
	dp->tx_active_head = dp->tx_softq_head;
	dp->tx_active_tail = dp->tx_softq_head;

	/* free region starts after the (possibly non-empty) soft queue */
	dp->tx_free_head = dp->tx_softq_tail;
	dp->tx_free_tail = dp->gc.gc_tx_buf_limit;

	/* hardware descriptor positions restart from zero */
	dp->tx_desc_head = (seqnum_t)0;
	dp->tx_desc_tail = (seqnum_t)0;
	dp->tx_desc_intr = (seqnum_t)0;

	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_init)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
}
9635748Sduboff
9645748Sduboff __INLINE__
9655748Sduboff static void
gem_txbuf_free_dma_resources(struct txbuf * tbp)9665748Sduboff gem_txbuf_free_dma_resources(struct txbuf *tbp)
9675748Sduboff {
9685748Sduboff if (tbp->txb_mp) {
9695748Sduboff freemsg(tbp->txb_mp);
9705748Sduboff tbp->txb_mp = NULL;
9715748Sduboff }
9725748Sduboff tbp->txb_nfrags = 0;
9737116Sduboff tbp->txb_flag = 0;
9745748Sduboff }
9755748Sduboff #pragma inline(gem_txbuf_free_dma_resources)
9765748Sduboff
/*
 * Reclaim all in-flight and loaded tx buffers and reset positions in
 * the tx rings.  Called only while the mac is stopped and no sender is
 * active (tx_busy == 0); packets that never reached the wire are
 * counted as transmit errors.
 */
static void
gem_clean_tx_buf(struct gem_dev *dp)
{
	int i;
	seqnum_t head;
	seqnum_t tail;
	seqnum_t sn;
	struct txbuf *tbp;
	int tx_ring_size = dp->gc.gc_tx_ring_size;
#ifdef GEM_DEBUG_LEVEL
	int err;
#endif

	ASSERT(!dp->mac_active);
	ASSERT(dp->tx_busy == 0);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);

	/*
	 * clean up all HW descriptors
	 */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_clean)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);

	/* dequeue all active and loaded buffers */
	head = dp->tx_active_head;
	tail = dp->tx_softq_tail;

	ASSERT(dp->tx_free_head - head >= 0);
	tbp = GET_TXBUF(dp, head);
	for (sn = head; sn != tail; sn++) {
		/* these packets are lost; count each as a tx error */
		gem_txbuf_free_dma_resources(tbp);
		ASSERT(tbp->txb_mp == NULL);
		dp->stats.errxmt++;
		tbp = tbp->txb_next;
	}

#ifdef GEM_DEBUG_LEVEL
	/* ensure that no tx dma resources remain in use */
	err = 0;
	while (sn != head + dp->gc.gc_tx_buf_size) {
		if (tbp->txb_mp || tbp->txb_nfrags) {
			DPRINTF(0, (CE_CONT,
			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
			    dp->name, __func__,
			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
			    tbp->txb_mp, tbp->txb_nfrags));
			err = 1;
		}
		sn++;
		tbp = tbp->txb_next;
	}

	if (err) {
		gem_dump_txbuf(dp, CE_WARN,
		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
	}
#endif
	/* recycle buffers, now no active tx buffers in the ring */
	dp->tx_free_tail += tail - head;
	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);

	/* fix positions in tx buffer rings: all windows collapse */
	dp->tx_active_head = dp->tx_free_head;
	dp->tx_active_tail = dp->tx_free_head;
	dp->tx_softq_head = dp->tx_free_head;
	dp->tx_softq_tail = dp->tx_free_head;
}
10495748Sduboff
10505748Sduboff /*
10515748Sduboff * Reclaim transmitted buffers from tx buffer/descriptor ring.
10525748Sduboff */
10535748Sduboff __INLINE__ int
gem_reclaim_txbuf(struct gem_dev * dp)10545748Sduboff gem_reclaim_txbuf(struct gem_dev *dp)
10555748Sduboff {
10565748Sduboff struct txbuf *tbp;
10575748Sduboff uint_t txstat;
10585748Sduboff int err = GEM_SUCCESS;
10595748Sduboff seqnum_t head;
10605748Sduboff seqnum_t tail;
10615748Sduboff seqnum_t sn;
10625748Sduboff seqnum_t desc_head;
10635748Sduboff int tx_ring_size = dp->gc.gc_tx_ring_size;
10645748Sduboff uint_t (*tx_desc_stat)(struct gem_dev *dp,
10655748Sduboff int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
10667116Sduboff clock_t now;
10677116Sduboff
10687116Sduboff now = ddi_get_lbolt();
10697116Sduboff if (now == (clock_t)0) {
10707116Sduboff /* make non-zero timestamp */
10717116Sduboff now--;
10727116Sduboff }
10735748Sduboff
10745748Sduboff mutex_enter(&dp->xmitlock);
10755748Sduboff
10765748Sduboff head = dp->tx_active_head;
10775748Sduboff tail = dp->tx_active_tail;
10785748Sduboff
10795748Sduboff #if GEM_DEBUG_LEVEL > 2
10805748Sduboff if (head != tail) {
10815748Sduboff cmn_err(CE_CONT, "!%s: %s: "
10825748Sduboff "testing active_head:%d[%d], active_tail:%d[%d]",
10835748Sduboff dp->name, __func__,
10845748Sduboff head, SLOT(head, dp->gc.gc_tx_buf_size),
10855748Sduboff tail, SLOT(tail, dp->gc.gc_tx_buf_size));
10865748Sduboff }
10875748Sduboff #endif
10885748Sduboff #ifdef DEBUG
10895748Sduboff if (dp->tx_reclaim_busy == 0) {
10905748Sduboff /* check tx buffer management consistency */
10915748Sduboff ASSERT(dp->tx_free_tail - dp->tx_active_head
10925748Sduboff == dp->gc.gc_tx_buf_limit);
10935748Sduboff /* EMPTY */
10945748Sduboff }
10955748Sduboff #endif
10965748Sduboff dp->tx_reclaim_busy++;
10975748Sduboff
10985748Sduboff /* sync all active HW descriptors */
10995748Sduboff gem_tx_desc_dma_sync(dp,
11005748Sduboff SLOT(dp->tx_desc_head, tx_ring_size),
11015748Sduboff dp->tx_desc_tail - dp->tx_desc_head,
11025748Sduboff DDI_DMA_SYNC_FORKERNEL);
11035748Sduboff
11045748Sduboff tbp = GET_TXBUF(dp, head);
11055748Sduboff desc_head = dp->tx_desc_head;
11065748Sduboff for (sn = head; sn != tail;
11075748Sduboff dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
11085748Sduboff int ndescs;
11095748Sduboff
11105748Sduboff ASSERT(tbp->txb_desc == desc_head);
11115748Sduboff
11125748Sduboff ndescs = tbp->txb_ndescs;
11137116Sduboff if (ndescs == 0) {
11147116Sduboff /* skip errored descriptors */
11157116Sduboff continue;
11167116Sduboff }
11175748Sduboff txstat = (*tx_desc_stat)(dp,
11185748Sduboff SLOT(tbp->txb_desc, tx_ring_size), ndescs);
11195748Sduboff
11205748Sduboff if (txstat == 0) {
11215748Sduboff /* not transmitted yet */
11225748Sduboff break;
11235748Sduboff }
11245748Sduboff
11257116Sduboff if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
11267116Sduboff dp->tx_blocked = now;
11277116Sduboff }
11287116Sduboff
11295748Sduboff ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
11305748Sduboff
11315748Sduboff if (txstat & GEM_TX_ERR) {
11325748Sduboff err = GEM_FAILURE;
11335748Sduboff cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
11345748Sduboff dp->name, sn, SLOT(sn, tx_ring_size));
11355748Sduboff }
11365748Sduboff #if GEM_DEBUG_LEVEL > 4
11375748Sduboff if (now - tbp->txb_stime >= 50) {
11385748Sduboff cmn_err(CE_WARN, "!%s: tx delay while %d mS",
11395748Sduboff dp->name, (now - tbp->txb_stime)*10);
11405748Sduboff }
11415748Sduboff #endif
11425748Sduboff /* free transmitted descriptors */
11435748Sduboff desc_head += ndescs;
11445748Sduboff }
11455748Sduboff
11465748Sduboff if (dp->tx_desc_head != desc_head) {
11475748Sduboff /* we have reclaimed one or more tx buffers */
11485748Sduboff dp->tx_desc_head = desc_head;
11495748Sduboff
11505748Sduboff /* If we passed the next interrupt position, update it */
11517116Sduboff if (desc_head - dp->tx_desc_intr > 0) {
11525748Sduboff dp->tx_desc_intr = desc_head;
11535748Sduboff }
11545748Sduboff }
11555748Sduboff mutex_exit(&dp->xmitlock);
11565748Sduboff
11575748Sduboff /* free dma mapping resources associated with transmitted tx buffers */
11585748Sduboff tbp = GET_TXBUF(dp, head);
11595748Sduboff tail = sn;
11605748Sduboff #if GEM_DEBUG_LEVEL > 2
11615748Sduboff if (head != tail) {
11625748Sduboff cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
11635748Sduboff __func__,
11645748Sduboff head, SLOT(head, dp->gc.gc_tx_buf_size),
11655748Sduboff tail, SLOT(tail, dp->gc.gc_tx_buf_size));
11665748Sduboff }
11675748Sduboff #endif
11685748Sduboff for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
11695748Sduboff gem_txbuf_free_dma_resources(tbp);
11705748Sduboff }
11715748Sduboff
11725748Sduboff /* recycle the tx buffers */
11735748Sduboff mutex_enter(&dp->xmitlock);
11745748Sduboff if (--dp->tx_reclaim_busy == 0) {
11755748Sduboff /* we are the last thread who can update free tail */
11765748Sduboff #if GEM_DEBUG_LEVEL > 4
11775748Sduboff /* check all resouces have been deallocated */
11785748Sduboff sn = dp->tx_free_tail;
11795748Sduboff tbp = GET_TXBUF(dp, new_tail);
11805748Sduboff while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
11815748Sduboff if (tbp->txb_nfrags) {
11825748Sduboff /* in use */
11835748Sduboff break;
11845748Sduboff }
11855748Sduboff ASSERT(tbp->txb_mp == NULL);
11865748Sduboff tbp = tbp->txb_next;
11875748Sduboff sn++;
11885748Sduboff }
11895748Sduboff ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
11905748Sduboff #endif
11915748Sduboff dp->tx_free_tail =
11925748Sduboff dp->tx_active_head + dp->gc.gc_tx_buf_limit;
11935748Sduboff }
11945748Sduboff if (!dp->mac_active) {
11955748Sduboff /* someone may be waiting for me. */
11965748Sduboff cv_broadcast(&dp->tx_drain_cv);
11975748Sduboff }
11985748Sduboff #if GEM_DEBUG_LEVEL > 2
11995748Sduboff cmn_err(CE_CONT, "!%s: %s: called, "
12005748Sduboff "free_head:%d free_tail:%d(+%d) added:%d",
12015748Sduboff dp->name, __func__,
12025748Sduboff dp->tx_free_head, dp->tx_free_tail,
12035748Sduboff dp->tx_free_tail - dp->tx_free_head, tail - head);
12045748Sduboff #endif
12055748Sduboff mutex_exit(&dp->xmitlock);
12065748Sduboff
12075748Sduboff return (err);
12085748Sduboff }
12095748Sduboff #pragma inline(gem_reclaim_txbuf)
12105748Sduboff
12115748Sduboff
12125748Sduboff /*
12135748Sduboff * Make tx descriptors in out-of-order manner
12145748Sduboff */
12155748Sduboff static void
gem_tx_load_descs_oo(struct gem_dev * dp,seqnum_t start_slot,seqnum_t end_slot,uint64_t flags)12165748Sduboff gem_tx_load_descs_oo(struct gem_dev *dp,
12177116Sduboff seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
12185748Sduboff {
12195748Sduboff seqnum_t sn;
12205748Sduboff struct txbuf *tbp;
12215748Sduboff int tx_ring_size = dp->gc.gc_tx_ring_size;
12225748Sduboff int (*tx_desc_write)
12235748Sduboff (struct gem_dev *dp, int slot,
12245748Sduboff ddi_dma_cookie_t *dmacookie,
12255748Sduboff int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
12265748Sduboff clock_t now = ddi_get_lbolt();
12275748Sduboff
12285748Sduboff sn = start_slot;
12295748Sduboff tbp = GET_TXBUF(dp, sn);
12305748Sduboff do {
12315748Sduboff #if GEM_DEBUG_LEVEL > 1
12325748Sduboff if (dp->tx_cnt < 100) {
12335748Sduboff dp->tx_cnt++;
12345748Sduboff flags |= GEM_TXFLAG_INTR;
12355748Sduboff }
12365748Sduboff #endif
12375748Sduboff /* write a tx descriptor */
12385748Sduboff tbp->txb_desc = sn;
12395748Sduboff tbp->txb_ndescs = (*tx_desc_write)(dp,
12405748Sduboff SLOT(sn, tx_ring_size),
12415748Sduboff tbp->txb_dmacookie,
12425748Sduboff tbp->txb_nfrags, flags | tbp->txb_flag);
12435748Sduboff tbp->txb_stime = now;
12445748Sduboff ASSERT(tbp->txb_ndescs == 1);
12455748Sduboff
12465748Sduboff flags = 0;
12475748Sduboff sn++;
12485748Sduboff tbp = tbp->txb_next;
12495748Sduboff } while (sn != end_slot);
12505748Sduboff }
12515748Sduboff
/*
 * Stage an outgoing packet by copying the whole mblk chain `mp` into
 * the bounce buffer of tx buffer `tbp` and setting up a single DMA
 * cookie covering it.  The mblk is retained in tbp->txb_mp and freed
 * when the buffer is reclaimed.  Returns the number of bytes staged,
 * including any zero padding added to reach the minimum frame size.
 */
__INLINE__
static size_t
gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
{
	size_t min_pkt;
	caddr_t bp;
	size_t off;
	mblk_t *tp;
	size_t len;
	uint64_t flag;

	ASSERT(tbp->txb_mp == NULL);

	/* we use bounce buffer for the packet */
	min_pkt = ETHERMIN;
	bp = tbp->txb_buf;
	off = 0;
	tp = mp;

	flag = tbp->txb_flag;
	if (flag & GEM_TXFLAG_SWVTAG) {
		/* need to increase min packet size */
		min_pkt += VTAG_SIZE;
		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
	}

	/* copy the entire mblk chain into the bounce buffer */
	for (; tp; tp = tp->b_cont) {
		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
			bcopy(tp->b_rptr, &bp[off], len);
			off += len;
		}
	}

	if (off < min_pkt &&
	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
		/*
		 * Extend the packet to minimum packet size explicitly.
		 * For software vlan packets, we shouldn't use tx autopad
		 * function because nics may not be aware of vlan.
		 * we must keep 46 octet of payload even if we use vlan.
		 */
		bzero(&bp[off], min_pkt - off);
		off = min_pkt;
	}

	/* flush the staged bytes to the device before transmission */
	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);

	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
	tbp->txb_dmacookie[0].dmac_size = off;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
	    dp->name, __func__,
	    tbp->txb_dmacookie[0].dmac_laddress,
	    tbp->txb_dmacookie[0].dmac_size,
	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
	    min_pkt));

	/* save misc info */
	tbp->txb_mp = mp;
	tbp->txb_nfrags = 1;
#ifdef DEBUG_MULTIFRAGS
	/*
	 * debug aid: split the single cookie into three so the
	 * multi-fragment path of gc_tx_desc_write gets exercised
	 */
	if (dp->gc.gc_tx_max_frags >= 3 &&
	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
		tbp->txb_dmacookie[1].dmac_laddress =
		    tbp->txb_dmacookie[0].dmac_laddress + 16;
		tbp->txb_dmacookie[2].dmac_laddress =
		    tbp->txb_dmacookie[1].dmac_laddress + 16;

		tbp->txb_dmacookie[2].dmac_size =
		    tbp->txb_dmacookie[0].dmac_size - 16*2;
		tbp->txb_dmacookie[1].dmac_size = 16;
		tbp->txb_dmacookie[0].dmac_size = 16;
		tbp->txb_nfrags = 3;
	}
#endif
	return (off);
}
#pragma inline(gem_setup_txbuf_copy)
13325748Sduboff
13335748Sduboff __INLINE__
13345748Sduboff static void
gem_tx_start_unit(struct gem_dev * dp)13355748Sduboff gem_tx_start_unit(struct gem_dev *dp)
13365748Sduboff {
13375748Sduboff seqnum_t head;
13385748Sduboff seqnum_t tail;
13395748Sduboff struct txbuf *tbp_head;
13405748Sduboff struct txbuf *tbp_tail;
13415748Sduboff
13425748Sduboff /* update HW descriptors from soft queue */
13435748Sduboff ASSERT(mutex_owned(&dp->xmitlock));
13445748Sduboff ASSERT(dp->tx_softq_head == dp->tx_active_tail);
13455748Sduboff
13465748Sduboff head = dp->tx_softq_head;
13475748Sduboff tail = dp->tx_softq_tail;
13485748Sduboff
13495748Sduboff DPRINTF(1, (CE_CONT,
13505748Sduboff "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
13515748Sduboff dp->name, __func__, head, tail, tail - head,
13525748Sduboff dp->tx_desc_head, dp->tx_desc_tail,
13535748Sduboff dp->tx_desc_tail - dp->tx_desc_head));
13545748Sduboff
13555748Sduboff ASSERT(tail - head > 0);
13565748Sduboff
13575748Sduboff dp->tx_desc_tail = tail;
13585748Sduboff
13595748Sduboff tbp_head = GET_TXBUF(dp, head);
13605748Sduboff tbp_tail = GET_TXBUF(dp, tail - 1);
13615748Sduboff
13625748Sduboff ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
13635748Sduboff
13645748Sduboff dp->gc.gc_tx_start(dp,
13655748Sduboff SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
13665748Sduboff tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
13675748Sduboff
13685748Sduboff /* advance softq head and active tail */
13695748Sduboff dp->tx_softq_head = dp->tx_active_tail = tail;
13705748Sduboff }
13715748Sduboff #pragma inline(gem_tx_start_unit)
13725748Sduboff
13735748Sduboff #ifdef GEM_DEBUG_LEVEL
13745748Sduboff static int gem_send_cnt[10];
13755748Sduboff #endif
#define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
#define	EHLEN	(sizeof (struct ether_header))	/* NOTE(review): unused */
/*
 * check ether packet type and ip protocol
 *
 * Examines the ethernet header of `mp` and returns the tx descriptor
 * flags required for vlan handling.  `bp` is a caller-supplied work
 * buffer of at least PKT_MIN_SIZE bytes, used only when the first mblk
 * fragment is too short to analyse in place.
 */
static uint64_t
gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
{
	mblk_t *tp;
	ssize_t len;
	uint_t vtag;
	int off;
	uint64_t flag;

	flag = 0ULL;

	/*
	 * prepare continuous header of the packet for protocol analysis
	 */
	if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
		/* we use work buffer to copy mblk */
		for (tp = mp, off = 0;
		    tp && (off < PKT_MIN_SIZE);
		    tp = tp->b_cont, off += len) {
			len = (long)tp->b_wptr - (long)tp->b_rptr;
			len = min(len, PKT_MIN_SIZE - off);
			bcopy(tp->b_rptr, &bp[off], len);
		}
	} else {
		/* we can use mblk without copy */
		bp = mp->b_rptr;
	}

	/* process vlan tag for GLD v3 */
	if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
		if (dp->misc_flag & GEM_VLAN_HARD) {
			/* hardware tagging: pass the tci via tx flags */
			vtag = GET_NET16(&bp[VTAG_OFF + 2]);
			ASSERT(vtag);
			flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
		} else {
			/* tag stays in the frame; mark for sw handling */
			flag |= GEM_TXFLAG_SWVTAG;
		}
	}
	return (flag);
}
#undef	EHLEN
#undef	PKT_MIN_SIZE
/*
 * gem_send_common is an exported function because hw depend routines may
 * use it for sending control frames like setup frames for 2114x chipset.
 *
 * Queues the mblk chain `mp_head` for transmission.  Returns NULL when
 * everything was consumed (or dropped because the mac is suspended);
 * returns the unconsumed remainder of the chain when resources ran out
 * or the mac is inactive, so the caller can retry later.
 *
 * Locking: xmitlock is dropped while the packet payloads are being
 * copied; dp->tx_busy is a refcount over that window, and only the
 * thread that drops it to zero pushes the soft queue to the hardware.
 */
mblk_t *
gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
{
	int nmblk;
	int avail;
	mblk_t *tp;
	mblk_t *mp;
	int i;
	struct txbuf *tbp;
	seqnum_t head;
	uint64_t load_flags;
	uint64_t len_total = 0;
	uint32_t bcast = 0;
	uint32_t mcast = 0;

	ASSERT(mp_head != NULL);

	/* count the packets in the chain */
	mp = mp_head;
	nmblk = 1;
	while ((mp = mp->b_next) != NULL) {
		nmblk++;
	}
#ifdef GEM_DEBUG_LEVEL
	gem_send_cnt[0]++;
	gem_send_cnt[min(nmblk, 9)]++;
#endif
	/*
	 * Acquire resources
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		/* suspended: drop the whole chain silently */
		mutex_exit(&dp->xmitlock);
		mp = mp_head;
		while (mp) {
			tp = mp->b_next;
			freemsg(mp);
			mp = tp;
		}
		return (NULL);
	}

	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
		/* don't send data packets while mac isn't active */
		/* XXX - should we discard packets? */
		mutex_exit(&dp->xmitlock);
		return (mp_head);
	}

	/* allocate free slots */
	head = dp->tx_free_head;
	avail = dp->tx_free_tail - head;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
	    dp->name, __func__,
	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));

	/* tx_max_packets throttles how many buffers one call may take */
	avail = min(avail, dp->tx_max_packets);

	if (nmblk > avail) {
		if (avail == 0) {
			/* no resources; short cut */
			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
			/* shrink the throttle, keeping it at least 1 */
			dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
			goto done;
		}
		/* partial send: the rest of the chain is returned */
		nmblk = avail;
	}

	dp->tx_free_head = head + nmblk;
	/* only the first concurrent sender marks the HEAD of a burst */
	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;

	/* update last interrupt position if tx buffers exhaust. */
	if (nmblk == avail) {
		/*
		 * request a completion interrupt on the last buffer so
		 * we learn when resources free up.
		 * NOTE(review): tx_desc_intr is in descriptor seqnum
		 * space; this equates it with buffer seqnums — holds
		 * while txb_ndescs == 1 (asserted elsewhere); confirm.
		 */
		tbp = GET_TXBUF(dp, head + avail - 1);
		tbp->txb_flag = GEM_TXFLAG_INTR;
		dp->tx_desc_intr = head + avail;
	}
	mutex_exit(&dp->xmitlock);

	tbp = GET_TXBUF(dp, head);

	/* copy each packet into its tx buffer (lock not held here) */
	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
		uint8_t *bp;
		uint64_t txflag;

		/* remove one from the mblk list */
		ASSERT(mp_head != NULL);
		mp = mp_head;
		mp_head = mp_head->b_next;
		mp->b_next = NULL;

		/* statistics for non-unicast packets */
		bp = mp->b_rptr;
		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				bcast++;
			} else {
				mcast++;
			}
		}

		/* save misc info */
		txflag = tbp->txb_flag;
		txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
		tbp->txb_flag = txflag;

		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
	}

	(void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);

	/* Append the tbp at the tail of the active tx buffer list */
	mutex_enter(&dp->xmitlock);

	if ((--dp->tx_busy) == 0) {
		/* extend the tail of softq, as new packets have been ready. */
		dp->tx_softq_tail = dp->tx_free_head;

		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
			/*
			 * The device status has changed while we are
			 * preparing tx buf.
			 * As we are the last one that make tx non-busy.
			 * wake up someone who may wait for us.
			 */
			cv_broadcast(&dp->tx_drain_cv);
		} else {
			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
			gem_tx_start_unit(dp);
		}
	}
	dp->stats.obytes += len_total;
	dp->stats.opackets += nmblk;
	dp->stats.obcast += bcast;
	dp->stats.omcast += mcast;
done:
	mutex_exit(&dp->xmitlock);

	return (mp_head);
}
15705748Sduboff
15715748Sduboff /* ========================================================== */
15725748Sduboff /*
15735748Sduboff * error detection and restart routines
15745748Sduboff */
15755748Sduboff /* ========================================================== */
/*
 * Stop, reset, and re-initialize the nic after an error.
 *
 * `flags` is passed through to gem_mac_stop (e.g. GEM_RESTART_KEEP_BUF
 * to preserve queued buffers across the restart).  Returns GEM_SUCCESS
 * or GEM_FAILURE.  Caller must hold intrlock.
 */
int
gem_restart_nic(struct gem_dev *dp, uint_t flags)
{
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
#ifdef GEM_DEBUG_LEVEL
#if GEM_DEBUG_LEVEL > 1
	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
#endif
#endif

	if (dp->mac_suspended) {
		/* should we return GEM_FAILURE ? */
		return (GEM_FAILURE);
	}

	/*
	 * We should avoid calling any routines except xxx_chip_reset
	 * when we are resuming the system.
	 */
	if (dp->mac_active) {
		if (flags & GEM_RESTART_KEEP_BUF) {
			/* stop rx gracefully */
			dp->rxmode &= ~RXMODE_ENABLE;
			(void) (*dp->gc.gc_set_rx_filter)(dp);
		}
		(void) gem_mac_stop(dp, flags);
	}

	/* reset the chip. */
	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
		    dp->name, __func__);
		goto err;
	}

	if (gem_mac_init(dp) != GEM_SUCCESS) {
		goto err;
	}

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
			goto err;
		}
	}

	/* setup mac address and enable rx filter */
	dp->rxmode |= RXMODE_ENABLE;
	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
		goto err;
	}

	/*
	 * XXX - a panic happened because of linkdown.
	 * We must check mii_state here, because the link can be down just
	 * before the restart event happen. If the link is down now,
	 * gem_mac_start() will be called from gem_mii_link_check() when
	 * the link become up later.
	 */
	if (dp->mii_state == MII_STATE_LINKUP) {
		/* restart the nic */
		ASSERT(!dp->mac_active);
		(void) gem_mac_start(dp);
	}
	return (GEM_SUCCESS);
err:
	return (GEM_FAILURE);
}
16465748Sduboff
16475748Sduboff
/*
 * gem_tx_timeout: tx watchdog, run periodically from a timeout(9F)
 * callback.
 *
 * It reclaims completed tx buffers; on a reclaim failure, or when the
 * oldest buffer loaded into the nic has been pending longer than
 * gc_tx_timeout, the chip is restarted via gem_restart_nic() and the
 * mac tx side is kicked with mac_tx_update().  It also detects the
 * "tx blocked with no buffers in flight" case.  Finally it re-arms
 * itself with gc_tx_timeout_interval.
 */
static void
gem_tx_timeout(struct gem_dev *dp)
{
	clock_t now;
	boolean_t tx_sched;
	struct txbuf *tbp;

	mutex_enter(&dp->intrlock);

	tx_sched = B_FALSE;
	now = ddi_get_lbolt();

	mutex_enter(&dp->xmitlock);
	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
		/* mac stopped or link down: nothing to watch, just re-arm */
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	/* reclaim transmitted buffers to check the trasmitter hangs or not. */
	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error happened, reset transmitter in the chip */
		(void) gem_restart_nic(dp, 0);
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;

		goto schedule_next;
	}

	mutex_enter(&dp->xmitlock);
	/* check if the transmitter thread is stuck */
	if (dp->tx_active_head == dp->tx_active_tail) {
		/* no tx buffer is loaded to the nic */
		if (dp->tx_blocked &&
		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
			/* tx has been blocked too long with an empty ring */
			gem_dump_txbuf(dp, CE_WARN,
			    "gem_tx_timeout: tx blocked");
			tx_sched = B_TRUE;
			dp->tx_blocked = (clock_t)0;
		}
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}

	/* the oldest loaded buffer tells how long the chip has been idle */
	tbp = GET_TXBUF(dp, dp->tx_active_head);
	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");

	/* discard untransmitted packet and restart tx. */
	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
	tx_sched = B_TRUE;
	dp->tx_blocked = (clock_t)0;

schedule_next:
	mutex_exit(&dp->intrlock);

	/* restart the downstream if needed */
	if (tx_sched) {
		mac_tx_update(dp->mh);
	}

	DPRINTF(4, (CE_CONT,
	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
	    dp->name, BOOLEAN(dp->tx_blocked),
	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
	dp->timeout_id =
	    timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
}
17225748Sduboff
17235748Sduboff /* ================================================================== */
17245748Sduboff /*
17255748Sduboff * Interrupt handler
17265748Sduboff */
17275748Sduboff /* ================================================================== */
17285748Sduboff __INLINE__
17295748Sduboff static void
gem_append_rxbuf(struct gem_dev * dp,struct rxbuf * rbp_head)17305748Sduboff gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
17315748Sduboff {
17325748Sduboff struct rxbuf *rbp;
17335748Sduboff seqnum_t tail;
17345748Sduboff int rx_ring_size = dp->gc.gc_rx_ring_size;
17355748Sduboff
17365748Sduboff ASSERT(rbp_head != NULL);
17375748Sduboff ASSERT(mutex_owned(&dp->intrlock));
17385748Sduboff
17395748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
17405748Sduboff dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
17415748Sduboff
17425748Sduboff /*
17435748Sduboff * Add new buffers into active rx buffer list
17445748Sduboff */
17455748Sduboff if (dp->rx_buf_head == NULL) {
17465748Sduboff dp->rx_buf_head = rbp_head;
17475748Sduboff ASSERT(dp->rx_buf_tail == NULL);
17485748Sduboff } else {
17495748Sduboff dp->rx_buf_tail->rxb_next = rbp_head;
17505748Sduboff }
17515748Sduboff
17525748Sduboff tail = dp->rx_active_tail;
17535748Sduboff for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
17545748Sduboff /* need to notify the tail for the lower layer */
17555748Sduboff dp->rx_buf_tail = rbp;
17565748Sduboff
17575748Sduboff dp->gc.gc_rx_desc_write(dp,
17585748Sduboff SLOT(tail, rx_ring_size),
17595748Sduboff rbp->rxb_dmacookie,
17605748Sduboff rbp->rxb_nfrags);
17615748Sduboff
17625748Sduboff dp->rx_active_tail = tail = tail + 1;
17635748Sduboff }
17645748Sduboff }
17655748Sduboff #pragma inline(gem_append_rxbuf)
17665748Sduboff
17675748Sduboff mblk_t *
gem_get_packet_default(struct gem_dev * dp,struct rxbuf * rbp,size_t len)17685748Sduboff gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
17695748Sduboff {
17705748Sduboff int rx_header_len = dp->gc.gc_rx_header_len;
17715748Sduboff uint8_t *bp;
17725748Sduboff mblk_t *mp;
17735748Sduboff
17745748Sduboff /* allocate a new mblk */
17755748Sduboff if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
17765748Sduboff ASSERT(mp->b_next == NULL);
17775748Sduboff ASSERT(mp->b_cont == NULL);
17785748Sduboff
17795748Sduboff mp->b_rptr += VTAG_SIZE;
17805748Sduboff bp = mp->b_rptr;
17815748Sduboff mp->b_wptr = bp + len;
17825748Sduboff
17837116Sduboff /*
17847116Sduboff * flush the range of the entire buffer to invalidate
17857116Sduboff * all of corresponding dirty entries in iocache.
17867116Sduboff */
17875748Sduboff (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
17887116Sduboff 0, DDI_DMA_SYNC_FORKERNEL);
17895748Sduboff
17905748Sduboff bcopy(rbp->rxb_buf + rx_header_len, bp, len);
17915748Sduboff }
17925748Sduboff return (mp);
17935748Sduboff }
17945748Sduboff
#ifdef GEM_DEBUG_LEVEL
/*
 * Debug histogram: packets harvested per gem_receive() call.
 * Bins 0..15 count exact values; the last bin collects 16 and above.
 */
uint_t gem_rx_pkts[17];
#endif
17985748Sduboff
17995748Sduboff
/*
 * gem_receive: harvest completed frames from the rx descriptor ring.
 *
 * Called with dp->intrlock held.  For each descriptor marked done by
 * gc_rx_desc_stat, the frame is copied out via gc_get_packet, checked
 * against the minimum/maximum ethernet frame size (allowing VTAG_SIZE
 * extra for tagged frames), and collected on a local mblk list.  Every
 * processed rx buffer (good or errored) is recycled back onto the ring
 * through gem_append_rxbuf(), and gc_rx_start is called to tell the
 * hardware about the refilled slots.  Collected packets are finally
 * handed to the mac layer with mac_rx(), dropping intrlock around the
 * upcall.
 *
 * Returns the number of descriptors processed in this call.
 */
int
gem_receive(struct gem_dev *dp)
{
	uint64_t len_total = 0;
	struct rxbuf *rbp;
	mblk_t *mp;
	int cnt = 0;
	uint64_t rxstat;
	struct rxbuf *newbufs;
	struct rxbuf **newbufs_tailp;
	mblk_t *rx_head;
	mblk_t **rx_tailp;
	int rx_ring_size = dp->gc.gc_rx_ring_size;
	seqnum_t active_head;
	uint64_t (*rx_desc_stat)(struct gem_dev *dp,
	    int slot, int ndesc);
	int ethermin = ETHERMIN;
	int ethermax = dp->mtu + sizeof (struct ether_header);
	int rx_header_len = dp->gc.gc_rx_header_len;

	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
	    dp->name, dp->rx_buf_head));

	rx_desc_stat = dp->gc.gc_rx_desc_stat;
	newbufs_tailp = &newbufs;
	rx_tailp = &rx_head;
	for (active_head = dp->rx_active_head;
	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
		int len;
		if (cnt == 0) {
			/* batch-sync the next run of rx descriptors */
			cnt = max(dp->poll_pkt_delay*2, 10);
			cnt = min(cnt,
			    dp->rx_active_tail - active_head);
			gem_rx_desc_dma_sync(dp,
			    SLOT(active_head, rx_ring_size),
			    cnt,
			    DDI_DMA_SYNC_FORKERNEL);
		}

		/* sync the hardware rx header area, if the chip has one */
		if (rx_header_len > 0) {
			(void) ddi_dma_sync(rbp->rxb_dh, 0,
			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
		}

		if (((rxstat = (*rx_desc_stat)(dp,
		    SLOT(active_head, rx_ring_size),
		    rbp->rxb_nfrags))
		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
			/* not received yet */
			break;
		}

		/* Remove the head of the rx buffer list */
		dp->rx_buf_head = rbp->rxb_next;
		cnt--;


		if (rxstat & GEM_RX_ERR) {
			/* receive error: drop the frame, recycle the buffer */
			goto next;
		}

		len = rxstat & GEM_RX_LEN;
		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
		    dp->name, __func__, rxstat, len));

		/*
		 * Copy the packet
		 */
		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
			/* no memory, discard the packet */
			dp->stats.norcvbuf++;
			goto next;
		}

		/*
		 * Process VLAN tag
		 */
		ethermin = ETHERMIN;
		ethermax = dp->mtu + sizeof (struct ether_header);
		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
			/* tagged frame: allow VTAG_SIZE extra bytes */
			ethermax += VTAG_SIZE;
		}

		/* check packet size */
		if (len < ethermin) {
			dp->stats.errrcv++;
			dp->stats.runt++;
			freemsg(mp);
			goto next;
		}

		if (len > ethermax) {
			dp->stats.errrcv++;
			dp->stats.frame_too_long++;
			freemsg(mp);
			goto next;
		}

		len_total += len;

#ifdef GEM_DEBUG_VLAN
		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
		}
#endif
		/* append received packet to temporaly rx buffer list */
		*rx_tailp = mp;
		rx_tailp = &mp->b_next;

		/* multicast bit set: classify broadcast vs multicast */
		if (mp->b_rptr[0] & 1) {
			if (bcmp(mp->b_rptr,
			    gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				dp->stats.rbcast++;
			} else {
				dp->stats.rmcast++;
			}
		}
next:
		ASSERT(rbp != NULL);

		/* append new one to temporal new buffer list */
		*newbufs_tailp = rbp;
		newbufs_tailp = &rbp->rxb_next;
	}

	/* advance rx_active_head */
	if ((cnt = active_head - dp->rx_active_head) > 0) {
		dp->stats.rbytes += len_total;
		dp->stats.rpackets += cnt;
	}
	dp->rx_active_head = active_head;

	/* terminate the working list */
	*newbufs_tailp = NULL;
	*rx_tailp = NULL;

	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_tail = NULL;
	}

	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
	    dp->name, __func__, cnt, rx_head));

	if (newbufs) {
		/*
		 * fillfull rx list with new buffers
		 */
		seqnum_t head;

		/* save current tail */
		head = dp->rx_active_tail;
		gem_append_rxbuf(dp, newbufs);

		/* call hw depend start routine if we have. */
		dp->gc.gc_rx_start(dp,
		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
	}

	if (rx_head) {
		/*
		 * send up received packets
		 */
		mutex_exit(&dp->intrlock);
		mac_rx(dp->mh, NULL, rx_head);
		mutex_enter(&dp->intrlock);
	}

#ifdef GEM_DEBUG_LEVEL
	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
#endif
	return (cnt);
}
19755748Sduboff
/*
 * gem_tx_done: service a tx-done event.
 *
 * Reclaims completed tx buffers; on a reclaim failure, restarts the
 * chip keeping the loaded buffers.  Also detects the case where the tx
 * side is blocked and no further tx-done interrupts are pending, so
 * the mac layer must be kicked now or never.
 *
 * Returns B_TRUE when the caller should reschedule the downstream
 * (i.e. call mac_tx_update()).
 */
boolean_t
gem_tx_done(struct gem_dev *dp)
{
	boolean_t tx_sched = B_FALSE;

	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error: reset the chip, preserving loaded tx buffers */
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
		    dp->name, dp->tx_active_head, dp->tx_active_tail));
		tx_sched = B_TRUE;
		goto x;
	}

	mutex_enter(&dp->xmitlock);

	/* XXX - we must not have any packets in soft queue */
	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
	/*
	 * If we won't have chance to get more free tx buffers, and blocked,
	 * it is worth to reschedule the downstream i.e. tx side.
	 */
	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
		/*
		 * As no further tx-done interrupts are scheduled, this
		 * is the last chance to kick tx side, which may be
		 * blocked now, otherwise the tx side never works again.
		 */
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;
		/* gradually relax the limit on queued tx packets */
		dp->tx_max_packets =
		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
	}

	mutex_exit(&dp->xmitlock);

	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
x:
	return (tx_sched);
}
20175748Sduboff
/*
 * gem_intr: common interrupt handler wrapper.
 *
 * Claims the interrupt by dispatching to the hardware-specific
 * gc_interrupt routine under intrlock.  Interrupts arriving while the
 * device is suspended are not claimed.  If the hardware routine sets
 * INTR_RESTART_TX in its return value, the mac tx side is kicked with
 * mac_tx_update() after the lock is dropped.
 *
 * Returns DDI_INTR_CLAIMED / DDI_INTR_UNCLAIMED (with the
 * INTR_RESTART_TX flag stripped off).
 */
static uint_t
gem_intr(struct gem_dev *dp)
{
	uint_t ret;

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		/* device suspended: this interrupt cannot be ours */
		mutex_exit(&dp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	dp->intr_busy = B_TRUE;

	ret = (*dp->gc.gc_interrupt)(dp);

	if (ret == DDI_INTR_UNCLAIMED) {
		dp->intr_busy = B_FALSE;
		mutex_exit(&dp->intrlock);
		return (ret);
	}

	if (!dp->mac_active) {
		/* wake up a thread waiting on tx_drain_cv for tx to drain */
		cv_broadcast(&dp->tx_drain_cv);
	}


	dp->stats.intr++;
	dp->intr_busy = B_FALSE;

	mutex_exit(&dp->intrlock);

	if (ret & INTR_RESTART_TX) {
		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
		mac_tx_update(dp->mh);
		ret &= ~INTR_RESTART_TX;
	}
	return (ret);
}
20555748Sduboff
/*
 * gem_intr_watcher: software interrupt polling loop.
 *
 * Invokes the interrupt handler from a timeout(9F) callback and
 * immediately re-arms itself with a one-tick delay.
 */
static void
gem_intr_watcher(struct gem_dev *dp)
{
	(void) gem_intr(dp);

	/* schedule next call of gem_intr_watcher */
	dp->intr_watcher_id =
	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
}
20655748Sduboff
20665748Sduboff /* ======================================================================== */
20675748Sduboff /*
20685748Sduboff * MII support routines
20695748Sduboff */
20705748Sduboff /* ======================================================================== */
20715748Sduboff static void
gem_choose_forcedmode(struct gem_dev * dp)20725748Sduboff gem_choose_forcedmode(struct gem_dev *dp)
20735748Sduboff {
20745748Sduboff /* choose media mode */
20755748Sduboff if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
20765748Sduboff dp->speed = GEM_SPD_1000;
20775748Sduboff dp->full_duplex = dp->anadv_1000fdx;
20785748Sduboff } else if (dp->anadv_100fdx || dp->anadv_100t4) {
20795748Sduboff dp->speed = GEM_SPD_100;
20805748Sduboff dp->full_duplex = B_TRUE;
20815748Sduboff } else if (dp->anadv_100hdx) {
20825748Sduboff dp->speed = GEM_SPD_100;
20835748Sduboff dp->full_duplex = B_FALSE;
20845748Sduboff } else {
20855748Sduboff dp->speed = GEM_SPD_10;
20865748Sduboff dp->full_duplex = dp->anadv_10fdx;
20875748Sduboff }
20885748Sduboff }
20895748Sduboff
20905748Sduboff uint16_t
gem_mii_read(struct gem_dev * dp,uint_t reg)20915748Sduboff gem_mii_read(struct gem_dev *dp, uint_t reg)
20925748Sduboff {
20935748Sduboff if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
20945748Sduboff (*dp->gc.gc_mii_sync)(dp);
20955748Sduboff }
20965748Sduboff return ((*dp->gc.gc_mii_read)(dp, reg));
20975748Sduboff }
20985748Sduboff
20995748Sduboff void
gem_mii_write(struct gem_dev * dp,uint_t reg,uint16_t val)21005748Sduboff gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
21015748Sduboff {
21025748Sduboff if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
21035748Sduboff (*dp->gc.gc_mii_sync)(dp);
21045748Sduboff }
21055748Sduboff (*dp->gc.gc_mii_write)(dp, reg, val);
21065748Sduboff }
21075748Sduboff
/*
 * fc_cap_decode: collapse the MII pause ability bits into the driver's
 * 2-bit flow control capability index (bit0: pause, bit1: asym pause),
 * the inverse of the fc_cap_encode[] table used when advertising.
 */
#define	fc_cap_decode(x)	\
	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
	    (((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
21115748Sduboff
/*
 * gem_mii_config_default: default gc_mii_config implementation.
 *
 * Programs the auto-negotiation advertisement register (MII_AN_ADVERT)
 * from the anadv_* flags and the flow control capability, and, for
 * gigabit PHYs (MII_STATUS_XSTATUS set), programs MII_1000TC as well.
 * Ability bits outside MII_ABILITY_ALL already set in the advert
 * register are preserved.
 *
 * Returns GEM_SUCCESS, or GEM_FAILURE when the PHY reports no
 * technology ability bits at all.
 */
int
gem_mii_config_default(struct gem_dev *dp)
{
	uint16_t mii_stat;
	uint16_t val;
	/* advert bits per flow control capability index (see fc_cap_decode) */
	static uint16_t fc_cap_encode[4] = {
		0,						/* none */
		MII_ABILITY_PAUSE,				/* symmetric */
		MII_ABILITY_ASMPAUSE,				/* tx */
		MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE,	/* rx-symmetric */
	};

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Configure bits in advertisement register
	 */
	mii_stat = dp->mii_status;

	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
	    dp->name, __func__, mii_stat, MII_STATUS_BITS));

	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
		/* it's funny */
		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
		    dp->name, mii_stat, MII_STATUS_BITS);
		return (GEM_FAILURE);
	}

	/* Do not change the rest of the ability bits in the advert reg */
	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
	    dp->name, __func__,
	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
	    dp->anadv_10fdx, dp->anadv_10hdx));

	if (dp->anadv_100t4) {
		val |= MII_ABILITY_100BASE_T4;
	}
	if (dp->anadv_100fdx) {
		val |= MII_ABILITY_100BASE_TX_FD;
	}
	if (dp->anadv_100hdx) {
		val |= MII_ABILITY_100BASE_TX;
	}
	if (dp->anadv_10fdx) {
		val |= MII_ABILITY_10BASE_T_FD;
	}
	if (dp->anadv_10hdx) {
		val |= MII_ABILITY_10BASE_T;
	}

	/* set flow control capability */
	val |= fc_cap_encode[dp->anadv_flow_control];

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
	    dp->anadv_flow_control));

	gem_mii_write(dp, MII_AN_ADVERT, val);

	if (mii_stat & MII_STATUS_XSTATUS) {
		/*
		 * 1000Base-T GMII support
		 */
		if (!dp->anadv_autoneg) {
			/* enable manual configuration */
			val = MII_1000TC_CFG_EN;
		} else {
			val = 0;
			if (dp->anadv_1000fdx) {
				val |= MII_1000TC_ADV_FULL;
			}
			if (dp->anadv_1000hdx) {
				val |= MII_1000TC_ADV_HALF;
			}
		}
		DPRINTF(0, (CE_CONT,
		    "!%s: %s: setting MII_1000TC reg:%b",
		    dp->name, __func__, val, MII_1000TC_BITS));

		gem_mii_write(dp, MII_1000TC, val);
	}

	return (GEM_SUCCESS);
}
22015748Sduboff
/* report a link state change to the mac layer */
#define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
#define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)

/*
 * gem_fc_result[my cap][lp cap]: resolved flow control mode from our
 * advertised capability and the link partner's capability.  Both
 * indices use the fc_cap_decode() encoding:
 * 0:none 1:symmetric 2:tx-only 3:rx/symmetric.
 */
static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
/*	 none	symm	tx	rx/symm */
/* none */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_NONE,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_NONE},
/* sym */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_SYMMETRIC,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_SYMMETRIC},
/* tx */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_NONE,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_TX_PAUSE},
/* rx/symm */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_SYMMETRIC,
			FLOW_CONTROL_RX_PAUSE,
				FLOW_CONTROL_SYMMETRIC},
};

/* human-readable names for the flow control modes, for log messages */
static char *gem_fc_type[] = {
	"without",
	"with symmetric",
	"with tx",
	"with rx",
};
22355748Sduboff
22365748Sduboff boolean_t
gem_mii_link_check(struct gem_dev * dp)22375748Sduboff gem_mii_link_check(struct gem_dev *dp)
22385748Sduboff {
22395748Sduboff uint16_t old_mii_state;
22405748Sduboff boolean_t tx_sched = B_FALSE;
22415748Sduboff uint16_t status;
22425748Sduboff uint16_t advert;
22435748Sduboff uint16_t lpable;
22445748Sduboff uint16_t exp;
22455748Sduboff uint16_t ctl1000;
22465748Sduboff uint16_t stat1000;
22475748Sduboff uint16_t val;
22485748Sduboff clock_t now;
22495748Sduboff clock_t diff;
22505748Sduboff int linkdown_action;
22515748Sduboff boolean_t fix_phy = B_FALSE;
22525748Sduboff
22535748Sduboff now = ddi_get_lbolt();
22545748Sduboff old_mii_state = dp->mii_state;
22555748Sduboff
22565748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
22575748Sduboff dp->name, __func__, now, dp->mii_state));
22585748Sduboff
22595748Sduboff diff = now - dp->mii_last_check;
22605748Sduboff dp->mii_last_check = now;
22615748Sduboff
22627116Sduboff /*
22637116Sduboff * For NWAM, don't show linkdown state right
22647116Sduboff * after the system boots
22657116Sduboff */
22667116Sduboff if (dp->linkup_delay > 0) {
22677116Sduboff if (dp->linkup_delay > diff) {
22687116Sduboff dp->linkup_delay -= diff;
22697116Sduboff } else {
22707116Sduboff /* link up timeout */
22717116Sduboff dp->linkup_delay = -1;
22727116Sduboff }
22737116Sduboff }
22747116Sduboff
22755748Sduboff next_nowait:
22765748Sduboff switch (dp->mii_state) {
22775748Sduboff case MII_STATE_UNKNOWN:
22785748Sduboff /* power-up, DP83840 requires 32 sync bits */
22795748Sduboff (*dp->gc.gc_mii_sync)(dp);
22805748Sduboff goto reset_phy;
22815748Sduboff
22825748Sduboff case MII_STATE_RESETTING:
22835748Sduboff dp->mii_timer -= diff;
22845748Sduboff if (dp->mii_timer > 0) {
22855748Sduboff /* don't read phy registers in resetting */
22865748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST;
22875748Sduboff goto next;
22885748Sduboff }
22895748Sduboff
22905748Sduboff /* Timer expired, ensure reset bit is not set */
22915748Sduboff
22925748Sduboff if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
22935748Sduboff /* some phys need sync bits after reset */
22945748Sduboff (*dp->gc.gc_mii_sync)(dp);
22955748Sduboff }
22965748Sduboff val = gem_mii_read(dp, MII_CONTROL);
22975748Sduboff if (val & MII_CONTROL_RESET) {
22985748Sduboff cmn_err(CE_NOTE,
22995748Sduboff "!%s: time:%ld resetting phy not complete."
23005748Sduboff " mii_control:0x%b",
23015748Sduboff dp->name, ddi_get_lbolt(),
23025748Sduboff val, MII_CONTROL_BITS);
23035748Sduboff }
23045748Sduboff
23055748Sduboff /* ensure neither isolated nor pwrdown nor auto-nego mode */
23065748Sduboff /* XXX -- this operation is required for NS DP83840A. */
23075748Sduboff gem_mii_write(dp, MII_CONTROL, 0);
23085748Sduboff
23095748Sduboff /* As resetting PHY has completed, configure PHY registers */
23105748Sduboff if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
23115748Sduboff /* we failed to configure PHY. */
23125748Sduboff goto reset_phy;
23135748Sduboff }
23145748Sduboff
23155748Sduboff /* mii_config may disable autonegatiation */
23165748Sduboff gem_choose_forcedmode(dp);
23175748Sduboff
23185748Sduboff dp->mii_lpable = 0;
23195748Sduboff dp->mii_advert = 0;
23205748Sduboff dp->mii_exp = 0;
23215748Sduboff dp->mii_ctl1000 = 0;
23225748Sduboff dp->mii_stat1000 = 0;
23235748Sduboff dp->flow_control = FLOW_CONTROL_NONE;
23245748Sduboff
23255748Sduboff if (!dp->anadv_autoneg) {
23265748Sduboff /* skip auto-negotiation phase */
23275748Sduboff dp->mii_state = MII_STATE_MEDIA_SETUP;
23285748Sduboff dp->mii_timer = 0;
23295748Sduboff dp->mii_interval = 0;
23305748Sduboff goto next_nowait;
23315748Sduboff }
23325748Sduboff
23335748Sduboff /* Issue auto-negotiation command */
23345748Sduboff goto autonego;
23355748Sduboff
23365748Sduboff case MII_STATE_AUTONEGOTIATING:
23375748Sduboff /*
23385748Sduboff * Autonegotiation is in progress
23395748Sduboff */
23405748Sduboff dp->mii_timer -= diff;
23415748Sduboff if (dp->mii_timer -
23425748Sduboff (dp->gc.gc_mii_an_timeout
23435748Sduboff - dp->gc.gc_mii_an_wait) > 0) {
23445748Sduboff /*
23455748Sduboff * wait for a while, typically autonegotiation
23465748Sduboff * completes in 2.3 - 2.5 sec.
23475748Sduboff */
23485748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST;
23495748Sduboff goto next;
23505748Sduboff }
23515748Sduboff
23525748Sduboff /* read PHY status */
23535748Sduboff status = gem_mii_read(dp, MII_STATUS);
23545748Sduboff DPRINTF(4, (CE_CONT,
23555748Sduboff "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
23565748Sduboff dp->name, __func__, dp->mii_state,
23575748Sduboff status, MII_STATUS_BITS));
23585748Sduboff
23595748Sduboff if (status & MII_STATUS_REMFAULT) {
23605748Sduboff /*
23615748Sduboff * The link parnert told me something wrong happend.
23625748Sduboff * What do we do ?
23635748Sduboff */
23645748Sduboff cmn_err(CE_CONT,
23655748Sduboff "!%s: auto-negotiation failed: remote fault",
23665748Sduboff dp->name);
23675748Sduboff goto autonego;
23685748Sduboff }
23695748Sduboff
23705748Sduboff if ((status & MII_STATUS_ANDONE) == 0) {
23715748Sduboff if (dp->mii_timer <= 0) {
23725748Sduboff /*
23735748Sduboff * Auto-negotiation was timed out,
23745748Sduboff * try again w/o resetting phy.
23755748Sduboff */
23765748Sduboff if (!dp->mii_supress_msg) {
23775748Sduboff cmn_err(CE_WARN,
23785748Sduboff "!%s: auto-negotiation failed: timeout",
23795748Sduboff dp->name);
23805748Sduboff dp->mii_supress_msg = B_TRUE;
23815748Sduboff }
23825748Sduboff goto autonego;
23835748Sduboff }
23845748Sduboff /*
23855748Sduboff * Auto-negotiation is in progress. Wait.
23865748Sduboff */
23875748Sduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
23885748Sduboff goto next;
23895748Sduboff }
23905748Sduboff
23915748Sduboff /*
23925748Sduboff * Auto-negotiation have completed.
23935748Sduboff * Assume linkdown and fall through.
23945748Sduboff */
23955748Sduboff dp->mii_supress_msg = B_FALSE;
23965748Sduboff dp->mii_state = MII_STATE_AN_DONE;
23975748Sduboff DPRINTF(0, (CE_CONT,
23985748Sduboff "!%s: auto-negotiation completed, MII_STATUS:%b",
23995748Sduboff dp->name, status, MII_STATUS_BITS));
24005748Sduboff
24015748Sduboff if (dp->gc.gc_mii_an_delay > 0) {
24025748Sduboff dp->mii_timer = dp->gc.gc_mii_an_delay;
24035748Sduboff dp->mii_interval = drv_usectohz(20*1000);
24045748Sduboff goto next;
24055748Sduboff }
24065748Sduboff
24075748Sduboff dp->mii_timer = 0;
24085748Sduboff diff = 0;
24095748Sduboff goto next_nowait;
24105748Sduboff
24115748Sduboff case MII_STATE_AN_DONE:
24125748Sduboff /*
24135748Sduboff * Auto-negotiation have done. Now we can set up media.
24145748Sduboff */
24155748Sduboff dp->mii_timer -= diff;
24165748Sduboff if (dp->mii_timer > 0) {
24175748Sduboff /* wait for a while */
24185748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST;
24195748Sduboff goto next;
24205748Sduboff }
24215748Sduboff
24225748Sduboff /*
24235748Sduboff * set up the result of auto negotiation
24245748Sduboff */
24255748Sduboff
24265748Sduboff /*
24275748Sduboff * Read registers required to determin current
24285748Sduboff * duplex mode and media speed.
24295748Sduboff */
24305748Sduboff if (dp->gc.gc_mii_an_delay > 0) {
24315748Sduboff /*
24325748Sduboff * As the link watcher context has been suspended,
24335748Sduboff * 'status' is invalid. We must status register here
24345748Sduboff */
24355748Sduboff status = gem_mii_read(dp, MII_STATUS);
24365748Sduboff }
24375748Sduboff advert = gem_mii_read(dp, MII_AN_ADVERT);
24385748Sduboff lpable = gem_mii_read(dp, MII_AN_LPABLE);
24395748Sduboff exp = gem_mii_read(dp, MII_AN_EXPANSION);
24405748Sduboff if (exp == 0xffff) {
24415748Sduboff /* some phys don't have exp register */
24425748Sduboff exp = 0;
24435748Sduboff }
24445748Sduboff ctl1000 = 0;
24455748Sduboff stat1000 = 0;
24465748Sduboff if (dp->mii_status & MII_STATUS_XSTATUS) {
24475748Sduboff ctl1000 = gem_mii_read(dp, MII_1000TC);
24485748Sduboff stat1000 = gem_mii_read(dp, MII_1000TS);
24495748Sduboff }
24505748Sduboff dp->mii_lpable = lpable;
24515748Sduboff dp->mii_advert = advert;
24525748Sduboff dp->mii_exp = exp;
24535748Sduboff dp->mii_ctl1000 = ctl1000;
24545748Sduboff dp->mii_stat1000 = stat1000;
24555748Sduboff
24565748Sduboff cmn_err(CE_CONT,
24575748Sduboff "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
24585748Sduboff dp->name,
24595748Sduboff advert, MII_ABILITY_BITS,
24605748Sduboff lpable, MII_ABILITY_BITS,
24615748Sduboff exp, MII_AN_EXP_BITS);
24625748Sduboff
24635748Sduboff if (dp->mii_status & MII_STATUS_XSTATUS) {
24645748Sduboff cmn_err(CE_CONT,
24655748Sduboff "! MII_1000TC:%b, MII_1000TS:%b",
24665748Sduboff ctl1000, MII_1000TC_BITS,
24675748Sduboff stat1000, MII_1000TS_BITS);
24685748Sduboff }
24695748Sduboff
24705748Sduboff if (gem_population(lpable) <= 1 &&
24715748Sduboff (exp & MII_AN_EXP_LPCANAN) == 0) {
24725748Sduboff if ((advert & MII_ABILITY_TECH) != lpable) {
24735748Sduboff cmn_err(CE_WARN,
24745748Sduboff "!%s: but the link partnar doesn't seem"
24755748Sduboff " to have auto-negotiation capability."
24765748Sduboff " please check the link configuration.",
24775748Sduboff dp->name);
24785748Sduboff }
24795748Sduboff /*
24809860Sgdamore@opensolaris.org * it should be result of parallel detection, which
24815748Sduboff * cannot detect duplex mode.
24825748Sduboff */
24835748Sduboff if (lpable & MII_ABILITY_100BASE_TX) {
24845748Sduboff /*
24855748Sduboff * we prefer full duplex mode for 100Mbps
24865748Sduboff * connection, if we can.
24875748Sduboff */
24885748Sduboff lpable |= advert & MII_ABILITY_100BASE_TX_FD;
24895748Sduboff }
24905748Sduboff
24915748Sduboff if ((advert & lpable) == 0 &&
24925748Sduboff lpable & MII_ABILITY_10BASE_T) {
24935748Sduboff lpable |= advert & MII_ABILITY_10BASE_T_FD;
24945748Sduboff }
24955748Sduboff /*
24965748Sduboff * as the link partnar isn't auto-negotiatable, use
24975748Sduboff * fixed mode temporally.
24985748Sduboff */
24995748Sduboff fix_phy = B_TRUE;
25005748Sduboff } else if (lpable == 0) {
25015748Sduboff cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
25025748Sduboff goto reset_phy;
25035748Sduboff }
25045748Sduboff /*
25055748Sduboff * configure current link mode according to AN priority.
25065748Sduboff */
25075748Sduboff val = advert & lpable;
25085748Sduboff if ((ctl1000 & MII_1000TC_ADV_FULL) &&
25095748Sduboff (stat1000 & MII_1000TS_LP_FULL)) {
25105748Sduboff /* 1000BaseT & full duplex */
25115748Sduboff dp->speed = GEM_SPD_1000;
25125748Sduboff dp->full_duplex = B_TRUE;
25135748Sduboff } else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
25145748Sduboff (stat1000 & MII_1000TS_LP_HALF)) {
25155748Sduboff /* 1000BaseT & half duplex */
25165748Sduboff dp->speed = GEM_SPD_1000;
25175748Sduboff dp->full_duplex = B_FALSE;
25185748Sduboff } else if (val & MII_ABILITY_100BASE_TX_FD) {
25195748Sduboff /* 100BaseTx & full duplex */
25205748Sduboff dp->speed = GEM_SPD_100;
25215748Sduboff dp->full_duplex = B_TRUE;
25225748Sduboff } else if (val & MII_ABILITY_100BASE_T4) {
25235748Sduboff /* 100BaseT4 & full duplex */
25245748Sduboff dp->speed = GEM_SPD_100;
25255748Sduboff dp->full_duplex = B_TRUE;
25265748Sduboff } else if (val & MII_ABILITY_100BASE_TX) {
25275748Sduboff /* 100BaseTx & half duplex */
25285748Sduboff dp->speed = GEM_SPD_100;
25295748Sduboff dp->full_duplex = B_FALSE;
25305748Sduboff } else if (val & MII_ABILITY_10BASE_T_FD) {
25315748Sduboff /* 10BaseT & full duplex */
25325748Sduboff dp->speed = GEM_SPD_10;
25335748Sduboff dp->full_duplex = B_TRUE;
25345748Sduboff } else if (val & MII_ABILITY_10BASE_T) {
25355748Sduboff /* 10BaseT & half duplex */
25365748Sduboff dp->speed = GEM_SPD_10;
25375748Sduboff dp->full_duplex = B_FALSE;
25385748Sduboff } else {
25395748Sduboff /*
25405748Sduboff * It seems that the link partnar doesn't have
25415748Sduboff * auto-negotiation capability and our PHY
25425748Sduboff * could not report the correct current mode.
25435748Sduboff * We guess current mode by mii_control register.
25445748Sduboff */
25455748Sduboff val = gem_mii_read(dp, MII_CONTROL);
25465748Sduboff
25475748Sduboff /* select 100m full or 10m half */
25485748Sduboff dp->speed = (val & MII_CONTROL_100MB) ?
25495748Sduboff GEM_SPD_100 : GEM_SPD_10;
25505748Sduboff dp->full_duplex = dp->speed != GEM_SPD_10;
25515748Sduboff fix_phy = B_TRUE;
25525748Sduboff
25535748Sduboff cmn_err(CE_NOTE,
25545748Sduboff "!%s: auto-negotiation done but "
25555748Sduboff "common ability not found.\n"
25565748Sduboff "PHY state: control:%b advert:%b lpable:%b\n"
25575748Sduboff "guessing %d Mbps %s duplex mode",
25585748Sduboff dp->name,
25595748Sduboff val, MII_CONTROL_BITS,
25605748Sduboff advert, MII_ABILITY_BITS,
25615748Sduboff lpable, MII_ABILITY_BITS,
25625748Sduboff gem_speed_value[dp->speed],
25635748Sduboff dp->full_duplex ? "full" : "half");
25645748Sduboff }
25655748Sduboff
25665748Sduboff if (dp->full_duplex) {
25675748Sduboff dp->flow_control =
25685748Sduboff gem_fc_result[fc_cap_decode(advert)]
25695748Sduboff [fc_cap_decode(lpable)];
25705748Sduboff } else {
25715748Sduboff dp->flow_control = FLOW_CONTROL_NONE;
25725748Sduboff }
25735748Sduboff dp->mii_state = MII_STATE_MEDIA_SETUP;
25745748Sduboff /* FALLTHROUGH */
25755748Sduboff
25765748Sduboff case MII_STATE_MEDIA_SETUP:
25775748Sduboff dp->mii_state = MII_STATE_LINKDOWN;
25785748Sduboff dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
25795748Sduboff DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name));
25805748Sduboff dp->mii_supress_msg = B_FALSE;
25815748Sduboff
25825748Sduboff /* use short interval */
25835748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST;
25845748Sduboff
25855748Sduboff if ((!dp->anadv_autoneg) ||
25865748Sduboff dp->gc.gc_mii_an_oneshot || fix_phy) {
25875748Sduboff
25885748Sduboff /*
25895748Sduboff * write specified mode to phy.
25905748Sduboff */
25915748Sduboff val = gem_mii_read(dp, MII_CONTROL);
25925748Sduboff val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
25935748Sduboff MII_CONTROL_ANE | MII_CONTROL_RSAN);
25945748Sduboff
25955748Sduboff if (dp->full_duplex) {
25965748Sduboff val |= MII_CONTROL_FDUPLEX;
25975748Sduboff }
25985748Sduboff
25995748Sduboff switch (dp->speed) {
26005748Sduboff case GEM_SPD_1000:
26015748Sduboff val |= MII_CONTROL_1000MB;
26025748Sduboff break;
26035748Sduboff
26045748Sduboff case GEM_SPD_100:
26055748Sduboff val |= MII_CONTROL_100MB;
26065748Sduboff break;
26075748Sduboff
26085748Sduboff default:
26095748Sduboff cmn_err(CE_WARN, "%s: unknown speed:%d",
26105748Sduboff dp->name, dp->speed);
26115748Sduboff /* FALLTHROUGH */
26125748Sduboff case GEM_SPD_10:
26135748Sduboff /* for GEM_SPD_10, do nothing */
26145748Sduboff break;
26155748Sduboff }
26165748Sduboff
26175748Sduboff if (dp->mii_status & MII_STATUS_XSTATUS) {
26185748Sduboff gem_mii_write(dp,
26195748Sduboff MII_1000TC, MII_1000TC_CFG_EN);
26205748Sduboff }
26215748Sduboff gem_mii_write(dp, MII_CONTROL, val);
26225748Sduboff }
26235748Sduboff
26245748Sduboff if (dp->nic_state >= NIC_STATE_INITIALIZED) {
26255748Sduboff /* notify the result of auto-negotiation to mac */
26265748Sduboff (*dp->gc.gc_set_media)(dp);
26275748Sduboff }
26285748Sduboff
26295748Sduboff if ((void *)dp->gc.gc_mii_tune_phy) {
26305748Sduboff /* for built-in sis900 */
26315748Sduboff /* XXX - this code should be removed. */
26325748Sduboff (*dp->gc.gc_mii_tune_phy)(dp);
26335748Sduboff }
26345748Sduboff
26355748Sduboff goto next_nowait;
26365748Sduboff
26375748Sduboff case MII_STATE_LINKDOWN:
26385748Sduboff status = gem_mii_read(dp, MII_STATUS);
26395748Sduboff if (status & MII_STATUS_LINKUP) {
26405748Sduboff /*
26415748Sduboff * Link going up
26425748Sduboff */
26435748Sduboff dp->mii_state = MII_STATE_LINKUP;
26445748Sduboff dp->mii_supress_msg = B_FALSE;
26455748Sduboff
26465748Sduboff DPRINTF(0, (CE_CONT,
26475748Sduboff "!%s: link up detected: mii_stat:%b",
26485748Sduboff dp->name, status, MII_STATUS_BITS));
26495748Sduboff
26505748Sduboff /*
26515748Sduboff * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
26525748Sduboff * ignored when MII_CONTROL_ANE is set.
26535748Sduboff */
26545748Sduboff cmn_err(CE_CONT,
26555748Sduboff "!%s: Link up: %d Mbps %s duplex %s flow control",
26565748Sduboff dp->name,
26575748Sduboff gem_speed_value[dp->speed],
26585748Sduboff dp->full_duplex ? "full" : "half",
26595748Sduboff gem_fc_type[dp->flow_control]);
26605748Sduboff
26615748Sduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
26625748Sduboff
26635748Sduboff /* XXX - we need other timer to watch statictics */
26645748Sduboff if (dp->gc.gc_mii_hw_link_detection &&
26655748Sduboff dp->nic_state == NIC_STATE_ONLINE) {
26665748Sduboff dp->mii_interval = 0;
26675748Sduboff }
26685748Sduboff
26695748Sduboff if (dp->nic_state == NIC_STATE_ONLINE) {
26705748Sduboff if (!dp->mac_active) {
26715748Sduboff (void) gem_mac_start(dp);
26725748Sduboff }
26735748Sduboff tx_sched = B_TRUE;
26745748Sduboff }
26755748Sduboff goto next;
26765748Sduboff }
26775748Sduboff
26785748Sduboff dp->mii_supress_msg = B_TRUE;
26795748Sduboff if (dp->anadv_autoneg) {
26805748Sduboff dp->mii_timer -= diff;
26815748Sduboff if (dp->mii_timer <= 0) {
26825748Sduboff /*
26835748Sduboff * link down timer expired.
26845748Sduboff * need to restart auto-negotiation.
26855748Sduboff */
26865748Sduboff linkdown_action =
26875748Sduboff dp->gc.gc_mii_linkdown_timeout_action;
26885748Sduboff goto restart_autonego;
26895748Sduboff }
26905748Sduboff }
26915748Sduboff /* don't change mii_state */
26925748Sduboff break;
26935748Sduboff
26945748Sduboff case MII_STATE_LINKUP:
26955748Sduboff status = gem_mii_read(dp, MII_STATUS);
26965748Sduboff if ((status & MII_STATUS_LINKUP) == 0) {
26975748Sduboff /*
26985748Sduboff * Link going down
26995748Sduboff */
27005748Sduboff cmn_err(CE_NOTE,
27015748Sduboff "!%s: link down detected: mii_stat:%b",
27025748Sduboff dp->name, status, MII_STATUS_BITS);
27035748Sduboff
27045748Sduboff if (dp->nic_state == NIC_STATE_ONLINE &&
27055748Sduboff dp->mac_active &&
27065748Sduboff dp->gc.gc_mii_stop_mac_on_linkdown) {
27075748Sduboff (void) gem_mac_stop(dp, 0);
27087116Sduboff
27097116Sduboff if (dp->tx_blocked) {
27107116Sduboff /* drain tx */
27117116Sduboff tx_sched = B_TRUE;
27127116Sduboff }
27135748Sduboff }
27145748Sduboff
27155748Sduboff if (dp->anadv_autoneg) {
27165748Sduboff /* need to restart auto-negotiation */
27175748Sduboff linkdown_action = dp->gc.gc_mii_linkdown_action;
27185748Sduboff goto restart_autonego;
27195748Sduboff }
27205748Sduboff
27215748Sduboff dp->mii_state = MII_STATE_LINKDOWN;
27225748Sduboff dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
27235748Sduboff
27245748Sduboff if ((void *)dp->gc.gc_mii_tune_phy) {
27255748Sduboff /* for built-in sis900 */
27265748Sduboff (*dp->gc.gc_mii_tune_phy)(dp);
27275748Sduboff }
27285748Sduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
27295748Sduboff goto next;
27305748Sduboff }
27315748Sduboff
27325748Sduboff /* don't change mii_state */
27335748Sduboff if (dp->gc.gc_mii_hw_link_detection &&
27345748Sduboff dp->nic_state == NIC_STATE_ONLINE) {
27355748Sduboff dp->mii_interval = 0;
27365748Sduboff goto next;
27375748Sduboff }
27385748Sduboff break;
27395748Sduboff }
27405748Sduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
27415748Sduboff goto next;
27425748Sduboff
27435748Sduboff /* Actions on the end of state routine */
27445748Sduboff
27455748Sduboff restart_autonego:
27465748Sduboff switch (linkdown_action) {
27475748Sduboff case MII_ACTION_RESET:
27485748Sduboff if (!dp->mii_supress_msg) {
27495748Sduboff cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
27505748Sduboff }
27515748Sduboff dp->mii_supress_msg = B_TRUE;
27525748Sduboff goto reset_phy;
27535748Sduboff
27545748Sduboff case MII_ACTION_NONE:
27555748Sduboff dp->mii_supress_msg = B_TRUE;
27565748Sduboff if (dp->gc.gc_mii_an_oneshot) {
27575748Sduboff goto autonego;
27585748Sduboff }
27595748Sduboff /* PHY will restart autonego automatically */
27605748Sduboff dp->mii_state = MII_STATE_AUTONEGOTIATING;
27615748Sduboff dp->mii_timer = dp->gc.gc_mii_an_timeout;
27625748Sduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
27635748Sduboff goto next;
27645748Sduboff
27655748Sduboff case MII_ACTION_RSA:
27665748Sduboff if (!dp->mii_supress_msg) {
27675748Sduboff cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
27685748Sduboff dp->name);
27695748Sduboff }
27705748Sduboff dp->mii_supress_msg = B_TRUE;
27715748Sduboff goto autonego;
27725748Sduboff
27735748Sduboff default:
27745748Sduboff cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
27755748Sduboff dp->name, dp->gc.gc_mii_linkdown_action);
27765748Sduboff dp->mii_supress_msg = B_TRUE;
27775748Sduboff }
27785748Sduboff /* NOTREACHED */
27795748Sduboff
27805748Sduboff reset_phy:
27815748Sduboff if (!dp->mii_supress_msg) {
27825748Sduboff cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
27835748Sduboff }
27845748Sduboff dp->mii_state = MII_STATE_RESETTING;
27855748Sduboff dp->mii_timer = dp->gc.gc_mii_reset_timeout;
27865748Sduboff if (!dp->gc.gc_mii_dont_reset) {
27875748Sduboff gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
27885748Sduboff }
27895748Sduboff dp->mii_interval = WATCH_INTERVAL_FAST;
27905748Sduboff goto next;
27915748Sduboff
27925748Sduboff autonego:
27935748Sduboff if (!dp->mii_supress_msg) {
27945748Sduboff cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
27955748Sduboff }
27965748Sduboff dp->mii_state = MII_STATE_AUTONEGOTIATING;
27975748Sduboff dp->mii_timer = dp->gc.gc_mii_an_timeout;
27985748Sduboff
27995748Sduboff /* start/restart auto nego */
28005748Sduboff val = gem_mii_read(dp, MII_CONTROL) &
28015748Sduboff ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
28025748Sduboff
28037116Sduboff gem_mii_write(dp, MII_CONTROL,
28047116Sduboff val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
28055748Sduboff
28065748Sduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
28075748Sduboff
28085748Sduboff next:
28095748Sduboff if (dp->link_watcher_id == 0 && dp->mii_interval) {
28105748Sduboff /* we must schedule next mii_watcher */
28115748Sduboff dp->link_watcher_id =
28127116Sduboff timeout((void (*)(void *))&gem_mii_link_watcher,
28135748Sduboff (void *)dp, dp->mii_interval);
28145748Sduboff }
28155748Sduboff
28167116Sduboff if (old_mii_state != dp->mii_state) {
28175748Sduboff /* notify new mii link state */
28185748Sduboff if (dp->mii_state == MII_STATE_LINKUP) {
28197116Sduboff dp->linkup_delay = 0;
28205748Sduboff GEM_LINKUP(dp);
28217116Sduboff } else if (dp->linkup_delay <= 0) {
28225748Sduboff GEM_LINKDOWN(dp);
28235748Sduboff }
28247116Sduboff } else if (dp->linkup_delay < 0) {
28257116Sduboff /* first linkup timeout */
28267116Sduboff dp->linkup_delay = 0;
28277116Sduboff GEM_LINKDOWN(dp);
28287116Sduboff }
28297116Sduboff
28305748Sduboff return (tx_sched);
28315748Sduboff }
28325748Sduboff
28335748Sduboff static void
gem_mii_link_watcher(struct gem_dev * dp)28345748Sduboff gem_mii_link_watcher(struct gem_dev *dp)
28355748Sduboff {
28365748Sduboff boolean_t tx_sched;
28375748Sduboff
28385748Sduboff mutex_enter(&dp->intrlock);
28395748Sduboff
28405748Sduboff dp->link_watcher_id = 0;
28415748Sduboff tx_sched = gem_mii_link_check(dp);
28425748Sduboff #if GEM_DEBUG_LEVEL > 2
28435748Sduboff if (dp->link_watcher_id == 0) {
28445748Sduboff cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
28455748Sduboff }
28465748Sduboff #endif
28475748Sduboff mutex_exit(&dp->intrlock);
28485748Sduboff
28495748Sduboff if (tx_sched) {
28505748Sduboff /* kick potentially stopped downstream */
28515748Sduboff mac_tx_update(dp->mh);
28525748Sduboff }
28535748Sduboff }
28545748Sduboff
/*
 * gem_mii_probe_default: locate the PHY on the MII management bus and
 * record its identity and capabilities in the soft state.
 *
 * Probe order:
 *   1. the currently configured address (dp->mii_phy_addr), if non-zero;
 *      a negative address means an internal/non-MII PHY is expected and
 *      no bus scan is attempted on failure,
 *   2. a scan of all addresses from gc_mii_addr_min up to 31,
 *   3. the same scan again, but writing MII_CONTROL first — some PHYs
 *      only respond after being taken out of isolation/power-down.
 *
 * A status register value of 0x0000 or 0xffff is taken to mean "no PHY
 * responding at this address" (0xffff is the pulled-up data line).
 *
 * Side effects: sets dp->mii_status, dp->mii_phy_id, dp->mii_xstatus and
 * prunes dp->gc.gc_flow_control bits the PHY cannot advertise.
 *
 * Returns GEM_SUCCESS if a PHY was found, otherwise GEM_FAILURE
 * (with dp->mii_phy_addr set to -1 after a failed scan).
 */
int
gem_mii_probe_default(struct gem_dev *dp)
{
	int8_t		phy;
	uint16_t	status;
	uint16_t	adv;
	uint16_t	adv_org;

	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Scan PHY
	 */
	/* ensure to send sync bits */
	dp->mii_status = 0;

	/* Try default phy first */
	if (dp->mii_phy_addr) {
		status = gem_mii_read(dp, MII_STATUS);
		if (status != 0xffff && status != 0) {
			/* clear control to leave isolation/loopback modes */
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}

		if (dp->mii_phy_addr < 0) {
			/* negative address: no MII bus to scan */
			cmn_err(CE_NOTE,
	    "!%s: failed to probe default internal and/or non-MII PHY",
			    dp->name);
			return (GEM_FAILURE);
		}

		cmn_err(CE_NOTE,
		    "!%s: failed to probe default MII PHY at %d",
		    dp->name, dp->mii_phy_addr);
	}

	/* Try all possible address */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}
	}

	/* second pass: write control first to wake up reluctant PHYs */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		gem_mii_write(dp, MII_CONTROL, 0);
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			goto PHY_found;
		}
	}

	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
	dp->mii_phy_addr = -1;

	return (GEM_FAILURE);

PHY_found:
	dp->mii_status = status;
	/* 32-bit OUI/model identifier from the two PHYID registers */
	dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) |
	    gem_mii_read(dp, MII_PHYIDL);

	if (dp->mii_phy_addr < 0) {
		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
		    dp->name, dp->mii_phy_id);
	} else {
		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
	}

	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
	    dp->name,
	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
	    status, MII_STATUS_BITS,
	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);

	dp->mii_xstatus = 0;
	if (status & MII_STATUS_XSTATUS) {
		/* gigabit-capable PHY: cache the extended status too */
		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);

		cmn_err(CE_CONT, "!%s: xstatus:%b",
		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
	}

	/* check if the phy can advertize pause abilities */
	adv_org = gem_mii_read(dp, MII_AN_ADVERT);

	/* write the pause bits, then read back to see which ones stuck */
	gem_mii_write(dp, MII_AN_ADVERT,
	    MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);

	adv = gem_mii_read(dp, MII_AN_ADVERT);

	/*
	 * gc_flow_control appears to encode symmetric pause in bit 0 and
	 * asymmetric pause in bit 1; drop whichever the PHY can't latch.
	 */
	if ((adv & MII_ABILITY_PAUSE) == 0) {
		dp->gc.gc_flow_control &= ~1;
	}

	if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
		dp->gc.gc_flow_control &= ~2;
	}

	/* restore the original advertisement register */
	gem_mii_write(dp, MII_AN_ADVERT, adv_org);

	return (GEM_SUCCESS);
}
29655748Sduboff
29665748Sduboff static void
gem_mii_start(struct gem_dev * dp)29675748Sduboff gem_mii_start(struct gem_dev *dp)
29685748Sduboff {
29695748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
29705748Sduboff
29715748Sduboff /* make a first call of check link */
29725748Sduboff dp->mii_state = MII_STATE_UNKNOWN;
29735748Sduboff dp->mii_last_check = ddi_get_lbolt();
29747116Sduboff dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
29755748Sduboff (void) gem_mii_link_watcher(dp);
29765748Sduboff }
29775748Sduboff
29785748Sduboff static void
gem_mii_stop(struct gem_dev * dp)29795748Sduboff gem_mii_stop(struct gem_dev *dp)
29805748Sduboff {
29815748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
29825748Sduboff
29835748Sduboff /* Ensure timer routine stopped */
29845748Sduboff mutex_enter(&dp->intrlock);
29855748Sduboff if (dp->link_watcher_id) {
29865748Sduboff while (untimeout(dp->link_watcher_id) == -1)
29875748Sduboff ;
29885748Sduboff dp->link_watcher_id = 0;
29895748Sduboff }
29905748Sduboff mutex_exit(&dp->intrlock);
29915748Sduboff }
29925748Sduboff
29935748Sduboff boolean_t
gem_get_mac_addr_conf(struct gem_dev * dp)29945748Sduboff gem_get_mac_addr_conf(struct gem_dev *dp)
29955748Sduboff {
29965748Sduboff char propname[32];
29975748Sduboff char *valstr;
29985748Sduboff uint8_t mac[ETHERADDRL];
29995748Sduboff char *cp;
30005748Sduboff int c;
30015748Sduboff int i;
30025748Sduboff int j;
30035748Sduboff uint8_t v;
30045748Sduboff uint8_t d;
30055748Sduboff uint8_t ored;
30065748Sduboff
30075748Sduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
30085748Sduboff /*
30095748Sduboff * Get ethernet address from .conf file
30105748Sduboff */
30115748Sduboff (void) sprintf(propname, "mac-addr");
30125748Sduboff if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
30135748Sduboff DDI_PROP_DONTPASS, propname, &valstr)) !=
30145748Sduboff DDI_PROP_SUCCESS) {
30155748Sduboff return (B_FALSE);
30165748Sduboff }
30175748Sduboff
30185748Sduboff if (strlen(valstr) != ETHERADDRL*3-1) {
30195748Sduboff goto syntax_err;
30205748Sduboff }
30215748Sduboff
30225748Sduboff cp = valstr;
30235748Sduboff j = 0;
30245748Sduboff ored = 0;
30255748Sduboff for (;;) {
30265748Sduboff v = 0;
30275748Sduboff for (i = 0; i < 2; i++) {
30285748Sduboff c = *cp++;
30295748Sduboff
30305748Sduboff if (c >= 'a' && c <= 'f') {
30315748Sduboff d = c - 'a' + 10;
30325748Sduboff } else if (c >= 'A' && c <= 'F') {
30335748Sduboff d = c - 'A' + 10;
30345748Sduboff } else if (c >= '0' && c <= '9') {
30355748Sduboff d = c - '0';
30365748Sduboff } else {
30375748Sduboff goto syntax_err;
30385748Sduboff }
30395748Sduboff v = (v << 4) | d;
30405748Sduboff }
30415748Sduboff
30425748Sduboff mac[j++] = v;
30435748Sduboff ored |= v;
30445748Sduboff if (j == ETHERADDRL) {
30455748Sduboff /* done */
30465748Sduboff break;
30475748Sduboff }
30485748Sduboff
30495748Sduboff c = *cp++;
30505748Sduboff if (c != ':') {
30515748Sduboff goto syntax_err;
30525748Sduboff }
30535748Sduboff }
30545748Sduboff
30555748Sduboff if (ored == 0) {
30565748Sduboff goto err;
30575748Sduboff }
30585748Sduboff for (i = 0; i < ETHERADDRL; i++) {
30595748Sduboff dp->dev_addr.ether_addr_octet[i] = mac[i];
30605748Sduboff }
30615748Sduboff ddi_prop_free(valstr);
30625748Sduboff return (B_TRUE);
30635748Sduboff
30645748Sduboff syntax_err:
30655748Sduboff cmn_err(CE_CONT,
30665748Sduboff "!%s: read mac addr: trying .conf: syntax err %s",
30675748Sduboff dp->name, valstr);
30685748Sduboff err:
30695748Sduboff ddi_prop_free(valstr);
30705748Sduboff
30715748Sduboff return (B_FALSE);
30725748Sduboff }
30735748Sduboff
30745748Sduboff
30755748Sduboff /* ============================================================== */
30765748Sduboff /*
30775748Sduboff * internal start/stop interface
30785748Sduboff */
30795748Sduboff /* ============================================================== */
30805748Sduboff static int
gem_mac_set_rx_filter(struct gem_dev * dp)30815748Sduboff gem_mac_set_rx_filter(struct gem_dev *dp)
30825748Sduboff {
30835748Sduboff return ((*dp->gc.gc_set_rx_filter)(dp));
30845748Sduboff }
30855748Sduboff
/*
 * gem_mac_init: cold start
 *
 * Re-initializes the descriptor rings and transmitter soft state, then
 * runs the chip-specific init routine and hands rx buffers back to the
 * hardware.  Fails immediately while the device is suspended.
 *
 * Returns GEM_SUCCESS or GEM_FAILURE.
 */
static int
gem_mac_init(struct gem_dev *dp)
{
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	if (dp->mac_suspended) {
		return (GEM_FAILURE);
	}

	/* tx/rx stay disabled until gem_mac_start() flips this on */
	dp->mac_active = B_FALSE;

	gem_init_rx_ring(dp);
	gem_init_tx_ring(dp);

	/* reset transmitter state */
	dp->tx_blocked = (clock_t)0;
	dp->tx_busy = 0;
	dp->tx_reclaim_busy = 0;
	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;

	/* chip-specific register initialization */
	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
		return (GEM_FAILURE);
	}

	/* attach fresh receive buffers to the rx ring */
	gem_prepare_rx_buf(dp);

	return (GEM_SUCCESS);
}
/*
 * gem_mac_start: warm start
 *
 * Enables tx/rx on an already-initialized chip once the link is up, and
 * re-issues any tx buffers still queued in software from before a stop.
 *
 * Caller must hold intrlock; nic_state must be ONLINE and the MII link
 * must be up (asserted below).  xmitlock is taken internally to toggle
 * mac_active and to load the tx queue.
 *
 * Returns GEM_SUCCESS, or GEM_FAILURE if suspended or the chip refuses
 * to start.
 */
static int
gem_mac_start(struct gem_dev *dp)
{
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
	ASSERT(dp->mii_state == MII_STATE_LINKUP);

	/* enable tx and rx */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_FAILURE);
	}
	dp->mac_active = B_TRUE;
	mutex_exit(&dp->xmitlock);

	/* setup rx buffers: hand the active rx window back to the chip */
	(*dp->gc.gc_rx_start)(dp,
	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
	    dp->rx_active_tail - dp->rx_active_head);

	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
		    dp->name, __func__);
		return (GEM_FAILURE);
	}

	mutex_enter(&dp->xmitlock);

	/* load untransmitted packets to the nic */
	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
		gem_tx_load_descs_oo(dp,
		    dp->tx_softq_head, dp->tx_softq_tail,
		    GEM_TXFLAG_HEAD);
		/* issue preloaded tx buffers */
		gem_tx_start_unit(dp);
	}

	mutex_exit(&dp->xmitlock);

	return (GEM_SUCCESS);
}
31655748Sduboff
/*
 * gem_mac_stop: quiesce and stop tx/rx activity on the nic.
 *
 * flags:
 *   GEM_RESTART_NOWAIT   - skip waiting for in-flight tx frames to drain.
 *   GEM_RESTART_KEEP_BUF - preserve queued rx/tx buffers so a subsequent
 *                          warm start can resume where we left off;
 *                          otherwise all pending tx buffers are freed.
 *
 * Caller must hold intrlock and must NOT hold xmitlock (asserted below).
 *
 * Returns GEM_SUCCESS (also when already suspended).
 */
static int
gem_mac_stop(struct gem_dev *dp, uint_t flags)
{
	int	i;
	int	wait_time; /* in uS */
#ifdef GEM_DEBUG_LEVEL
	clock_t	now;
#endif
	int	ret = GEM_SUCCESS;

	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
	    dp->name, __func__, dp->rx_buf_freecnt));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(!mutex_owned(&dp->xmitlock));

	/*
	 * Block transmits
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_SUCCESS);
	}
	dp->mac_active = B_FALSE;

	/* wait until threads currently inside the tx path have left it */
	while (dp->tx_busy > 0) {
		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
	}
	mutex_exit(&dp->xmitlock);

	if ((flags & GEM_RESTART_NOWAIT) == 0) {
		/*
		 * Wait for all tx buffers sent.
		 * Budget: twice the wire time (8 bits/byte at the current
		 * link speed) for each outstanding frame, in microseconds.
		 */
		wait_time =
		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
		    (dp->tx_active_tail - dp->tx_active_head);

		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
		    dp->name, __func__, wait_time));
		i = 0;
#ifdef GEM_DEBUG_LEVEL
		now = ddi_get_lbolt();
#endif
		/* poll-and-reclaim until the active tx list is empty */
		while (dp->tx_active_tail != dp->tx_active_head) {
			if (i > wait_time) {
				/* timeout */
				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
				    dp->name, __func__);
				break;
			}
			(void) gem_reclaim_txbuf(dp);
			drv_usecwait(100);
			i += 100;
		}
		/* NOTE(review): 10*ticks as mS assumes 100Hz lbolt */
		DPRINTF(0, (CE_NOTE,
		    "!%s: %s: the nic have drained in %d uS, real %d mS",
		    dp->name, __func__, i,
		    10*((int)(ddi_get_lbolt() - now))));
	}

	/*
	 * Now we can stop the nic safely.
	 */
	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
		/* fall back to a full reset if a polite stop fails */
		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
		    dp->name, __func__);
		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
		}
	}

	/*
	 * Clear all rx buffers
	 */
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* harvest frames already received before clearing the ring */
		(void) gem_receive(dp);
	}
	gem_clean_rx_buf(dp);

	/*
	 * Update final statistics
	 */
	(*dp->gc.gc_get_stats)(dp);

	/*
	 * Clear all pended tx packets
	 */
	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* restore active tx buffers so a warm start re-sends them */
		dp->tx_active_tail = dp->tx_active_head;
		dp->tx_softq_head = dp->tx_active_head;
	} else {
		gem_clean_tx_buf(dp);
	}

	return (ret);
}
32685748Sduboff
32695748Sduboff static int
gem_add_multicast(struct gem_dev * dp,const uint8_t * ep)32705748Sduboff gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
32715748Sduboff {
32725748Sduboff int cnt;
32735748Sduboff int err;
32745748Sduboff
32755748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
32765748Sduboff
32775748Sduboff mutex_enter(&dp->intrlock);
32785748Sduboff if (dp->mac_suspended) {
32795748Sduboff mutex_exit(&dp->intrlock);
32805748Sduboff return (GEM_FAILURE);
32815748Sduboff }
32825748Sduboff
32835748Sduboff if (dp->mc_count_req++ < GEM_MAXMC) {
32845748Sduboff /* append the new address at the end of the mclist */
32855748Sduboff cnt = dp->mc_count;
32865748Sduboff bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
32875748Sduboff ETHERADDRL);
32885748Sduboff if (dp->gc.gc_multicast_hash) {
32895748Sduboff dp->mc_list[cnt].hash =
32905748Sduboff (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
32915748Sduboff }
32925748Sduboff dp->mc_count = cnt + 1;
32935748Sduboff }
32945748Sduboff
32955748Sduboff if (dp->mc_count_req != dp->mc_count) {
32965748Sduboff /* multicast address list overflow */
32975748Sduboff dp->rxmode |= RXMODE_MULTI_OVF;
32985748Sduboff } else {
32995748Sduboff dp->rxmode &= ~RXMODE_MULTI_OVF;
33005748Sduboff }
33015748Sduboff
33027116Sduboff /* tell new multicast list to the hardware */
33035748Sduboff err = gem_mac_set_rx_filter(dp);
33045748Sduboff
33055748Sduboff mutex_exit(&dp->intrlock);
33065748Sduboff
33075748Sduboff return (err);
33085748Sduboff }
33095748Sduboff
33105748Sduboff static int
gem_remove_multicast(struct gem_dev * dp,const uint8_t * ep)33115748Sduboff gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
33125748Sduboff {
33135748Sduboff size_t len;
33145748Sduboff int i;
33155748Sduboff int cnt;
33165748Sduboff int err;
33175748Sduboff
33185748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
33195748Sduboff
33205748Sduboff mutex_enter(&dp->intrlock);
33215748Sduboff if (dp->mac_suspended) {
33225748Sduboff mutex_exit(&dp->intrlock);
33235748Sduboff return (GEM_FAILURE);
33245748Sduboff }
33255748Sduboff
33265748Sduboff dp->mc_count_req--;
33275748Sduboff cnt = dp->mc_count;
33285748Sduboff for (i = 0; i < cnt; i++) {
33295748Sduboff if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
33305748Sduboff continue;
33315748Sduboff }
33325748Sduboff /* shrink the mclist by copying forward */
33335748Sduboff len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
33345748Sduboff if (len > 0) {
33355748Sduboff bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
33365748Sduboff }
33375748Sduboff dp->mc_count--;
33385748Sduboff break;
33395748Sduboff }
33405748Sduboff
33415748Sduboff if (dp->mc_count_req != dp->mc_count) {
33425748Sduboff /* multicast address list overflow */
33435748Sduboff dp->rxmode |= RXMODE_MULTI_OVF;
33445748Sduboff } else {
33455748Sduboff dp->rxmode &= ~RXMODE_MULTI_OVF;
33465748Sduboff }
33475748Sduboff /* In gem v2, don't hold xmitlock on calling set_rx_filter */
33485748Sduboff err = gem_mac_set_rx_filter(dp);
33495748Sduboff
33505748Sduboff mutex_exit(&dp->intrlock);
33515748Sduboff
33525748Sduboff return (err);
33535748Sduboff }
33545748Sduboff
33555748Sduboff /* ============================================================== */
33565748Sduboff /*
33575748Sduboff * ND interface
33585748Sduboff */
33595748Sduboff /* ============================================================== */
/*
 * Index of every ndd-visible parameter.  The order is significant:
 * gem_nd_load() uses these values to index the gem_nd_arg array.
 */
enum {
	/* PHY capabilities (read-only) */
	PARAM_AUTONEG_CAP,
	PARAM_PAUSE_CAP,
	PARAM_ASYM_PAUSE_CAP,
	PARAM_1000FDX_CAP,
	PARAM_1000HDX_CAP,
	PARAM_100T4_CAP,
	PARAM_100FDX_CAP,
	PARAM_100HDX_CAP,
	PARAM_10FDX_CAP,
	PARAM_10HDX_CAP,

	/* our advertised capabilities (writable where the PHY allows) */
	PARAM_ADV_AUTONEG_CAP,
	PARAM_ADV_PAUSE_CAP,
	PARAM_ADV_ASYM_PAUSE_CAP,
	PARAM_ADV_1000FDX_CAP,
	PARAM_ADV_1000HDX_CAP,
	PARAM_ADV_100T4_CAP,
	PARAM_ADV_100FDX_CAP,
	PARAM_ADV_100HDX_CAP,
	PARAM_ADV_10FDX_CAP,
	PARAM_ADV_10HDX_CAP,

	/* link partner's advertised capabilities (read-only) */
	PARAM_LP_AUTONEG_CAP,
	PARAM_LP_PAUSE_CAP,
	PARAM_LP_ASYM_PAUSE_CAP,
	PARAM_LP_1000FDX_CAP,
	PARAM_LP_1000HDX_CAP,
	PARAM_LP_100T4_CAP,
	PARAM_LP_100FDX_CAP,
	PARAM_LP_100HDX_CAP,
	PARAM_LP_10FDX_CAP,
	PARAM_LP_10HDX_CAP,

	/* current operating state */
	PARAM_LINK_STATUS,
	PARAM_LINK_SPEED,
	PARAM_LINK_DUPLEX,

	PARAM_LINK_AUTONEG,
	PARAM_LINK_RX_PAUSE,
	PARAM_LINK_TX_PAUSE,

	PARAM_LOOP_MODE,
	PARAM_MSI_CNT,

#ifdef DEBUG_RESUME
	PARAM_RESUME_TEST,
#endif
	PARAM_COUNT
};
34105748Sduboff
/*
 * How gem_mac_ioctl() should answer a STREAMS ioctl once the handler
 * returns.  IOC_RESTART_* variants additionally indicate the handler
 * may have changed link parameters.
 */
enum ioc_reply {
	IOC_INVAL = -1,			/* bad, NAK with EINVAL */
	IOC_DONE,			/* OK, reply sent */
	IOC_ACK,			/* OK, just send ACK */
	IOC_REPLY,			/* OK, just send reply */
	IOC_RESTART_ACK,		/* OK, restart & ACK */
	IOC_RESTART_REPLY		/* OK, restart & reply */
};
34195748Sduboff
/*
 * Cookie passed to the nd_load() get/set callbacks: binds a device
 * instance to one PARAM_* index.
 */
struct gem_nd_arg {
	struct gem_dev	*dp;	/* owning device */
	int		item;	/* PARAM_* index this entry serves */
};
34245748Sduboff
34255748Sduboff static int
gem_param_get(queue_t * q,mblk_t * mp,caddr_t arg,cred_t * credp)34265748Sduboff gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
34275748Sduboff {
34285748Sduboff struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
34295748Sduboff int item = ((struct gem_nd_arg *)(void *)arg)->item;
34305748Sduboff long val;
34315748Sduboff
34325748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
34335748Sduboff dp->name, __func__, item));
34345748Sduboff
34355748Sduboff switch (item) {
34365748Sduboff case PARAM_AUTONEG_CAP:
34375748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
34385748Sduboff DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
34395748Sduboff break;
34405748Sduboff
34415748Sduboff case PARAM_PAUSE_CAP:
34425748Sduboff val = BOOLEAN(dp->gc.gc_flow_control & 1);
34435748Sduboff break;
34445748Sduboff
34455748Sduboff case PARAM_ASYM_PAUSE_CAP:
34465748Sduboff val = BOOLEAN(dp->gc.gc_flow_control & 2);
34475748Sduboff break;
34485748Sduboff
34495748Sduboff case PARAM_1000FDX_CAP:
34505748Sduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
34515748Sduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
34525748Sduboff break;
34535748Sduboff
34545748Sduboff case PARAM_1000HDX_CAP:
34555748Sduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
34565748Sduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
34575748Sduboff break;
34585748Sduboff
34595748Sduboff case PARAM_100T4_CAP:
34605748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
34615748Sduboff break;
34625748Sduboff
34635748Sduboff case PARAM_100FDX_CAP:
34645748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
34655748Sduboff break;
34665748Sduboff
34675748Sduboff case PARAM_100HDX_CAP:
34685748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
34695748Sduboff break;
34705748Sduboff
34715748Sduboff case PARAM_10FDX_CAP:
34725748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
34735748Sduboff break;
34745748Sduboff
34755748Sduboff case PARAM_10HDX_CAP:
34765748Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10);
34775748Sduboff break;
34785748Sduboff
34795748Sduboff case PARAM_ADV_AUTONEG_CAP:
34805748Sduboff val = dp->anadv_autoneg;
34815748Sduboff break;
34825748Sduboff
34835748Sduboff case PARAM_ADV_PAUSE_CAP:
34845748Sduboff val = BOOLEAN(dp->anadv_flow_control & 1);
34855748Sduboff break;
34865748Sduboff
34875748Sduboff case PARAM_ADV_ASYM_PAUSE_CAP:
34885748Sduboff val = BOOLEAN(dp->anadv_flow_control & 2);
34895748Sduboff break;
34905748Sduboff
34915748Sduboff case PARAM_ADV_1000FDX_CAP:
34925748Sduboff val = dp->anadv_1000fdx;
34935748Sduboff break;
34945748Sduboff
34955748Sduboff case PARAM_ADV_1000HDX_CAP:
34965748Sduboff val = dp->anadv_1000hdx;
34975748Sduboff break;
34985748Sduboff
34995748Sduboff case PARAM_ADV_100T4_CAP:
35005748Sduboff val = dp->anadv_100t4;
35015748Sduboff break;
35025748Sduboff
35035748Sduboff case PARAM_ADV_100FDX_CAP:
35045748Sduboff val = dp->anadv_100fdx;
35055748Sduboff break;
35065748Sduboff
35075748Sduboff case PARAM_ADV_100HDX_CAP:
35085748Sduboff val = dp->anadv_100hdx;
35095748Sduboff break;
35105748Sduboff
35115748Sduboff case PARAM_ADV_10FDX_CAP:
35125748Sduboff val = dp->anadv_10fdx;
35135748Sduboff break;
35145748Sduboff
35155748Sduboff case PARAM_ADV_10HDX_CAP:
35165748Sduboff val = dp->anadv_10hdx;
35175748Sduboff break;
35185748Sduboff
35195748Sduboff case PARAM_LP_AUTONEG_CAP:
35205748Sduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
35215748Sduboff break;
35225748Sduboff
35235748Sduboff case PARAM_LP_PAUSE_CAP:
35245748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
35255748Sduboff break;
35265748Sduboff
35275748Sduboff case PARAM_LP_ASYM_PAUSE_CAP:
35289860Sgdamore@opensolaris.org val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
35295748Sduboff break;
35305748Sduboff
35315748Sduboff case PARAM_LP_1000FDX_CAP:
35325748Sduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
35335748Sduboff break;
35345748Sduboff
35355748Sduboff case PARAM_LP_1000HDX_CAP:
35365748Sduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
35375748Sduboff break;
35385748Sduboff
35395748Sduboff case PARAM_LP_100T4_CAP:
35405748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
35415748Sduboff break;
35425748Sduboff
35435748Sduboff case PARAM_LP_100FDX_CAP:
35445748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
35455748Sduboff break;
35465748Sduboff
35475748Sduboff case PARAM_LP_100HDX_CAP:
35485748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
35495748Sduboff break;
35505748Sduboff
35515748Sduboff case PARAM_LP_10FDX_CAP:
35525748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
35535748Sduboff break;
35545748Sduboff
35555748Sduboff case PARAM_LP_10HDX_CAP:
35565748Sduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
35575748Sduboff break;
35585748Sduboff
35595748Sduboff case PARAM_LINK_STATUS:
35605748Sduboff val = (dp->mii_state == MII_STATE_LINKUP);
35615748Sduboff break;
35625748Sduboff
35635748Sduboff case PARAM_LINK_SPEED:
35645748Sduboff val = gem_speed_value[dp->speed];
35655748Sduboff break;
35665748Sduboff
35675748Sduboff case PARAM_LINK_DUPLEX:
35685748Sduboff val = 0;
35695748Sduboff if (dp->mii_state == MII_STATE_LINKUP) {
35705748Sduboff val = dp->full_duplex ? 2 : 1;
35715748Sduboff }
35725748Sduboff break;
35735748Sduboff
35745748Sduboff case PARAM_LINK_AUTONEG:
35755748Sduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
35765748Sduboff break;
35775748Sduboff
35785748Sduboff case PARAM_LINK_RX_PAUSE:
35795748Sduboff val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
35805748Sduboff (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
35815748Sduboff break;
35825748Sduboff
35835748Sduboff case PARAM_LINK_TX_PAUSE:
35845748Sduboff val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
35855748Sduboff (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
35865748Sduboff break;
35875748Sduboff
35885748Sduboff #ifdef DEBUG_RESUME
35895748Sduboff case PARAM_RESUME_TEST:
35905748Sduboff val = 0;
35915748Sduboff break;
35925748Sduboff #endif
35935748Sduboff default:
35945748Sduboff cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
35955748Sduboff dp->name, item);
35965748Sduboff break;
35975748Sduboff }
35985748Sduboff
35995748Sduboff (void) mi_mpprintf(mp, "%ld", val);
36005748Sduboff
36015748Sduboff return (0);
36025748Sduboff }
36035748Sduboff
36045748Sduboff static int
gem_param_set(queue_t * q,mblk_t * mp,char * value,caddr_t arg,cred_t * credp)36055748Sduboff gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
36065748Sduboff {
36075748Sduboff struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
36085748Sduboff int item = ((struct gem_nd_arg *)(void *)arg)->item;
36095748Sduboff long val;
36105748Sduboff char *end;
36115748Sduboff
36125748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
36135748Sduboff if (ddi_strtol(value, &end, 10, &val)) {
36145748Sduboff return (EINVAL);
36155748Sduboff }
36165748Sduboff if (end == value) {
36175748Sduboff return (EINVAL);
36185748Sduboff }
36195748Sduboff
36205748Sduboff switch (item) {
36215748Sduboff case PARAM_ADV_AUTONEG_CAP:
36225748Sduboff if (val != 0 && val != 1) {
36235748Sduboff goto err;
36245748Sduboff }
36255748Sduboff if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
36265748Sduboff goto err;
36275748Sduboff }
36285748Sduboff dp->anadv_autoneg = (int)val;
36295748Sduboff break;
36305748Sduboff
36315748Sduboff case PARAM_ADV_PAUSE_CAP:
36325748Sduboff if (val != 0 && val != 1) {
36335748Sduboff goto err;
36345748Sduboff }
36355748Sduboff if (val) {
36365748Sduboff dp->anadv_flow_control |= 1;
36375748Sduboff } else {
36385748Sduboff dp->anadv_flow_control &= ~1;
36395748Sduboff }
36405748Sduboff break;
36415748Sduboff
36425748Sduboff case PARAM_ADV_ASYM_PAUSE_CAP:
36435748Sduboff if (val != 0 && val != 1) {
36445748Sduboff goto err;
36455748Sduboff }
36465748Sduboff if (val) {
36475748Sduboff dp->anadv_flow_control |= 2;
36485748Sduboff } else {
36495748Sduboff dp->anadv_flow_control &= ~2;
36505748Sduboff }
36515748Sduboff break;
36525748Sduboff
36535748Sduboff case PARAM_ADV_1000FDX_CAP:
36545748Sduboff if (val != 0 && val != 1) {
36555748Sduboff goto err;
36565748Sduboff }
36575748Sduboff if (val && (dp->mii_xstatus &
36585748Sduboff (MII_XSTATUS_1000BASET_FD |
36595748Sduboff MII_XSTATUS_1000BASEX_FD)) == 0) {
36605748Sduboff goto err;
36615748Sduboff }
36625748Sduboff dp->anadv_1000fdx = (int)val;
36635748Sduboff break;
36645748Sduboff
36655748Sduboff case PARAM_ADV_1000HDX_CAP:
36665748Sduboff if (val != 0 && val != 1) {
36675748Sduboff goto err;
36685748Sduboff }
36695748Sduboff if (val && (dp->mii_xstatus &
36705748Sduboff (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
36715748Sduboff goto err;
36725748Sduboff }
36735748Sduboff dp->anadv_1000hdx = (int)val;
36745748Sduboff break;
36755748Sduboff
36765748Sduboff case PARAM_ADV_100T4_CAP:
36775748Sduboff if (val != 0 && val != 1) {
36785748Sduboff goto err;
36795748Sduboff }
36805748Sduboff if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
36815748Sduboff goto err;
36825748Sduboff }
36835748Sduboff dp->anadv_100t4 = (int)val;
36845748Sduboff break;
36855748Sduboff
36865748Sduboff case PARAM_ADV_100FDX_CAP:
36875748Sduboff if (val != 0 && val != 1) {
36885748Sduboff goto err;
36895748Sduboff }
36905748Sduboff if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
36915748Sduboff goto err;
36925748Sduboff }
36935748Sduboff dp->anadv_100fdx = (int)val;
36945748Sduboff break;
36955748Sduboff
36965748Sduboff case PARAM_ADV_100HDX_CAP:
36975748Sduboff if (val != 0 && val != 1) {
36985748Sduboff goto err;
36995748Sduboff }
37005748Sduboff if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
37015748Sduboff goto err;
37025748Sduboff }
37035748Sduboff dp->anadv_100hdx = (int)val;
37045748Sduboff break;
37055748Sduboff
37065748Sduboff case PARAM_ADV_10FDX_CAP:
37075748Sduboff if (val != 0 && val != 1) {
37085748Sduboff goto err;
37095748Sduboff }
37105748Sduboff if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
37115748Sduboff goto err;
37125748Sduboff }
37135748Sduboff dp->anadv_10fdx = (int)val;
37145748Sduboff break;
37155748Sduboff
37165748Sduboff case PARAM_ADV_10HDX_CAP:
37175748Sduboff if (val != 0 && val != 1) {
37185748Sduboff goto err;
37195748Sduboff }
37205748Sduboff if (val && (dp->mii_status & MII_STATUS_10) == 0) {
37215748Sduboff goto err;
37225748Sduboff }
37235748Sduboff dp->anadv_10hdx = (int)val;
37245748Sduboff break;
37255748Sduboff }
37265748Sduboff
37275748Sduboff /* sync with PHY */
37285748Sduboff gem_choose_forcedmode(dp);
37295748Sduboff
37305748Sduboff dp->mii_state = MII_STATE_UNKNOWN;
37315748Sduboff if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
37325748Sduboff /* XXX - Can we ignore the return code ? */
37335748Sduboff (void) gem_mii_link_check(dp);
37345748Sduboff }
37355748Sduboff
37365748Sduboff return (0);
37375748Sduboff err:
37385748Sduboff return (EINVAL);
37395748Sduboff }
37405748Sduboff
37415748Sduboff static void
gem_nd_load(struct gem_dev * dp,char * name,ndgetf_t gf,ndsetf_t sf,int item)37425748Sduboff gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
37435748Sduboff {
37445748Sduboff struct gem_nd_arg *arg;
37455748Sduboff
37465748Sduboff ASSERT(item >= 0);
37475748Sduboff ASSERT(item < PARAM_COUNT);
37485748Sduboff
37495748Sduboff arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
37505748Sduboff arg->dp = dp;
37515748Sduboff arg->item = item;
37525748Sduboff
37535748Sduboff DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
37545748Sduboff dp->name, __func__, name, item));
37557116Sduboff (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
37565748Sduboff }
37575748Sduboff
37585748Sduboff static void
gem_nd_setup(struct gem_dev * dp)37595748Sduboff gem_nd_setup(struct gem_dev *dp)
37605748Sduboff {
37615748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
37625748Sduboff dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
37635748Sduboff
37645748Sduboff ASSERT(dp->nd_arg_p == NULL);
37655748Sduboff
37665748Sduboff dp->nd_arg_p =
37675748Sduboff kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
37685748Sduboff
37695748Sduboff #define SETFUNC(x) ((x) ? gem_param_set : NULL)
37705748Sduboff
37715748Sduboff gem_nd_load(dp, "autoneg_cap",
37725748Sduboff gem_param_get, NULL, PARAM_AUTONEG_CAP);
37735748Sduboff gem_nd_load(dp, "pause_cap",
37745748Sduboff gem_param_get, NULL, PARAM_PAUSE_CAP);
37755748Sduboff gem_nd_load(dp, "asym_pause_cap",
37765748Sduboff gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
37775748Sduboff gem_nd_load(dp, "1000fdx_cap",
37785748Sduboff gem_param_get, NULL, PARAM_1000FDX_CAP);
37795748Sduboff gem_nd_load(dp, "1000hdx_cap",
37805748Sduboff gem_param_get, NULL, PARAM_1000HDX_CAP);
37815748Sduboff gem_nd_load(dp, "100T4_cap",
37825748Sduboff gem_param_get, NULL, PARAM_100T4_CAP);
37835748Sduboff gem_nd_load(dp, "100fdx_cap",
37845748Sduboff gem_param_get, NULL, PARAM_100FDX_CAP);
37855748Sduboff gem_nd_load(dp, "100hdx_cap",
37865748Sduboff gem_param_get, NULL, PARAM_100HDX_CAP);
37875748Sduboff gem_nd_load(dp, "10fdx_cap",
37885748Sduboff gem_param_get, NULL, PARAM_10FDX_CAP);
37895748Sduboff gem_nd_load(dp, "10hdx_cap",
37905748Sduboff gem_param_get, NULL, PARAM_10HDX_CAP);
37915748Sduboff
37925748Sduboff /* Our advertised capabilities */
37935748Sduboff gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
37945748Sduboff SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
37955748Sduboff PARAM_ADV_AUTONEG_CAP);
37965748Sduboff gem_nd_load(dp, "adv_pause_cap", gem_param_get,
37975748Sduboff SETFUNC(dp->gc.gc_flow_control & 1),
37985748Sduboff PARAM_ADV_PAUSE_CAP);
37995748Sduboff gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
38005748Sduboff SETFUNC(dp->gc.gc_flow_control & 2),
38015748Sduboff PARAM_ADV_ASYM_PAUSE_CAP);
38025748Sduboff gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
38035748Sduboff SETFUNC(dp->mii_xstatus &
38045748Sduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
38055748Sduboff PARAM_ADV_1000FDX_CAP);
38065748Sduboff gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
38075748Sduboff SETFUNC(dp->mii_xstatus &
38085748Sduboff (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
38095748Sduboff PARAM_ADV_1000HDX_CAP);
38105748Sduboff gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
38115748Sduboff SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
38125748Sduboff !dp->mii_advert_ro),
38135748Sduboff PARAM_ADV_100T4_CAP);
38145748Sduboff gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
38155748Sduboff SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
38165748Sduboff !dp->mii_advert_ro),
38175748Sduboff PARAM_ADV_100FDX_CAP);
38185748Sduboff gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
38195748Sduboff SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
38205748Sduboff !dp->mii_advert_ro),
38215748Sduboff PARAM_ADV_100HDX_CAP);
38225748Sduboff gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
38235748Sduboff SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
38245748Sduboff !dp->mii_advert_ro),
38255748Sduboff PARAM_ADV_10FDX_CAP);
38265748Sduboff gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
38275748Sduboff SETFUNC((dp->mii_status & MII_STATUS_10) &&
38285748Sduboff !dp->mii_advert_ro),
38295748Sduboff PARAM_ADV_10HDX_CAP);
38305748Sduboff
38315748Sduboff /* Partner's advertised capabilities */
38325748Sduboff gem_nd_load(dp, "lp_autoneg_cap",
38335748Sduboff gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
38345748Sduboff gem_nd_load(dp, "lp_pause_cap",
38355748Sduboff gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
38365748Sduboff gem_nd_load(dp, "lp_asym_pause_cap",
38375748Sduboff gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
38385748Sduboff gem_nd_load(dp, "lp_1000fdx_cap",
38395748Sduboff gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
38405748Sduboff gem_nd_load(dp, "lp_1000hdx_cap",
38415748Sduboff gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
38425748Sduboff gem_nd_load(dp, "lp_100T4_cap",
38435748Sduboff gem_param_get, NULL, PARAM_LP_100T4_CAP);
38445748Sduboff gem_nd_load(dp, "lp_100fdx_cap",
38455748Sduboff gem_param_get, NULL, PARAM_LP_100FDX_CAP);
38465748Sduboff gem_nd_load(dp, "lp_100hdx_cap",
38475748Sduboff gem_param_get, NULL, PARAM_LP_100HDX_CAP);
38485748Sduboff gem_nd_load(dp, "lp_10fdx_cap",
38495748Sduboff gem_param_get, NULL, PARAM_LP_10FDX_CAP);
38505748Sduboff gem_nd_load(dp, "lp_10hdx_cap",
38515748Sduboff gem_param_get, NULL, PARAM_LP_10HDX_CAP);
38525748Sduboff
38535748Sduboff /* Current operating modes */
38545748Sduboff gem_nd_load(dp, "link_status",
38555748Sduboff gem_param_get, NULL, PARAM_LINK_STATUS);
38565748Sduboff gem_nd_load(dp, "link_speed",
38575748Sduboff gem_param_get, NULL, PARAM_LINK_SPEED);
38585748Sduboff gem_nd_load(dp, "link_duplex",
38595748Sduboff gem_param_get, NULL, PARAM_LINK_DUPLEX);
38605748Sduboff gem_nd_load(dp, "link_autoneg",
38615748Sduboff gem_param_get, NULL, PARAM_LINK_AUTONEG);
38625748Sduboff gem_nd_load(dp, "link_rx_pause",
38635748Sduboff gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
38645748Sduboff gem_nd_load(dp, "link_tx_pause",
38655748Sduboff gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
38665748Sduboff #ifdef DEBUG_RESUME
38675748Sduboff gem_nd_load(dp, "resume_test",
38685748Sduboff gem_param_get, NULL, PARAM_RESUME_TEST);
38695748Sduboff #endif
38705748Sduboff #undef SETFUNC
38715748Sduboff }
38725748Sduboff
38735748Sduboff static
38745748Sduboff enum ioc_reply
gem_nd_ioctl(struct gem_dev * dp,queue_t * wq,mblk_t * mp,struct iocblk * iocp)38755748Sduboff gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
38765748Sduboff {
38775748Sduboff boolean_t ok;
38785748Sduboff
38795748Sduboff ASSERT(mutex_owned(&dp->intrlock));
38805748Sduboff
38815748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
38825748Sduboff
38835748Sduboff switch (iocp->ioc_cmd) {
38845748Sduboff case ND_GET:
38855748Sduboff ok = nd_getset(wq, dp->nd_data_p, mp);
38865748Sduboff DPRINTF(0, (CE_CONT,
38875748Sduboff "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
38885748Sduboff return (ok ? IOC_REPLY : IOC_INVAL);
38895748Sduboff
38905748Sduboff case ND_SET:
38915748Sduboff ok = nd_getset(wq, dp->nd_data_p, mp);
38925748Sduboff
38935748Sduboff DPRINTF(0, (CE_CONT, "%s: set %s err %d",
38945748Sduboff dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
38955748Sduboff
38965748Sduboff if (!ok) {
38975748Sduboff return (IOC_INVAL);
38985748Sduboff }
38995748Sduboff
39005748Sduboff if (iocp->ioc_error) {
39015748Sduboff return (IOC_REPLY);
39025748Sduboff }
39035748Sduboff
39045748Sduboff return (IOC_RESTART_REPLY);
39055748Sduboff }
39065748Sduboff
39075748Sduboff cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
39085748Sduboff
39095748Sduboff return (IOC_INVAL);
39105748Sduboff }
39115748Sduboff
39125748Sduboff static void
gem_nd_cleanup(struct gem_dev * dp)39135748Sduboff gem_nd_cleanup(struct gem_dev *dp)
39145748Sduboff {
39155748Sduboff ASSERT(dp->nd_data_p != NULL);
39165748Sduboff ASSERT(dp->nd_arg_p != NULL);
39175748Sduboff
39185748Sduboff nd_free(&dp->nd_data_p);
39195748Sduboff
39205748Sduboff kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
39215748Sduboff dp->nd_arg_p = NULL;
39225748Sduboff }
39235748Sduboff
39245748Sduboff static void
gem_mac_ioctl(struct gem_dev * dp,queue_t * wq,mblk_t * mp)39255748Sduboff gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
39265748Sduboff {
39275748Sduboff struct iocblk *iocp;
39285748Sduboff enum ioc_reply status;
39295748Sduboff int cmd;
39305748Sduboff
39315748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
39325748Sduboff
39335748Sduboff /*
39345748Sduboff * Validate the command before bothering with the mutex ...
39355748Sduboff */
39365748Sduboff iocp = (void *)mp->b_rptr;
39375748Sduboff iocp->ioc_error = 0;
39385748Sduboff cmd = iocp->ioc_cmd;
39395748Sduboff
39405748Sduboff DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
39415748Sduboff
39425748Sduboff mutex_enter(&dp->intrlock);
39435748Sduboff mutex_enter(&dp->xmitlock);
39445748Sduboff
39455748Sduboff switch (cmd) {
39465748Sduboff default:
39475748Sduboff _NOTE(NOTREACHED)
39485748Sduboff status = IOC_INVAL;
39495748Sduboff break;
39505748Sduboff
39515748Sduboff case ND_GET:
39525748Sduboff case ND_SET:
39535748Sduboff status = gem_nd_ioctl(dp, wq, mp, iocp);
39545748Sduboff break;
39555748Sduboff }
39565748Sduboff
39575748Sduboff mutex_exit(&dp->xmitlock);
39585748Sduboff mutex_exit(&dp->intrlock);
39595748Sduboff
39605748Sduboff #ifdef DEBUG_RESUME
39615748Sduboff if (cmd == ND_GET) {
39625748Sduboff gem_suspend(dp->dip);
39635748Sduboff gem_resume(dp->dip);
39645748Sduboff }
39655748Sduboff #endif
39665748Sduboff /*
39675748Sduboff * Finally, decide how to reply
39685748Sduboff */
39695748Sduboff switch (status) {
39705748Sduboff default:
39715748Sduboff case IOC_INVAL:
39725748Sduboff /*
39735748Sduboff * Error, reply with a NAK and EINVAL or the specified error
39745748Sduboff */
39755748Sduboff miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
39765748Sduboff EINVAL : iocp->ioc_error);
39775748Sduboff break;
39785748Sduboff
39795748Sduboff case IOC_DONE:
39805748Sduboff /*
39815748Sduboff * OK, reply already sent
39825748Sduboff */
39835748Sduboff break;
39845748Sduboff
39855748Sduboff case IOC_RESTART_ACK:
39865748Sduboff case IOC_ACK:
39875748Sduboff /*
39885748Sduboff * OK, reply with an ACK
39895748Sduboff */
39905748Sduboff miocack(wq, mp, 0, 0);
39915748Sduboff break;
39925748Sduboff
39935748Sduboff case IOC_RESTART_REPLY:
39945748Sduboff case IOC_REPLY:
39955748Sduboff /*
39965748Sduboff * OK, send prepared reply as ACK or NAK
39975748Sduboff */
39985748Sduboff mp->b_datap->db_type =
39995748Sduboff iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
40005748Sduboff qreply(wq, mp);
40015748Sduboff break;
40025748Sduboff }
40035748Sduboff }
40045748Sduboff
40055748Sduboff #ifndef SYS_MAC_H
40065748Sduboff #define XCVR_UNDEFINED 0
40075748Sduboff #define XCVR_NONE 1
40085748Sduboff #define XCVR_10 2
40095748Sduboff #define XCVR_100T4 3
40105748Sduboff #define XCVR_100X 4
40115748Sduboff #define XCVR_100T2 5
40125748Sduboff #define XCVR_1000X 6
40135748Sduboff #define XCVR_1000T 7
40145748Sduboff #endif
40155748Sduboff static int
gem_mac_xcvr_inuse(struct gem_dev * dp)40165748Sduboff gem_mac_xcvr_inuse(struct gem_dev *dp)
40175748Sduboff {
40185748Sduboff int val = XCVR_UNDEFINED;
40195748Sduboff
40205748Sduboff if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
40215748Sduboff if (dp->mii_status & MII_STATUS_100_BASE_T4) {
40225748Sduboff val = XCVR_100T4;
40235748Sduboff } else if (dp->mii_status &
40245748Sduboff (MII_STATUS_100_BASEX_FD |
40255748Sduboff MII_STATUS_100_BASEX)) {
40265748Sduboff val = XCVR_100X;
40275748Sduboff } else if (dp->mii_status &
40285748Sduboff (MII_STATUS_100_BASE_T2_FD |
40295748Sduboff MII_STATUS_100_BASE_T2)) {
40305748Sduboff val = XCVR_100T2;
40315748Sduboff } else if (dp->mii_status &
40325748Sduboff (MII_STATUS_10_FD | MII_STATUS_10)) {
40335748Sduboff val = XCVR_10;
40345748Sduboff }
40355748Sduboff } else if (dp->mii_xstatus &
40365748Sduboff (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
40375748Sduboff val = XCVR_1000T;
40385748Sduboff } else if (dp->mii_xstatus &
40395748Sduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
40405748Sduboff val = XCVR_1000X;
40415748Sduboff }
40425748Sduboff
40435748Sduboff return (val);
40445748Sduboff }
40455748Sduboff
40465748Sduboff /* ============================================================== */
40475748Sduboff /*
40485748Sduboff * GLDv3 interface
40495748Sduboff */
40505748Sduboff /* ============================================================== */
40515748Sduboff static int gem_m_getstat(void *, uint_t, uint64_t *);
40525748Sduboff static int gem_m_start(void *);
40535748Sduboff static void gem_m_stop(void *);
40545748Sduboff static int gem_m_setpromisc(void *, boolean_t);
40555748Sduboff static int gem_m_multicst(void *, boolean_t, const uint8_t *);
40565748Sduboff static int gem_m_unicst(void *, const uint8_t *);
40575748Sduboff static mblk_t *gem_m_tx(void *, mblk_t *);
40585748Sduboff static void gem_m_ioctl(void *, queue_t *, mblk_t *);
40595748Sduboff static boolean_t gem_m_getcapab(void *, mac_capab_t, void *);
40605748Sduboff
40618275SEric Cheng #define GEM_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
40625748Sduboff
40635748Sduboff static mac_callbacks_t gem_m_callbacks = {
40645748Sduboff GEM_M_CALLBACK_FLAGS,
40655748Sduboff gem_m_getstat,
40665748Sduboff gem_m_start,
40675748Sduboff gem_m_stop,
40685748Sduboff gem_m_setpromisc,
40695748Sduboff gem_m_multicst,
40705748Sduboff gem_m_unicst,
40715748Sduboff gem_m_tx,
4072*11878SVenu.Iyer@Sun.COM NULL,
40735748Sduboff gem_m_ioctl,
40745748Sduboff gem_m_getcapab,
40755748Sduboff };
40765748Sduboff
40775748Sduboff static int
gem_m_start(void * arg)40785748Sduboff gem_m_start(void *arg)
40795748Sduboff {
40805748Sduboff int err = 0;
40815748Sduboff struct gem_dev *dp = arg;
40825748Sduboff
40835748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
40845748Sduboff
40855748Sduboff mutex_enter(&dp->intrlock);
40865748Sduboff if (dp->mac_suspended) {
40875748Sduboff err = EIO;
40885748Sduboff goto x;
40895748Sduboff }
40905748Sduboff if (gem_mac_init(dp) != GEM_SUCCESS) {
40915748Sduboff err = EIO;
40925748Sduboff goto x;
40935748Sduboff }
40945748Sduboff dp->nic_state = NIC_STATE_INITIALIZED;
40955748Sduboff
40965748Sduboff /* reset rx filter state */
40975748Sduboff dp->mc_count = 0;
40985748Sduboff dp->mc_count_req = 0;
40995748Sduboff
41005748Sduboff /* setup media mode if the link have been up */
41015748Sduboff if (dp->mii_state == MII_STATE_LINKUP) {
41025748Sduboff (dp->gc.gc_set_media)(dp);
41035748Sduboff }
41045748Sduboff
41055748Sduboff /* setup initial rx filter */
41065748Sduboff bcopy(dp->dev_addr.ether_addr_octet,
41075748Sduboff dp->cur_addr.ether_addr_octet, ETHERADDRL);
41085748Sduboff dp->rxmode |= RXMODE_ENABLE;
41095748Sduboff
41105748Sduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
41115748Sduboff err = EIO;
41125748Sduboff goto x;
41135748Sduboff }
41145748Sduboff
41155748Sduboff dp->nic_state = NIC_STATE_ONLINE;
41165748Sduboff if (dp->mii_state == MII_STATE_LINKUP) {
41175748Sduboff if (gem_mac_start(dp) != GEM_SUCCESS) {
41185748Sduboff err = EIO;
41195748Sduboff goto x;
41205748Sduboff }
41215748Sduboff }
41225748Sduboff
41235748Sduboff dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
41245748Sduboff (void *)dp, dp->gc.gc_tx_timeout_interval);
41255748Sduboff mutex_exit(&dp->intrlock);
41265748Sduboff
41275748Sduboff return (0);
41285748Sduboff x:
41295748Sduboff dp->nic_state = NIC_STATE_STOPPED;
41305748Sduboff mutex_exit(&dp->intrlock);
41315748Sduboff return (err);
41325748Sduboff }
41335748Sduboff
/*
 * gem_m_stop: GLDv3 mc_stop(9E) entry point.
 * Quiesces the device: disables rx, cancels the tx timeout watcher,
 * marks the nic stopped, and waits for any in-flight interrupt handler
 * to drain before stopping the mac.  No-op if the device is suspended.
 * NOTE(review): intrlock is dropped and re-acquired around untimeout()
 * because untimeout(9F) may block on a running timeout handler that
 * itself takes intrlock; hence the second mac_suspended re-check.
 */
static void
gem_m_stop(void *arg)
{
	struct gem_dev *dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* stop rx */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		/* device is suspended; nothing to stop */
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->rxmode &= ~RXMODE_ENABLE;
	(void) gem_mac_set_rx_filter(dp);
	mutex_exit(&dp->intrlock);

	/* stop tx timeout watcher */
	if (dp->timeout_id) {
		/* retry: -1 means the handler is currently running */
		while (untimeout(dp->timeout_id) == -1)
			;
		dp->timeout_id = 0;
	}

	/* make the nic state inactive */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		/* suspended while the lock was dropped above */
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->nic_state = NIC_STATE_STOPPED;

	/* we need deassert mac_active due to block interrupt handler */
	mutex_enter(&dp->xmitlock);
	dp->mac_active = B_FALSE;
	mutex_exit(&dp->xmitlock);

	/* block interrupts */
	while (dp->intr_busy) {
		/* interrupt handler signals tx_drain_cv when it leaves */
		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
	}
	(void) gem_mac_stop(dp, 0);
	mutex_exit(&dp->intrlock);
}
41785748Sduboff
41795748Sduboff static int
gem_m_multicst(void * arg,boolean_t add,const uint8_t * ep)41805748Sduboff gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
41815748Sduboff {
41825748Sduboff int err;
41835748Sduboff int ret;
41845748Sduboff struct gem_dev *dp = arg;
41855748Sduboff
41865748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
41875748Sduboff
41885748Sduboff if (add) {
41895748Sduboff ret = gem_add_multicast(dp, ep);
41905748Sduboff } else {
41915748Sduboff ret = gem_remove_multicast(dp, ep);
41925748Sduboff }
41935748Sduboff
41945748Sduboff err = 0;
41955748Sduboff if (ret != GEM_SUCCESS) {
41965748Sduboff err = EIO;
41975748Sduboff }
41985748Sduboff
41995748Sduboff return (err);
42005748Sduboff }
42015748Sduboff
42025748Sduboff static int
gem_m_setpromisc(void * arg,boolean_t on)42035748Sduboff gem_m_setpromisc(void *arg, boolean_t on)
42045748Sduboff {
42055748Sduboff int err = 0; /* no error */
42065748Sduboff struct gem_dev *dp = arg;
42075748Sduboff
42085748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
42095748Sduboff
42105748Sduboff mutex_enter(&dp->intrlock);
42115748Sduboff if (dp->mac_suspended) {
42125748Sduboff mutex_exit(&dp->intrlock);
42135748Sduboff return (EIO);
42145748Sduboff }
42155748Sduboff if (on) {
42165748Sduboff dp->rxmode |= RXMODE_PROMISC;
42175748Sduboff } else {
42185748Sduboff dp->rxmode &= ~RXMODE_PROMISC;
42195748Sduboff }
42205748Sduboff
42215748Sduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
42225748Sduboff err = EIO;
42235748Sduboff }
42245748Sduboff mutex_exit(&dp->intrlock);
42255748Sduboff
42265748Sduboff return (err);
42275748Sduboff }
42285748Sduboff
/*
 * gem_m_getstat: GLDv3 mc_getstat(9E) entry point.
 * Refreshes the hardware counters via the chip's gc_get_stats method,
 * then maps the requested MAC_STAT_*/ETHER_STAT_* id onto the cached
 * software statistics and link/PHY state.  Returns 0 with *valp set,
 * EIO if the device is suspended or the stats refresh fails, and
 * ENOTSUP for unrecognized stat ids.
 */
int
gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
{
	struct gem_dev *dp = arg;
	struct gem_stats *gstp = &dp->stats;
	uint64_t val = 0;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Check mac_suspended whether or not the caller already holds
	 * intrlock; when it does not, take the lock only for the check.
	 * NOTE(review): the lock is dropped before gc_get_stats below,
	 * so the suspended state is only a snapshot — confirm callers
	 * tolerate a concurrent suspend.
	 */
	if (mutex_owned(&dp->intrlock)) {
		if (dp->mac_suspended) {
			return (EIO);
		}
	} else {
		mutex_enter(&dp->intrlock);
		if (dp->mac_suspended) {
			mutex_exit(&dp->intrlock);
			return (EIO);
		}
		mutex_exit(&dp->intrlock);
	}

	/* have the chip driver refresh the counters from hardware */
	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
		return (EIO);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* gem_speed_value[] is in Mbps; report bits/sec */
		val = gem_speed_value[dp->speed] *1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		val = gstp->rmcast;
		break;

	case MAC_STAT_BRDCSTRCV:
		val = gstp->rbcast;
		break;

	case MAC_STAT_MULTIXMT:
		val = gstp->omcast;
		break;

	case MAC_STAT_BRDCSTXMT:
		val = gstp->obcast;
		break;

	case MAC_STAT_NORCVBUF:
		val = gstp->norcvbuf + gstp->missed;
		break;

	case MAC_STAT_IERRORS:
		val = gstp->errrcv;
		break;

	case MAC_STAT_NOXMTBUF:
		val = gstp->noxmtbuf;
		break;

	case MAC_STAT_OERRORS:
		val = gstp->errxmt;
		break;

	case MAC_STAT_COLLISIONS:
		val = gstp->collisions;
		break;

	case MAC_STAT_RBYTES:
		val = gstp->rbytes;
		break;

	case MAC_STAT_IPACKETS:
		val = gstp->rpackets;
		break;

	case MAC_STAT_OBYTES:
		val = gstp->obytes;
		break;

	case MAC_STAT_OPACKETS:
		val = gstp->opackets;
		break;

	case MAC_STAT_UNDERFLOWS:
		val = gstp->underflow;
		break;

	case MAC_STAT_OVERFLOWS:
		val = gstp->overflow;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		val = gstp->frame;
		break;

	case ETHER_STAT_FCS_ERRORS:
		val = gstp->crc;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		val = gstp->first_coll;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		val = gstp->multi_coll;
		break;

	case ETHER_STAT_SQE_ERRORS:
		val = gstp->sqe;
		break;

	case ETHER_STAT_DEFER_XMTS:
		val = gstp->defer;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		val = gstp->xmtlatecoll;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		val = gstp->excoll;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		val = gstp->xmit_internal_err;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		val = gstp->nocarrier;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		val = gstp->frame_too_long;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		val = gstp->rcv_internal_err;
		break;

	case ETHER_STAT_XCVR_ADDR:
		val = dp->mii_phy_addr;
		break;

	case ETHER_STAT_XCVR_ID:
		val = dp->mii_phy_id;
		break;

	case ETHER_STAT_XCVR_INUSE:
		val = gem_mac_xcvr_inuse(dp);
		break;

	/* device capabilities, from the PHY status/extended-status regs */
	case ETHER_STAT_CAP_1000FDX:
		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
		break;

	case ETHER_STAT_CAP_1000HDX:
		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
		break;

	case ETHER_STAT_CAP_100FDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
		break;

	case ETHER_STAT_CAP_100HDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
		break;

	case ETHER_STAT_CAP_10FDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
		break;

	case ETHER_STAT_CAP_10HDX:
		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		/* bit 1 of gc_flow_control: asymmetric pause capability */
		val = BOOLEAN(dp->gc.gc_flow_control & 2);
		break;

	case ETHER_STAT_CAP_PAUSE:
		/* bit 0 of gc_flow_control: symmetric pause capability */
		val = BOOLEAN(dp->gc.gc_flow_control & 1);
		break;

	case ETHER_STAT_CAP_AUTONEG:
		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
		break;

	/* locally advertised abilities */
	case ETHER_STAT_ADV_CAP_1000FDX:
		val = dp->anadv_1000fdx;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		val = dp->anadv_1000hdx;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		val = dp->anadv_100fdx;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		val = dp->anadv_100hdx;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		val = dp->anadv_10fdx;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		val = dp->anadv_10hdx;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		val = BOOLEAN(dp->anadv_flow_control & 2);
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		val = BOOLEAN(dp->anadv_flow_control & 1);
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		val = dp->anadv_autoneg;
		break;

	/* link-partner abilities, from the autonegotiation registers */
	case ETHER_STAT_LP_CAP_1000FDX:
		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
		break;

	/* negotiated link state */
	case ETHER_STAT_LINK_ASMPAUSE:
		val = BOOLEAN(dp->flow_control & 2);
		break;

	case ETHER_STAT_LINK_PAUSE:
		val = BOOLEAN(dp->flow_control & 1);
		break;

	case ETHER_STAT_LINK_AUTONEG:
		val = dp->anadv_autoneg &&
		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
		break;

	case ETHER_STAT_LINK_DUPLEX:
		/* 0: unknown (link down), 1: half, 2: full */
		val = (dp->mii_state == MII_STATE_LINKUP) ?
		    (dp->full_duplex ? 2 : 1) : 0;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		val = gstp->runt;
		break;
	case ETHER_STAT_LP_REMFAULT:
		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
		break;

	case ETHER_STAT_JABBER_ERRORS:
		val = gstp->jabber;
		break;

	case ETHER_STAT_CAP_100T4:
		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		val = dp->anadv_100t4;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
		break;

	default:
#if GEM_DEBUG_LEVEL > 2
		cmn_err(CE_WARN,
		    "%s: unrecognized parameter value = %d",
		    __func__, stat);
#endif
		return (ENOTSUP);
	}

	*valp = val;

	return (0);
}
45445748Sduboff
45455748Sduboff static int
gem_m_unicst(void * arg,const uint8_t * mac)45465748Sduboff gem_m_unicst(void *arg, const uint8_t *mac)
45475748Sduboff {
45485748Sduboff int err = 0;
45495748Sduboff struct gem_dev *dp = arg;
45505748Sduboff
45515748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
45525748Sduboff
45535748Sduboff mutex_enter(&dp->intrlock);
45545748Sduboff if (dp->mac_suspended) {
45555748Sduboff mutex_exit(&dp->intrlock);
45565748Sduboff return (EIO);
45575748Sduboff }
45585748Sduboff bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
45595748Sduboff dp->rxmode |= RXMODE_ENABLE;
45605748Sduboff
45615748Sduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
45625748Sduboff err = EIO;
45635748Sduboff }
45645748Sduboff mutex_exit(&dp->intrlock);
45655748Sduboff
45665748Sduboff return (err);
45675748Sduboff }
45685748Sduboff
45695748Sduboff /*
45705748Sduboff * gem_m_tx is used only for sending data packets into ethernet wire.
45715748Sduboff */
45725748Sduboff static mblk_t *
gem_m_tx(void * arg,mblk_t * mp)45735748Sduboff gem_m_tx(void *arg, mblk_t *mp)
45745748Sduboff {
45755748Sduboff uint32_t flags = 0;
45765748Sduboff struct gem_dev *dp = arg;
45775748Sduboff mblk_t *tp;
45785748Sduboff
45795748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
45805748Sduboff
45815748Sduboff ASSERT(dp->nic_state == NIC_STATE_ONLINE);
45825748Sduboff if (dp->mii_state != MII_STATE_LINKUP) {
45835748Sduboff /* Some nics hate to send packets when the link is down. */
45845748Sduboff while (mp) {
45855748Sduboff tp = mp->b_next;
45865748Sduboff mp->b_next = NULL;
45875748Sduboff freemsg(mp);
45885748Sduboff mp = tp;
45895748Sduboff }
45905748Sduboff return (NULL);
45915748Sduboff }
45925748Sduboff
45935748Sduboff return (gem_send_common(dp, mp, flags));
45945748Sduboff }
45955748Sduboff
45965748Sduboff static void
gem_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)45975748Sduboff gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
45985748Sduboff {
45995748Sduboff DPRINTF(0, (CE_CONT, "!%s: %s: called",
46005748Sduboff ((struct gem_dev *)arg)->name, __func__));
46015748Sduboff
46025748Sduboff gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
46035748Sduboff }
46045748Sduboff
/*
 * gem_m_getcapab: GLDv3 mc_getcapab(9E) entry point.
 * No optional MAC capabilities (e.g. checksum offload, LSO) are
 * advertised by this framework.
 */
/* ARGSUSED */
static boolean_t
gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	return (B_FALSE);
}
46115748Sduboff
/*
 * gem_gld3_init: fill in the mac_register_t used to register this
 * device with the GLDv3 framework (mac_register(9F)).
 */
static void
gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
{
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dp;
	macp->m_dip = dp->dip;
	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
	macp->m_callbacks = &gem_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = dp->mtu;

	/* reserve room for a VLAN tag when the chip supports tagging */
	if (dp->misc_flag & GEM_VLAN) {
		macp->m_margin = VTAG_SIZE;
	}
}
46275748Sduboff
46285748Sduboff /* ======================================================================== */
46295748Sduboff /*
46305748Sduboff * attach/detatch support
46315748Sduboff */
46325748Sduboff /* ======================================================================== */
46335748Sduboff static void
gem_read_conf(struct gem_dev * dp)46345748Sduboff gem_read_conf(struct gem_dev *dp)
46355748Sduboff {
46367116Sduboff int val;
46375748Sduboff
46385748Sduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
46395748Sduboff
46405748Sduboff /*
46415748Sduboff * Get media mode infomation from .conf file
46425748Sduboff */
46435748Sduboff dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
46445748Sduboff dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
46455748Sduboff dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
46465748Sduboff dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
46475748Sduboff dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
46485748Sduboff dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
46495748Sduboff dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
46505748Sduboff dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
46515748Sduboff
46525748Sduboff if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
46535748Sduboff DDI_PROP_DONTPASS, "full-duplex"))) {
46545748Sduboff dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
46555748Sduboff dp->anadv_autoneg = B_FALSE;
46567116Sduboff if (dp->full_duplex) {
46577116Sduboff dp->anadv_1000hdx = B_FALSE;
46587116Sduboff dp->anadv_100hdx = B_FALSE;
46597116Sduboff dp->anadv_10hdx = B_FALSE;
46607116Sduboff } else {
46617116Sduboff dp->anadv_1000fdx = B_FALSE;
46627116Sduboff dp->anadv_100fdx = B_FALSE;
46637116Sduboff dp->anadv_10fdx = B_FALSE;
46647116Sduboff }
46655748Sduboff }
46665748Sduboff
46675748Sduboff if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
46685748Sduboff dp->anadv_autoneg = B_FALSE;
46695748Sduboff switch (val) {
46705748Sduboff case 1000:
46715748Sduboff dp->speed = GEM_SPD_1000;
46725748Sduboff dp->anadv_100t4 = B_FALSE;
46735748Sduboff dp->anadv_100fdx = B_FALSE;
46745748Sduboff dp->anadv_100hdx = B_FALSE;
46755748Sduboff dp->anadv_10fdx = B_FALSE;
46765748Sduboff dp->anadv_10hdx = B_FALSE;
46775748Sduboff break;
46785748Sduboff case 100:
46795748Sduboff dp->speed = GEM_SPD_100;
46805748Sduboff dp->anadv_1000fdx = B_FALSE;
46815748Sduboff dp->anadv_1000hdx = B_FALSE;
46825748Sduboff dp->anadv_10fdx = B_FALSE;
46835748Sduboff dp->anadv_10hdx = B_FALSE;
46845748Sduboff break;
46855748Sduboff case 10:
46865748Sduboff dp->speed = GEM_SPD_10;
46875748Sduboff dp->anadv_1000fdx = B_FALSE;
46885748Sduboff dp->anadv_1000hdx = B_FALSE;
46895748Sduboff dp->anadv_100t4 = B_FALSE;
46905748Sduboff dp->anadv_100fdx = B_FALSE;
46915748Sduboff dp->anadv_100hdx = B_FALSE;
46925748Sduboff break;
46935748Sduboff default:
46945748Sduboff cmn_err(CE_WARN,
46955748Sduboff "!%s: property %s: illegal value:%d",
46967116Sduboff dp->name, "speed", val);
46975748Sduboff dp->anadv_autoneg = B_TRUE;
46985748Sduboff break;
46995748Sduboff }
47005748Sduboff }
47015748Sduboff
47025748Sduboff val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
47035748Sduboff if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
47045748Sduboff cmn_err(CE_WARN,
47055748Sduboff "!%s: property %s: illegal value:%d",
47067116Sduboff dp->name, "flow-control", val);
47075748Sduboff } else {
47085748Sduboff val = min(val, dp->gc.gc_flow_control);
47095748Sduboff }
47105748Sduboff dp->anadv_flow_control = val;
47115748Sduboff
47125748Sduboff if (gem_prop_get_int(dp, "nointr", 0)) {
47135748Sduboff dp->misc_flag |= GEM_NOINTR;
47145748Sduboff cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
47155748Sduboff }
47165748Sduboff
47175748Sduboff dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
47185748Sduboff dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
47195748Sduboff dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
47205748Sduboff dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
47215748Sduboff dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
47225748Sduboff }
47235748Sduboff

/*
 * Gem kstat support
 */

/*
 * Size of the single kmem allocation that holds the gem_dev soft state
 * followed by the multicast address table, the tx buffer descriptors,
 * and the tx buffer pointer array (see gem_do_attach for the layout).
 */
#define	GEM_LOCAL_DATA_SIZE(gc)	\
	(sizeof (struct gem_dev) + \
	sizeof (struct mcast_addr) * GEM_MAXMC + \
	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
	sizeof (void *) * ((gc)->gc_tx_buf_size))
47345748Sduboff
47355748Sduboff struct gem_dev *
gem_do_attach(dev_info_t * dip,int port,struct gem_conf * gc,void * base,ddi_acc_handle_t * regs_handlep,void * lp,int lmsize)47365748Sduboff gem_do_attach(dev_info_t *dip, int port,
47375748Sduboff struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
47385748Sduboff void *lp, int lmsize)
47395748Sduboff {
47405748Sduboff struct gem_dev *dp;
47415748Sduboff int i;
47425748Sduboff ddi_iblock_cookie_t c;
47435748Sduboff mac_register_t *macp = NULL;
47445748Sduboff int ret;
47455748Sduboff int unit;
47465748Sduboff int nports;
47475748Sduboff
47485748Sduboff unit = ddi_get_instance(dip);
47495748Sduboff if ((nports = gc->gc_nports) == 0) {
47505748Sduboff nports = 1;
47515748Sduboff }
47525748Sduboff if (nports == 1) {
47535748Sduboff ddi_set_driver_private(dip, NULL);
47545748Sduboff }
47555748Sduboff
47565748Sduboff DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
47575748Sduboff unit));
47585748Sduboff
47595748Sduboff /*
47605748Sduboff * Allocate soft data structure
47615748Sduboff */
47625748Sduboff dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
47635748Sduboff
47645748Sduboff if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
47655748Sduboff cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
47665748Sduboff unit, __func__);
47675748Sduboff return (NULL);
47685748Sduboff }
47695748Sduboff /* ddi_set_driver_private(dip, dp); */
47705748Sduboff
47715748Sduboff /* link to private area */
47727116Sduboff dp->private = lp;
47735748Sduboff dp->priv_size = lmsize;
47745748Sduboff dp->mc_list = (struct mcast_addr *)&dp[1];
47755748Sduboff
47765748Sduboff dp->dip = dip;
47775748Sduboff (void) sprintf(dp->name, gc->gc_name, nports * unit + port);
47785748Sduboff
47795748Sduboff /*
47805748Sduboff * Get iblock cookie
47815748Sduboff */
47825748Sduboff if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
47835748Sduboff cmn_err(CE_CONT,
47845748Sduboff "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
47855748Sduboff dp->name);
47865748Sduboff goto err_free_private;
47875748Sduboff }
47885748Sduboff dp->iblock_cookie = c;
47895748Sduboff
47905748Sduboff /*
47915748Sduboff * Initialize mutex's for this device.
47925748Sduboff */
47935748Sduboff mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
47945748Sduboff mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
47955748Sduboff cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
47965748Sduboff
47975748Sduboff /*
47985748Sduboff * configure gem parameter
47995748Sduboff */
48007116Sduboff dp->base_addr = base;
48015748Sduboff dp->regs_handle = *regs_handlep;
48025748Sduboff dp->gc = *gc;
48035748Sduboff gc = &dp->gc;
48047116Sduboff /* patch for simplify dma resource management */
48057116Sduboff gc->gc_tx_max_frags = 1;
48067116Sduboff gc->gc_tx_max_descs_per_pkt = 1;
48077116Sduboff gc->gc_tx_ring_size = gc->gc_tx_buf_size;
48087116Sduboff gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
48097116Sduboff gc->gc_tx_desc_write_oo = B_TRUE;
48105748Sduboff
48115748Sduboff gc->gc_nports = nports; /* fix nports */
48125748Sduboff
48135748Sduboff /* fix copy threadsholds */
48145748Sduboff gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
48155748Sduboff gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
48165748Sduboff
48175748Sduboff /* fix rx buffer boundary for iocache line size */
48185748Sduboff ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
48195748Sduboff ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
48205748Sduboff gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
48215748Sduboff gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
48225748Sduboff
48237116Sduboff /* fix descriptor boundary for cache line size */
48247116Sduboff gc->gc_dma_attr_desc.dma_attr_align =
48257116Sduboff max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
48267116Sduboff
48275748Sduboff /* patch get_packet method */
48285748Sduboff if (gc->gc_get_packet == NULL) {
48295748Sduboff gc->gc_get_packet = &gem_get_packet_default;
48305748Sduboff }
48315748Sduboff
48325748Sduboff /* patch get_rx_start method */
48335748Sduboff if (gc->gc_rx_start == NULL) {
48345748Sduboff gc->gc_rx_start = &gem_rx_start_default;
48355748Sduboff }
48365748Sduboff
48375748Sduboff /* calculate descriptor area */
48385748Sduboff if (gc->gc_rx_desc_unit_shift >= 0) {
48395748Sduboff dp->rx_desc_size =
48405748Sduboff ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
48415748Sduboff gc->gc_dma_attr_desc.dma_attr_align);
48425748Sduboff }
48435748Sduboff if (gc->gc_tx_desc_unit_shift >= 0) {
48445748Sduboff dp->tx_desc_size =
48455748Sduboff ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
48465748Sduboff gc->gc_dma_attr_desc.dma_attr_align);
48475748Sduboff }
48485748Sduboff
48495748Sduboff dp->mtu = ETHERMTU;
48505748Sduboff dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
48515748Sduboff /* link tx buffers */
48525748Sduboff for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
48535748Sduboff dp->tx_buf[i].txb_next =
48545748Sduboff &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
48555748Sduboff }
48565748Sduboff
48575748Sduboff dp->rxmode = 0;
48585748Sduboff dp->speed = GEM_SPD_10; /* default is 10Mbps */
48595748Sduboff dp->full_duplex = B_FALSE; /* default is half */
48605748Sduboff dp->flow_control = FLOW_CONTROL_NONE;
48617116Sduboff dp->poll_pkt_delay = 8; /* typical coalease for rx packets */
48625748Sduboff
48635748Sduboff /* performance tuning parameters */
48645748Sduboff dp->txthr = ETHERMAX; /* tx fifo threshold */
48655748Sduboff dp->txmaxdma = 16*4; /* tx max dma burst size */
48665748Sduboff dp->rxthr = 128; /* rx fifo threshold */
48675748Sduboff dp->rxmaxdma = 16*4; /* rx max dma burst size */
48685748Sduboff
48695748Sduboff /*
48705748Sduboff * Get media mode information from .conf file
48715748Sduboff */
48725748Sduboff gem_read_conf(dp);
48735748Sduboff
48745748Sduboff /* rx_buf_len is required buffer length without padding for alignment */
48755748Sduboff dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
48765748Sduboff
48775748Sduboff /*
48785748Sduboff * Reset the chip
48795748Sduboff */
48805748Sduboff mutex_enter(&dp->intrlock);
48815748Sduboff dp->nic_state = NIC_STATE_STOPPED;
48825748Sduboff ret = (*dp->gc.gc_reset_chip)(dp);
48835748Sduboff mutex_exit(&dp->intrlock);
48845748Sduboff if (ret != GEM_SUCCESS) {
48855748Sduboff goto err_free_regs;
48865748Sduboff }
48875748Sduboff
48885748Sduboff /*
48895748Sduboff * HW dependant paremeter initialization
48905748Sduboff */
48915748Sduboff mutex_enter(&dp->intrlock);
48925748Sduboff ret = (*dp->gc.gc_attach_chip)(dp);
48935748Sduboff mutex_exit(&dp->intrlock);
48945748Sduboff if (ret != GEM_SUCCESS) {
48955748Sduboff goto err_free_regs;
48965748Sduboff }
48975748Sduboff
48985748Sduboff #ifdef DEBUG_MULTIFRAGS
48995748Sduboff dp->gc.gc_tx_copy_thresh = dp->mtu;
49005748Sduboff #endif
49015748Sduboff /* allocate tx and rx resources */
49025748Sduboff if (gem_alloc_memory(dp)) {
49035748Sduboff goto err_free_regs;
49045748Sduboff }
49055748Sduboff
49065748Sduboff DPRINTF(0, (CE_CONT,
49075748Sduboff "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
49085748Sduboff dp->name, (long)dp->base_addr,
49095748Sduboff dp->dev_addr.ether_addr_octet[0],
49105748Sduboff dp->dev_addr.ether_addr_octet[1],
49115748Sduboff dp->dev_addr.ether_addr_octet[2],
49125748Sduboff dp->dev_addr.ether_addr_octet[3],
49135748Sduboff dp->dev_addr.ether_addr_octet[4],
49145748Sduboff dp->dev_addr.ether_addr_octet[5]));
49155748Sduboff
49165748Sduboff /* copy mac address */
49175748Sduboff dp->cur_addr = dp->dev_addr;
49185748Sduboff
49195748Sduboff gem_gld3_init(dp, macp);
49205748Sduboff
49215748Sduboff /* Probe MII phy (scan phy) */
49225748Sduboff dp->mii_lpable = 0;
49235748Sduboff dp->mii_advert = 0;
49245748Sduboff dp->mii_exp = 0;
49255748Sduboff dp->mii_ctl1000 = 0;
49265748Sduboff dp->mii_stat1000 = 0;
49275748Sduboff if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
49285748Sduboff goto err_free_ring;
49295748Sduboff }
49305748Sduboff
49315748Sduboff /* mask unsupported abilities */
49327116Sduboff dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
49335748Sduboff dp->anadv_1000fdx &=
49345748Sduboff BOOLEAN(dp->mii_xstatus &
49355748Sduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
49365748Sduboff dp->anadv_1000hdx &=
49375748Sduboff BOOLEAN(dp->mii_xstatus &
49385748Sduboff (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
49395748Sduboff dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
49405748Sduboff dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
49415748Sduboff dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
49425748Sduboff dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
49435748Sduboff dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
49445748Sduboff
49455748Sduboff gem_choose_forcedmode(dp);
49465748Sduboff
49475748Sduboff /* initialize MII phy if required */
49485748Sduboff if (dp->gc.gc_mii_init) {
49495748Sduboff if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
49505748Sduboff goto err_free_ring;
49515748Sduboff }
49525748Sduboff }
49535748Sduboff
49545748Sduboff /*
49555748Sduboff * initialize kstats including mii statistics
49565748Sduboff */
49575748Sduboff gem_nd_setup(dp);
49585748Sduboff
49595748Sduboff /*
49605748Sduboff * Add interrupt to system.
49615748Sduboff */
49625748Sduboff if (ret = mac_register(macp, &dp->mh)) {
49635748Sduboff cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
49645748Sduboff dp->name, ret);
49655748Sduboff goto err_release_stats;
49665748Sduboff }
49675748Sduboff mac_free(macp);
49685748Sduboff macp = NULL;
49695748Sduboff
49705748Sduboff if (dp->misc_flag & GEM_SOFTINTR) {
49715748Sduboff if (ddi_add_softintr(dip,
49725748Sduboff DDI_SOFTINT_LOW, &dp->soft_id,
49735748Sduboff NULL, NULL,
49745748Sduboff (uint_t (*)(caddr_t))gem_intr,
49755748Sduboff (caddr_t)dp) != DDI_SUCCESS) {
49765748Sduboff cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
49775748Sduboff dp->name);
49785748Sduboff goto err_unregister;
49795748Sduboff }
49805748Sduboff } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
49815748Sduboff if (ddi_add_intr(dip, 0, NULL, NULL,
49825748Sduboff (uint_t (*)(caddr_t))gem_intr,
49835748Sduboff (caddr_t)dp) != DDI_SUCCESS) {
49845748Sduboff cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
49855748Sduboff goto err_unregister;
49865748Sduboff }
49875748Sduboff } else {
49885748Sduboff /*
49895748Sduboff * Dont use interrupt.
49905748Sduboff * schedule first call of gem_intr_watcher
49915748Sduboff */
49925748Sduboff dp->intr_watcher_id =
49935748Sduboff timeout((void (*)(void *))gem_intr_watcher,
49945748Sduboff (void *)dp, drv_usectohz(3*1000000));
49955748Sduboff }
49965748Sduboff
49975748Sduboff /* link this device to dev_info */
49985748Sduboff dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
49997116Sduboff dp->port = port;
50005748Sduboff ddi_set_driver_private(dip, (caddr_t)dp);
50015748Sduboff
50027116Sduboff /* reset mii phy and start mii link watcher */
50035748Sduboff gem_mii_start(dp);
50045748Sduboff
50055748Sduboff DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
50065748Sduboff return (dp);
50075748Sduboff
50085748Sduboff err_unregister:
50095748Sduboff (void) mac_unregister(dp->mh);
50105748Sduboff err_release_stats:
50115748Sduboff /* release NDD resources */
50125748Sduboff gem_nd_cleanup(dp);
50135748Sduboff
50145748Sduboff err_free_ring:
50155748Sduboff gem_free_memory(dp);
50165748Sduboff err_free_regs:
50175748Sduboff ddi_regs_map_free(&dp->regs_handle);
50185748Sduboff err_free_locks:
50195748Sduboff mutex_destroy(&dp->xmitlock);
50205748Sduboff mutex_destroy(&dp->intrlock);
50215748Sduboff cv_destroy(&dp->tx_drain_cv);
50225748Sduboff err_free_private:
50235748Sduboff if (macp) {
50245748Sduboff mac_free(macp);
50255748Sduboff }
50265748Sduboff kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
50275748Sduboff
50285748Sduboff return (NULL);
50295748Sduboff }
50305748Sduboff
/*
 * Detach all gem_dev instances linked to dip and release every
 * resource acquired during attach, in reverse order of acquisition.
 * Returns DDI_SUCCESS, or DDI_FAILURE if the GLDv3 framework still
 * holds a mac handle (in which case teardown is aborted).
 */
int
gem_do_detach(dev_info_t *dip)
{
	struct gem_dev *dp;
	struct gem_dev *tmp;
	caddr_t private;
	int priv_size;
	ddi_acc_handle_t rh;

	dp = GEM_GET_DEV(dip);
	if (dp == NULL) {
		/* nothing is attached; trivially successful */
		return (DDI_SUCCESS);
	}

	/*
	 * Save the resources shared by all ports on this nic; they are
	 * released only after every per-port gem_dev has been destroyed.
	 */
	rh = dp->regs_handle;
	private = dp->private;
	priv_size = dp->priv_size;

	while (dp) {
		/* unregister with gld v3 */
		if (mac_unregister(dp->mh) != 0) {
			/* mac layer still busy; cannot detach now */
			return (DDI_FAILURE);
		}

		/* ensure any rx buffers are not used */
		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
			/* resource is busy */
			cmn_err(CE_PANIC,
			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
			    dp->name, __func__,
			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
			/* NOT REACHED */
		}

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* unregister interrupt handler */
		if (dp->misc_flag & GEM_SOFTINTR) {
			ddi_remove_softintr(dp->soft_id);
		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
			ddi_remove_intr(dip, 0, dp->iblock_cookie);
		} else {
			/* stop interrupt watcher */
			if (dp->intr_watcher_id) {
				/*
				 * untimeout() returns -1 while the
				 * handler is running; spin until it
				 * has drained.
				 */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
				dp->intr_watcher_id = 0;
			}
		}

		/* release NDD resources */
		gem_nd_cleanup(dp);
		/* release buffers, descriptors and dma resources */
		gem_free_memory(dp);

		/* release locks and condition variables */
		mutex_destroy(&dp->xmitlock);
		mutex_destroy(&dp->intrlock);
		cv_destroy(&dp->tx_drain_cv);

		/* release basic memory resources */
		tmp = dp->next;
		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
		dp = tmp;
	}

	/* release common private memory for the nic */
	kmem_free(private, priv_size);

	/* release register mapping resources */
	ddi_regs_map_free(&rh);

	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
	    ddi_driver_name(dip), ddi_get_instance(dip)));

	return (DDI_SUCCESS);
}
51095748Sduboff
/*
 * DDI_SUSPEND entry point: quiesce every gem_dev instance on dip.
 * Stops the mii link watcher, the no-intr interrupt watcher and the
 * tx timeout watcher, shuts the mac down, and sets mac_suspended so
 * that no further register access is attempted until gem_resume().
 * Always returns DDI_SUCCESS.
 */
int
gem_suspend(dev_info_t *dip)
{
	struct gem_dev *dp;

	/*
	 * stop the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* stop interrupt watcher for no-intr mode */
		if (dp->misc_flag & GEM_NOINTR) {
			if (dp->intr_watcher_id) {
				/*
				 * untimeout() returns -1 while the
				 * handler is running; wait for it to
				 * finish before continuing.
				 */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
			}
			dp->intr_watcher_id = 0;
		}

		/* stop tx timeout watcher */
		if (dp->timeout_id) {
			while (untimeout(dp->timeout_id) == -1)
				;
			dp->timeout_id = 0;
		}

		/* make the nic state inactive */
		mutex_enter(&dp->intrlock);
		(void) gem_mac_stop(dp, 0);
		ASSERT(!dp->mac_active);

		/* no further register access */
		dp->mac_suspended = B_TRUE;
		mutex_exit(&dp->intrlock);
	}

	/* XXX - power down the nic */

	return (DDI_SUCCESS);
}
51585748Sduboff
/*
 * DDI_RESUME entry point: re-initialize every gem_dev instance on dip
 * after power-up, restart the watchers, and bring the mac back online.
 * On failure, the chip of the instance being resumed is reset and left
 * stopped, and DDI_FAILURE is returned.
 */
int
gem_resume(dev_info_t *dip)
{
	struct gem_dev *dp;

	/*
	 * restart the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/*
		 * Bring up the nic after power up
		 */

		/* gem_xxx.c layer to setup power management state. */
		ASSERT(!dp->mac_active);

		/* reset the chip, because we are just after power up. */
		mutex_enter(&dp->intrlock);

		/* re-enable register access before touching the chip */
		dp->mac_suspended = B_FALSE;
		dp->nic_state = NIC_STATE_STOPPED;

		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
			mutex_exit(&dp->intrlock);
			goto err;
		}
		mutex_exit(&dp->intrlock);

		/* initialize mii phy because we are just after power up */
		if (dp->gc.gc_mii_init) {
			(void) (*dp->gc.gc_mii_init)(dp);
		}

		if (dp->misc_flag & GEM_NOINTR) {
			/*
			 * schedule first call of gem_intr_watcher
			 * instead of interrupts.
			 */
			dp->intr_watcher_id =
			    timeout((void (*)(void *))gem_intr_watcher,
			    (void *)dp, drv_usectohz(3*1000000));
		}

		/* restart mii link watcher */
		gem_mii_start(dp);

		/* restart mac */
		mutex_enter(&dp->intrlock);

		if (gem_mac_init(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_INITIALIZED;

		/* setup media mode if the link has been up */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}

		/* enable mac address and rx filter */
		dp->rxmode |= RXMODE_ENABLE;
		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_ONLINE;

		/* restart tx timeout watcher */
		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
		    (void *)dp,
		    dp->gc.gc_tx_timeout_interval);

		/* now the nic is fully functional */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if (gem_mac_start(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}
		mutex_exit(&dp->intrlock);
	}

	return (DDI_SUCCESS);

err_reset:
	/*
	 * Undo the partial bring-up of the instance that failed:
	 * drain its interrupt watcher, then reset and stop its chip.
	 */
	if (dp->intr_watcher_id) {
		while (untimeout(dp->intr_watcher_id) == -1)
			;
		dp->intr_watcher_id = 0;
	}
	mutex_enter(&dp->intrlock);
	(*dp->gc.gc_reset_chip)(dp);
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);

err:
	return (DDI_FAILURE);
}
52695748Sduboff
52705748Sduboff /*
52715748Sduboff * misc routines for PCI
52725748Sduboff */
52735748Sduboff uint8_t
gem_search_pci_cap(dev_info_t * dip,ddi_acc_handle_t conf_handle,uint8_t target)52745748Sduboff gem_search_pci_cap(dev_info_t *dip,
52755748Sduboff ddi_acc_handle_t conf_handle, uint8_t target)
52765748Sduboff {
52775748Sduboff uint8_t pci_cap_ptr;
52785748Sduboff uint32_t pci_cap;
52795748Sduboff
52805748Sduboff /* search power management capablities */
52815748Sduboff pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
52825748Sduboff while (pci_cap_ptr) {
52835748Sduboff /* read pci capability header */
52845748Sduboff pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
52855748Sduboff if ((pci_cap & 0xff) == target) {
52865748Sduboff /* found */
52875748Sduboff break;
52885748Sduboff }
52895748Sduboff /* get next_ptr */
52905748Sduboff pci_cap_ptr = (pci_cap >> 8) & 0xff;
52915748Sduboff }
52925748Sduboff return (pci_cap_ptr);
52935748Sduboff }
52945748Sduboff
52955748Sduboff int
gem_pci_set_power_state(dev_info_t * dip,ddi_acc_handle_t conf_handle,uint_t new_mode)52965748Sduboff gem_pci_set_power_state(dev_info_t *dip,
52975748Sduboff ddi_acc_handle_t conf_handle, uint_t new_mode)
52985748Sduboff {
52995748Sduboff uint8_t pci_cap_ptr;
53005748Sduboff uint32_t pmcsr;
53015748Sduboff uint_t unit;
53025748Sduboff const char *drv_name;
53035748Sduboff
53045748Sduboff ASSERT(new_mode < 4);
53055748Sduboff
53065748Sduboff unit = ddi_get_instance(dip);
53075748Sduboff drv_name = ddi_driver_name(dip);
53085748Sduboff
53095748Sduboff /* search power management capablities */
53105748Sduboff pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
53115748Sduboff
53125748Sduboff if (pci_cap_ptr == 0) {
53135748Sduboff cmn_err(CE_CONT,
53145748Sduboff "!%s%d: doesn't have pci power management capability",
53155748Sduboff drv_name, unit);
53165748Sduboff return (DDI_FAILURE);
53175748Sduboff }
53185748Sduboff
53195748Sduboff /* read power management capabilities */
53205748Sduboff pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
53215748Sduboff
53225748Sduboff DPRINTF(0, (CE_CONT,
53235748Sduboff "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
53245748Sduboff drv_name, unit, pci_cap_ptr, pmcsr));
53255748Sduboff
53265748Sduboff /*
53275748Sduboff * Is the resuested power mode supported?
53285748Sduboff */
53295748Sduboff /* not yet */
53305748Sduboff
53315748Sduboff /*
53325748Sduboff * move to new mode
53335748Sduboff */
53345748Sduboff pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
53355748Sduboff pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
53365748Sduboff
53375748Sduboff return (DDI_SUCCESS);
53385748Sduboff }
53395748Sduboff
53405748Sduboff /*
53415748Sduboff * select suitable register for by specified address space or register
53425748Sduboff * offset in PCI config space
53435748Sduboff */
53445748Sduboff int
gem_pci_regs_map_setup(dev_info_t * dip,uint32_t which,uint32_t mask,struct ddi_device_acc_attr * attrp,caddr_t * basep,ddi_acc_handle_t * hp)53455748Sduboff gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
53465748Sduboff struct ddi_device_acc_attr *attrp,
53475748Sduboff caddr_t *basep, ddi_acc_handle_t *hp)
53485748Sduboff {
53495748Sduboff struct pci_phys_spec *regs;
53505748Sduboff uint_t len;
53515748Sduboff uint_t unit;
53525748Sduboff uint_t n;
53535748Sduboff uint_t i;
53545748Sduboff int ret;
53555748Sduboff const char *drv_name;
53565748Sduboff
53575748Sduboff unit = ddi_get_instance(dip);
53585748Sduboff drv_name = ddi_driver_name(dip);
53595748Sduboff
53605748Sduboff /* Search IO-range or memory-range to be mapped */
53615748Sduboff regs = NULL;
53625748Sduboff len = 0;
53635748Sduboff
53645748Sduboff if ((ret = ddi_prop_lookup_int_array(
53655748Sduboff DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
53665748Sduboff "reg", (void *)®s, &len)) != DDI_PROP_SUCCESS) {
53675748Sduboff cmn_err(CE_WARN,
53685748Sduboff "!%s%d: failed to get reg property (ret:%d)",
53695748Sduboff drv_name, unit, ret);
53705748Sduboff return (DDI_FAILURE);
53715748Sduboff }
53725748Sduboff n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
53735748Sduboff
53745748Sduboff ASSERT(regs != NULL && len > 0);
53755748Sduboff
53765748Sduboff #if GEM_DEBUG_LEVEL > 0
53775748Sduboff for (i = 0; i < n; i++) {
53785748Sduboff cmn_err(CE_CONT,
53795748Sduboff "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
53805748Sduboff drv_name, unit, i,
53815748Sduboff regs[i].pci_phys_hi,
53825748Sduboff regs[i].pci_phys_mid,
53835748Sduboff regs[i].pci_phys_low,
53845748Sduboff regs[i].pci_size_hi,
53855748Sduboff regs[i].pci_size_low);
53865748Sduboff }
53875748Sduboff #endif
53885748Sduboff for (i = 0; i < n; i++) {
53895748Sduboff if ((regs[i].pci_phys_hi & mask) == which) {
53905748Sduboff /* it's the requested space */
53915748Sduboff ddi_prop_free(regs);
53925748Sduboff goto address_range_found;
53935748Sduboff }
53945748Sduboff }
53955748Sduboff ddi_prop_free(regs);
53965748Sduboff return (DDI_FAILURE);
53975748Sduboff
53985748Sduboff address_range_found:
53995748Sduboff if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
54005748Sduboff != DDI_SUCCESS) {
54015748Sduboff cmn_err(CE_CONT,
54025748Sduboff "!%s%d: ddi_regs_map_setup failed (ret:%d)",
54035748Sduboff drv_name, unit, ret);
54045748Sduboff }
54055748Sduboff
54065748Sduboff return (ret);
54075748Sduboff }
54085748Sduboff
/*
 * Module-load hook: install the GLDv3 mac entry points into dop.
 * Call from the driver's _init() before mod_install().
 */
void
gem_mod_init(struct dev_ops *dop, char *name)
{
	mac_init_ops(dop, name);
}
54145748Sduboff
/*
 * Module-unload hook: undo gem_mod_init().
 * Call from the driver's _fini() after mod_remove() succeeds.
 */
void
gem_mod_fini(struct dev_ops *dop)
{
	mac_fini_ops(dop);
}
5420