xref: /onnv-gate/usr/src/uts/common/io/sfe/sfe_util.c (revision 5748:772468a153b9)
/*
 * sfe_util.c: general ethernet mac driver framework version 2.6
 *
 * Copyright (c) 2002-2007 Masayuki Murayama.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the author nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"	/* sfe device driver */

/*
 * System Header files.
 */
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/vtrace.h>
#include <sys/ethernet.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/stream.h>		/* required for MBLK* */
#include <sys/strsun.h>		/* required for mionack() */
#include <sys/byteorder.h>
#include <sys/pci.h>
#include <inet/common.h>
#include <inet/led.h>
#include <inet/mi.h>
#include <inet/nd.h>
#include <sys/crc32.h>

#include <sys/note.h>

#include "sfe_mii.h"
#include "sfe_util.h"



extern char ident[];

/* Debugging support */
#ifdef GEM_DEBUG_LEVEL
static int gem_debug = GEM_DEBUG_LEVEL;
#define	DPRINTF(n, args)	if (gem_debug > (n)) cmn_err args
#else
#define	DPRINTF(n, args)
#undef ASSERT
#define	ASSERT(x)
#endif
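
/*
 * Note: when GEM_DEBUG_LEVEL is not defined, not only is DPRINTF()
 * compiled out, but ASSERT() is also redefined to a no-op, so
 * assertion checks are effective in debug builds only.
 */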

#define	IOC_LINESIZE	0x40	/* Is it right for amd64? */

/*
 * Useful macros and typedefs
 */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

#define	GET_NET16(p)	((((uint8_t *)(p))[0] << 8) | ((uint8_t *)(p))[1])
#define	GET_ETHERTYPE(p)	GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)

#define	GET_IPTYPEv4(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 9])
#define	GET_IPTYPEv6(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 6])


#ifndef INT32_MAX
#define	INT32_MAX	0x7fffffff
#endif

#define	VTAG_OFF	(ETHERADDRL*2)
#ifndef VTAG_SIZE
#define	VTAG_SIZE	4
#endif
#ifndef VTAG_TPID
#define	VTAG_TPID	0x8100U
#endif

#define	GET_TXBUF(dp, sn)	\
	&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
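
/*
 * Tx buffers are addressed by free-running sequence numbers
 * (seqnum_t) rather than by ring indices; GET_TXBUF() adds
 * tx_slots_base to a sequence number and reduces the sum modulo
 * gc_tx_buf_size via SLOT() to locate the corresponding entry.
 */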

#ifndef offsetof
#define	offsetof(t, m)	((long)&(((t *) 0)->m))
#endif
#define	TXFLAG_VTAG(flag)	\
	(((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)

#define	MAXPKTBUF(dp)	\
	((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)

#define	WATCH_INTERVAL_FAST	drv_usectohz(100*1000)	/* 100 ms */
#define	BOOLEAN(x)	((x) ? 1 : 0)

/*
 * Macros to distinguish chip generations.
 */

/*
 * Private functions
 */
static void gem_mii_start(struct gem_dev *);
static void gem_mii_stop(struct gem_dev *);

/* local buffer management */
static void gem_nd_setup(struct gem_dev *dp);
static void gem_nd_cleanup(struct gem_dev *dp);
static int gem_alloc_memory(struct gem_dev *);
static void gem_free_memory(struct gem_dev *);
static void gem_init_rx_ring(struct gem_dev *);
static void gem_init_tx_ring(struct gem_dev *);
__INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);

static void gem_tx_timeout(struct gem_dev *);
static void gem_mii_link_watcher(struct gem_dev *dp);
static int gem_mac_init(struct gem_dev *dp);
static int gem_mac_start(struct gem_dev *dp);
static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);

static	struct ether_addr	gem_etherbroadcastaddr = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

int gem_speed_value[] = {10, 100, 1000};

/* ============================================================== */
/*
 * Misc runtime routines
 */
/* ============================================================== */
/*
 * Ether CRC calculation according to 21143 data sheet
 */
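/*
 * These routines are typically used to compute multicast hash filter
 * bits from a 6-byte ethernet address; whether the little-endian or
 * the big-endian variant applies depends on the controller family.
 */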
uint32_t
gem_ether_crc_le(const uint8_t *addr, int len)
{
	uint32_t	crc;

	CRC32(crc, addr, len, 0xffffffffU, crc32_table);
	return (crc);
}

uint32_t
gem_ether_crc_be(const uint8_t *addr, int len)
{
	int		idx;
	int		bit;
	uint_t		data;
	uint32_t	crc;
#define	CRC32_POLY_BE	0x04c11db7

	crc = 0xffffffff;
	for (idx = 0; idx < len; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			crc = (crc << 1)
			    ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
		}
	}
	return (crc);
#undef	CRC32_POLY_BE
}

int
gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
{
	char	propname[32];

	(void) sprintf(propname, prop_template, dp->name);

	return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
	    DDI_PROP_DONTPASS, propname, def_val));
}

static int
gem_population(uint32_t x)
{
	int	i;
	int	cnt;

	cnt = 0;
	for (i = 0; i < 32; i++) {
		if (x & (1 << i)) {
			cnt++;
		}
	}
	return (cnt);
}


/* ============================================================== */
/*
 * vlan tag operations
 */
/* ============================================================== */

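/*
 * gem_add_vtag() inserts an 802.1Q tag (TPID 0x8100 followed by the
 * given TCI) in front of the ethertype field.  The caller must
 * guarantee at least VTAG_SIZE bytes of headroom before b_rptr, as
 * asserted below; the unaligned leading bytes are fixed up
 * individually and the remainder is moved with 32-bit copies.
 */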
__INLINE__
static void
gem_add_vtag(mblk_t *mp, int vtag)
{
	uint32_t	*bp;

	/* we must have enough room to insert vtag before b_rptr */
	ASSERT((long)mp->b_rptr - (long)mp->b_datap->db_base >= VTAG_SIZE);

	bp = (void *)mp->b_rptr;
	mp->b_rptr = (uint8_t *)bp - VTAG_SIZE;

	switch (3ull & (long)bp) {
	case 3:
		((uint8_t *)bp)[VTAG_OFF-3] = ((uint8_t *)bp)[VTAG_OFF+1];
		/* FALLTHROUGH */
	case 2:
		((uint8_t *)bp)[VTAG_OFF-2] = ((uint8_t *)bp)[VTAG_OFF+2];
		/* FALLTHROUGH */
	case 1:
		((uint8_t *)bp)[VTAG_OFF-1] = ((uint8_t *)bp)[VTAG_OFF+3];
		break;
	}
	((uint8_t *)bp)[VTAG_OFF + 0] = (uint8_t)(VTAG_TPID >> 8);
	((uint8_t *)bp)[VTAG_OFF + 1] = (uint8_t)VTAG_TPID;
	((uint8_t *)bp)[VTAG_OFF + 2] = (uint8_t)(vtag >> 8);
	((uint8_t *)bp)[VTAG_OFF + 3] = (uint8_t)vtag;
	bp = (void *)(long)((~3ull) & (long)bp);
	bp[0] = bp[1];
	bp[1] = bp[2];
	bp[2] = bp[3];
}
#pragma inline(gem_add_vtag)
/* ============================================================== */
/*
 * IO cache flush
 */
/* ============================================================== */
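/*
 * The two sync routines below handle wrap-around of a descriptor
 * ring: when the range [head, head + nslot) crosses the end of the
 * ring, it is synced in two pieces, the part that wrapped to the
 * start of the ring first, then the part from head up to the end of
 * the ring.
 */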
__INLINE__ void
gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
{
	int	n;
	int	m;
	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;

	/* sync active descriptors */
	if (rx_desc_unit_shift < 0 || nslot == 0) {
		/* no rx descriptor ring */
		return;
	}

	n = dp->gc.gc_rx_ring_size - head;
	if ((m = nslot - n) > 0) {
		(void) ddi_dma_sync(dp->desc_dma_handle,
		    (off_t)0,
		    (size_t)(m << rx_desc_unit_shift),
		    how);
		nslot = n;
	}

	(void) ddi_dma_sync(dp->desc_dma_handle,
	    (off_t)(head << rx_desc_unit_shift),
	    (size_t)(nslot << rx_desc_unit_shift),
	    how);
}

__INLINE__ void
gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
{
	int	n;
	int	m;
	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;

	/* sync active descriptors */
	if (tx_desc_unit_shift < 0 || nslot == 0) {
		/* no tx descriptor ring */
		return;
	}

	n = dp->gc.gc_tx_ring_size - head;
	if ((m = nslot - n) > 0) {
		(void) ddi_dma_sync(dp->desc_dma_handle,
		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
		    (size_t)(m << tx_desc_unit_shift),
		    how);
		nslot = n;
	}

	(void) ddi_dma_sync(dp->desc_dma_handle,
	    (off_t)((head << tx_desc_unit_shift)
	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
	    (size_t)(nslot << tx_desc_unit_shift),
	    how);
}

static void
gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
{
	gem_rx_desc_dma_sync(dp,
	    SLOT(head, dp->gc.gc_rx_ring_size), nslot,
	    DDI_DMA_SYNC_FORDEV);
}

/* ============================================================== */
/*
 * Buffer management
 */
/* ============================================================== */
static void
gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
{
	cmn_err(level,
	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
	    "tx_softq: %d[%d] %d[%d] (+%d), "
	    "tx_free: %d[%d] %d[%d] (+%d), "
	    "tx_desc: %d[%d] %d[%d] (+%d), "
	    "intr: %d[%d] (+%d)",
	    dp->name, title,
	    dp->tx_active_head,
	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail,
	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail - dp->tx_active_head,
	    dp->tx_softq_head,
	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail,
	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail - dp->tx_softq_head,
	    dp->tx_free_head,
	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail,
	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail - dp->tx_free_head,
	    dp->tx_desc_head,
	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail,
	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail - dp->tx_desc_head,
	    dp->tx_desc_intr,
	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_intr - dp->tx_desc_head);
}

static void
gem_free_rxbuf(struct rxbuf *rbp)
{
	struct gem_dev	*dp;

	dp = rbp->rxb_devp;
	ASSERT(mutex_owned(&dp->intrlock));
	rbp->rxb_next = dp->rx_buf_freelist;
	dp->rx_buf_freelist = rbp;
	dp->rx_buf_freecnt++;
}

/*
 * gem_get_rxbuf: supply a receive buffer that has been mapped into
 * DMA space.
 */
struct rxbuf *
gem_get_rxbuf(struct gem_dev *dp, int cansleep)
{
	struct rxbuf		*rbp;
	uint_t			count = 0;
	int			i;
	int			err;

	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
	    dp->rx_buf_freecnt));
	/*
	 * Get rx buffer management structure
	 */
	rbp = dp->rx_buf_freelist;
	if (rbp) {
		/* get one from the recycle list */
		ASSERT(dp->rx_buf_freecnt > 0);

		dp->rx_buf_freelist = rbp->rxb_next;
		dp->rx_buf_freecnt--;
		rbp->rxb_next = NULL;
		return (rbp);
	}

	/*
	 * Allocate a rx buffer management structure
	 */
	rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
	if (rbp == NULL) {
		/* no memory */
		return (NULL);
	}

	/*
	 * Prepare a back pointer to the device structure which will be
	 * referred to when freeing the buffer later.
	 */
	rbp->rxb_devp = dp;

	/* allocate a dma handle for rx data buffer */
	if ((err = ddi_dma_alloc_handle(dp->dip,
	    &dp->gc.gc_dma_attr_rxbuf,
	    (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
	    NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {

		cmn_err(CE_WARN,
		    "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
		    dp->name, __func__, err);

		kmem_free(rbp, sizeof (struct rxbuf));
		return (NULL);
	}

	/* allocate a bounce buffer for rx */
	if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
	    ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
	    &dp->gc.gc_buf_attr,
		/*
		 * if the nic requires a header at the top of receive buffers,
		 * it may access the rx buffer randomly.
		 */
	    (dp->gc.gc_rx_header_len > 0)
	    ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL,
	    &rbp->rxb_buf, &rbp->rxb_buf_len,
	    &rbp->rxb_bah)) != DDI_SUCCESS) {

		cmn_err(CE_WARN,
		    "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
		    dp->name, __func__, err);

		ddi_dma_free_handle(&rbp->rxb_dh);
		kmem_free(rbp, sizeof (struct rxbuf));
		return (NULL);
	}

	/* map the bounce buffer into the DMA space */
	if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
	    NULL, rbp->rxb_buf, dp->rx_buf_len,
	    ((dp->gc.gc_rx_header_len > 0)
	    ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
	    :(DDI_DMA_READ | DDI_DMA_STREAMING)),
	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL,
	    rbp->rxb_dmacookie,
	    &count)) != DDI_DMA_MAPPED) {

		ASSERT(err != DDI_DMA_INUSE);
		DPRINTF(0, (CE_WARN,
		    "!%s: %s: ddi_dma_addr_bind_handle: failed, err=%d",
		    dp->name, __func__, err));

		/*
		 * we failed to allocate a dma resource
		 * for the rx bounce buffer.
		 */
		ddi_dma_mem_free(&rbp->rxb_bah);
		ddi_dma_free_handle(&rbp->rxb_dh);
		kmem_free(rbp, sizeof (struct rxbuf));
		return (NULL);
	}

	/* collect the rest of the DMA mapping */
	for (i = 1; i < count; i++) {
		ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
	}
	rbp->rxb_nfrags = count;

	/* Now we successfully prepared an rx buffer */
	dp->rx_buf_allocated++;

	return (rbp);
}

/* ============================================================== */
/*
 * memory resource management
 */
/* ============================================================== */
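/*
 * gem_alloc_memory() allocates a single DMA-consistent region and
 * carves it into three consecutive pieces: the rx descriptor ring,
 * the tx descriptor ring, and an optional hardware-specific io area.
 * It then allocates, and binds into DMA space, a bounce buffer for
 * every tx buffer.  Each DMA object follows the usual three-step DDI
 * sequence: ddi_dma_alloc_handle(), ddi_dma_mem_alloc() and
 * ddi_dma_addr_bind_handle().
 */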
static int
gem_alloc_memory(struct gem_dev *dp)
{
	caddr_t			ring;
	caddr_t			buf;
	size_t			req_size;
	size_t			ring_len;
	size_t			buf_len;
	ddi_dma_cookie_t	ring_cookie;
	ddi_dma_cookie_t	buf_cookie;
	uint_t			count;
	int			i;
	int			err;
	struct txbuf		*tbp;
	int			tx_buf_len;
	ddi_dma_attr_t		dma_attr_txbounce;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	dp->desc_dma_handle = NULL;
	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;

	if (req_size > 0) {
		/*
		 * Alloc RX/TX descriptors and a io area.
		 */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dp->gc.gc_dma_attr_desc,
		    DDI_DMA_SLEEP, NULL,
		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
			    dp->name, __func__, err);
			return (ENOMEM);
		}

		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
		    req_size, &dp->gc.gc_desc_attr,
		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
		    &ring, &ring_len,
		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_mem_alloc failed: "
			    "ret %d, request size: %d",
			    dp->name, __func__, err, (int)req_size);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}

		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
		    NULL, ring, ring_len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL,
		    &ring_cookie, &count)) != DDI_SUCCESS) {
			ASSERT(err != DDI_DMA_INUSE);
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
			    dp->name, __func__, err);
			ddi_dma_mem_free(&dp->desc_acc_handle);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}
		ASSERT(count == 1);

		/* set base of rx descriptor ring */
		dp->rx_ring = ring;
		dp->rx_ring_dma = ring_cookie.dmac_laddress;

		/* set base of tx descriptor ring */
		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;

		/* set base of io area */
		dp->io_area = dp->tx_ring + dp->tx_desc_size;
		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
	}

	/*
	 * Prepare DMA resources for tx packets
	 */
	ASSERT(dp->gc.gc_tx_buf_size > 0);

	/* Special dma attribute for tx bounce buffers */
	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
	dma_attr_txbounce.dma_attr_sgllen = 1;
	dma_attr_txbounce.dma_attr_align =
	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);

	/* Size for tx bounce buffers must be max tx packet size. */
	tx_buf_len = MAXPKTBUF(dp);
	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);

	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);

	for (i = 0, tbp = dp->tx_buf;
	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {

		/* setup bounce buffers for tx packets */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dma_attr_txbounce,
		    DDI_DMA_SLEEP, NULL,
		    &tbp->txb_bdh)) != DDI_SUCCESS) {

			cmn_err(CE_WARN,
		    "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
			    " err=%d, i=%d",
			    dp->name, __func__, err, i);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
		    tx_buf_len,
		    &dp->gc.gc_buf_attr,
		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &buf, &buf_len,
		    &tbp->txb_bah)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed: "
			    "ret %d, request size %d",
			    dp->name, __func__, err, tx_buf_len);
			ddi_dma_free_handle(&tbp->txb_bdh);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
		    NULL, buf, buf_len,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    &buf_cookie, &count)) != DDI_SUCCESS) {
			ASSERT(err != DDI_DMA_INUSE);
			cmn_err(CE_WARN,
	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
			    dp->name, __func__, err);
			ddi_dma_mem_free(&tbp->txb_bah);
			ddi_dma_free_handle(&tbp->txb_bdh);
			goto err_alloc_dh;
		}
		ASSERT(count == 1);
		tbp->txb_buf = buf;
		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
	}

	return (0);

err_alloc_dh:
	if (dp->gc.gc_tx_buf_size > 0) {
		while (i-- > 0) {
			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
		}
	}

	if (dp->desc_dma_handle) {
		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
		ddi_dma_mem_free(&dp->desc_acc_handle);
		ddi_dma_free_handle(&dp->desc_dma_handle);
		dp->desc_dma_handle = NULL;
	}

	return (ENOMEM);
}

static void
gem_free_memory(struct gem_dev *dp)
{
	int		i;
	struct rxbuf	*rbp;
	struct txbuf	*tbp;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* Free TX/RX descriptors and tx padding buffer */
	if (dp->desc_dma_handle) {
		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
		ddi_dma_mem_free(&dp->desc_acc_handle);
		ddi_dma_free_handle(&dp->desc_dma_handle);
		dp->desc_dma_handle = NULL;
	}

	/* Free dma handles for Tx */
	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
		/* Free bounce buffer associated to each txbuf */
		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
		ddi_dma_mem_free(&tbp->txb_bah);
		ddi_dma_free_handle(&tbp->txb_bdh);
	}

	/* Free rx buffer */
	while ((rbp = dp->rx_buf_freelist) != NULL) {

		ASSERT(dp->rx_buf_freecnt > 0);

		dp->rx_buf_freelist = rbp->rxb_next;
		dp->rx_buf_freecnt--;

		/* release DMA mapping */
		ASSERT(rbp->rxb_dh != NULL);

		/* free dma handles for rx bounce buffers */
		/* they always have a dma mapping */
		ASSERT(rbp->rxb_nfrags > 0);
		(void) ddi_dma_unbind_handle(rbp->rxb_dh);

		/* free the associated bounce buffer and dma handle */
		ASSERT(rbp->rxb_bah != NULL);
		ddi_dma_mem_free(&rbp->rxb_bah);
		/* free the associated dma handle */
		ddi_dma_free_handle(&rbp->rxb_dh);

		/* free the base memory of rx buffer management */
		kmem_free(rbp, sizeof (struct rxbuf));
	}
}

/* ============================================================== */
/*
 * Rx/Tx descriptor slot management
 */
/* ============================================================== */
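/*
 * Positions in the tx buffer and descriptor rings are tracked with
 * free-running sequence numbers (head/tail pairs for the active,
 * softq and free lists); differences between them give queue
 * lengths, and SLOT() maps a sequence number to a physical ring
 * index.  The debug build asserts the invariant
 * tx_free_tail - tx_active_head == gc_tx_buf_limit whenever no
 * reclaim is in progress.
 */
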
/*
 * Initialize an empty rx ring.
 */
static void
gem_init_rx_ring(struct gem_dev *dp)
{
	int		i;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;

	DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
	    dp->name, __func__,
	    rx_ring_size, dp->gc.gc_rx_buf_max));

	/* make a physical chain of rx descriptors */
	for (i = 0; i < rx_ring_size; i++) {
		(*dp->gc.gc_rx_desc_init)(dp, i);
	}
	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);

	dp->rx_active_head = (seqnum_t)0;
	dp->rx_active_tail = (seqnum_t)0;

	ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
	ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
}

/*
 * Prepare rx buffers and put them into the rx buffer/descriptor ring.
 */
static void
gem_prepare_rx_buf(struct gem_dev *dp)
{
	int		i;
	int		nrbuf;
	struct rxbuf	*rbp;

	ASSERT(mutex_owned(&dp->intrlock));

	/* Now we have no active buffers in rx ring */

	nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
	for (i = 0; i < nrbuf; i++) {
		if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
			break;
		}
		gem_append_rxbuf(dp, rbp);
	}

	gem_rx_desc_dma_sync(dp,
	    0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
}

/*
 * Reclaim active rx buffers in rx buffer ring.
 */
static void
gem_clean_rx_buf(struct gem_dev *dp)
{
	int		i;
	struct rxbuf	*rbp;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;
#ifdef GEM_DEBUG_LEVEL
	int		total;
#endif
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
	    dp->name, __func__, dp->rx_buf_freecnt));
	/*
	 * clean up HW descriptors
	 */
	for (i = 0; i < rx_ring_size; i++) {
		(*dp->gc.gc_rx_desc_clean)(dp, i);
	}
	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);

#ifdef GEM_DEBUG_LEVEL
	total = 0;
#endif
	/*
	 * Reclaim allocated rx buffers
	 */
	while ((rbp = dp->rx_buf_head) != NULL) {
#ifdef GEM_DEBUG_LEVEL
		total++;
#endif
		/* remove the first one from rx buffer list */
		dp->rx_buf_head = rbp->rxb_next;

		/* recycle the rxbuf */
		gem_free_rxbuf(rbp);
	}
	dp->rx_buf_tail = (struct rxbuf *)NULL;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: %d buffers freed, total: %d free",
	    dp->name, __func__, total, dp->rx_buf_freecnt));
}

/*
 * Initialize an empty transmit buffer/descriptor ring
 */
static void
gem_init_tx_ring(struct gem_dev *dp)
{
	int		i;
	int		tx_buf_size = dp->gc.gc_tx_buf_size;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;

	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
	    dp->name, __func__,
	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));

	ASSERT(!dp->mac_active);

	/* initialize active list and free list */
	dp->tx_slots_base =
	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
	dp->tx_softq_tail -= dp->tx_softq_head;
	dp->tx_softq_head = (seqnum_t)0;

	dp->tx_active_head = dp->tx_softq_head;
	dp->tx_active_tail = dp->tx_softq_head;

	dp->tx_free_head   = dp->tx_softq_tail;
	dp->tx_free_tail   = dp->gc.gc_tx_buf_limit;

	dp->tx_desc_head = (seqnum_t)0;
	dp->tx_desc_tail = (seqnum_t)0;
	dp->tx_desc_intr = (seqnum_t)0;

	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_init)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
}

__INLINE__
static void
gem_txbuf_free_dma_resources(struct txbuf *tbp)
{
	if (tbp->txb_mp) {
		freemsg(tbp->txb_mp);
		tbp->txb_mp = NULL;
	}
	tbp->txb_nfrags = 0;
}
#pragma inline(gem_txbuf_free_dma_resources)

/*
 * reclaim active tx buffers and reset positions in tx rings.
 */
static void
gem_clean_tx_buf(struct gem_dev *dp)
{
	int		i;
	seqnum_t	head;
	seqnum_t	tail;
	seqnum_t	sn;
	struct txbuf	*tbp;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;
#ifdef GEM_DEBUG_LEVEL
	int		err;
#endif

	ASSERT(!dp->mac_active);
	ASSERT(dp->tx_busy == 0);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);

	/*
	 * clean up all HW descriptors
	 */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_clean)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);

	/* dequeue all active and loaded buffers */
	head = dp->tx_active_head;
	tail = dp->tx_softq_tail;

	ASSERT(dp->tx_free_head - head >= 0);
	tbp = GET_TXBUF(dp, head);
	for (sn = head; sn != tail; sn++) {
		gem_txbuf_free_dma_resources(tbp);
		ASSERT(tbp->txb_mp == NULL);
		dp->stats.errxmt++;
		tbp = tbp->txb_next;
	}

#ifdef GEM_DEBUG_LEVEL
	/* ensure that no dma resources for tx are in use now */
	err = 0;
	while (sn != head + dp->gc.gc_tx_buf_size) {
		if (tbp->txb_mp || tbp->txb_nfrags) {
			DPRINTF(0, (CE_CONT,
			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
			    dp->name, __func__,
			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
			    tbp->txb_mp, tbp->txb_nfrags));
			err = 1;
		}
		sn++;
		tbp = tbp->txb_next;
	}

	if (err) {
		gem_dump_txbuf(dp, CE_WARN,
		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
	}
#endif
	/* recycle buffers, now no active tx buffers in the ring */
	dp->tx_free_tail += tail - head;
	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);

	/* fix positions in tx buffer rings */
	dp->tx_active_head = dp->tx_free_head;
	dp->tx_active_tail = dp->tx_free_head;
	dp->tx_softq_head  = dp->tx_free_head;
	dp->tx_softq_tail  = dp->tx_free_head;
}

/*
 * Reclaim transmitted buffers from tx buffer/descriptor ring.
 */
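/*
 * To keep xmitlock hold time short, descriptor status is polled and
 * the ring heads are advanced under the lock, while the mblks and
 * DMA resources of transmitted buffers are freed with the lock
 * dropped; tx_free_tail is advanced only by the last of possibly
 * concurrent reclaimers, which are counted by tx_reclaim_busy.
 */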
__INLINE__ int
gem_reclaim_txbuf(struct gem_dev *dp)
{
	struct txbuf	*tbp;
	uint_t		txstat;
	int		err = GEM_SUCCESS;
	seqnum_t	head;
	seqnum_t	tail;
	seqnum_t	sn;
	seqnum_t	desc_head;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;
	uint_t (*tx_desc_stat)(struct gem_dev *dp,
	    int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
#if GEM_DEBUG_LEVEL > 4
	clock_t			now = ddi_get_lbolt();
#endif

	mutex_enter(&dp->xmitlock);

	head = dp->tx_active_head;
	tail = dp->tx_active_tail;

#if GEM_DEBUG_LEVEL > 2
	if (head != tail) {
		cmn_err(CE_CONT, "!%s: %s: "
		    "testing active_head:%d[%d], active_tail:%d[%d]",
		    dp->name, __func__,
		    head, SLOT(head, dp->gc.gc_tx_buf_size),
		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
	}
#endif
#ifdef DEBUG
	if (dp->tx_reclaim_busy == 0) {
		/* check tx buffer management consistency */
		ASSERT(dp->tx_free_tail - dp->tx_active_head
		    == dp->gc.gc_tx_buf_limit);
		/* EMPTY */
	}
#endif
	dp->tx_reclaim_busy++;

	/* sync all active HW descriptors */
	gem_tx_desc_dma_sync(dp,
	    SLOT(dp->tx_desc_head, tx_ring_size),
	    dp->tx_desc_tail - dp->tx_desc_head,
	    DDI_DMA_SYNC_FORKERNEL);

	tbp = GET_TXBUF(dp, head);
	desc_head = dp->tx_desc_head;
	for (sn = head; sn != tail;
	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
		int	ndescs;

		ASSERT(tbp->txb_desc == desc_head);

		ndescs = tbp->txb_ndescs;
		txstat = (*tx_desc_stat)(dp,
		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);

		if (txstat == 0) {
			/* not transmitted yet */
			break;
		}

		ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));

		if (txstat & GEM_TX_ERR) {
			err = GEM_FAILURE;
			cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
			    dp->name, sn, SLOT(sn, tx_ring_size));
		}
#if GEM_DEBUG_LEVEL > 4
		if (now - tbp->txb_stime >= 50) {
			cmn_err(CE_WARN, "!%s: tx delayed %d ms",
			    dp->name, (now - tbp->txb_stime)*10);
		}
#endif
		/* free transmitted descriptors */
		desc_head += ndescs;
	}

	if (dp->tx_desc_head != desc_head) {
		/* we have reclaimed one or more tx buffers */
		dp->tx_desc_head = desc_head;

		/* If we passed the next interrupt position, update it */
		if (desc_head - dp->tx_desc_intr >= 0) {
			dp->tx_desc_intr = desc_head;
		}
	}
	mutex_exit(&dp->xmitlock);

	/* free dma mapping resources associated with transmitted tx buffers */
	tbp = GET_TXBUF(dp, head);
	tail = sn;
#if GEM_DEBUG_LEVEL > 2
	if (head != tail) {
		cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
		    __func__,
		    head, SLOT(head, dp->gc.gc_tx_buf_size),
		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
	}
#endif
	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
		gem_txbuf_free_dma_resources(tbp);
	}

	/* recycle the tx buffers */
	mutex_enter(&dp->xmitlock);
	if (--dp->tx_reclaim_busy == 0) {
		/* we are the last thread who can update free tail */
#if GEM_DEBUG_LEVEL > 4
		/* check that all resources have been deallocated */
		sn = dp->tx_free_tail;
		tbp = GET_TXBUF(dp, sn);
		while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
			if (tbp->txb_nfrags) {
				/* in use */
				break;
			}
			ASSERT(tbp->txb_mp == NULL);
			tbp = tbp->txb_next;
			sn++;
		}
		ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
#endif
		dp->tx_free_tail =
		    dp->tx_active_head + dp->gc.gc_tx_buf_limit;
	}
	if (!dp->mac_active) {
		/* someone may be waiting for me. */
		cv_broadcast(&dp->tx_drain_cv);
	}
#if GEM_DEBUG_LEVEL > 2
	cmn_err(CE_CONT, "!%s: %s: called, "
	    "free_head:%d free_tail:%d(+%d) added:%d",
	    dp->name, __func__,
	    dp->tx_free_head, dp->tx_free_tail,
	    dp->tx_free_tail - dp->tx_free_head, tail - head);
#endif
	mutex_exit(&dp->xmitlock);

	return (err);
}
#pragma inline(gem_reclaim_txbuf)


/*
 * Write tx descriptors in an out-of-order manner.
 */
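/*
 * Only the descriptor at intr_slot is flagged with GEM_TXFLAG_INTR,
 * so the controller raises a tx-complete interrupt at that position
 * rather than on every packet of the burst.
 */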
static void
gem_tx_load_descs_oo(struct gem_dev *dp,
	seqnum_t start_slot, seqnum_t end_slot, seqnum_t intr_slot,
	uint64_t flags)
{
	seqnum_t	sn;
	struct txbuf	*tbp;
	int	tx_ring_size = dp->gc.gc_tx_ring_size;
	int	(*tx_desc_write)
	    (struct gem_dev *dp, int slot,
	    ddi_dma_cookie_t *dmacookie,
	    int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
	clock_t	now = ddi_get_lbolt();

	sn = start_slot;
	tbp = GET_TXBUF(dp, sn);
	do {
		if (sn == intr_slot) {
			flags |= GEM_TXFLAG_INTR;
		}
#if GEM_DEBUG_LEVEL > 1
		if (dp->tx_cnt < 100) {
			dp->tx_cnt++;
			flags |= GEM_TXFLAG_INTR;
		}
#endif
		/* write a tx descriptor */
		tbp->txb_desc = sn;
		tbp->txb_ndescs = (*tx_desc_write)(dp,
		    SLOT(sn, tx_ring_size),
		    tbp->txb_dmacookie,
		    tbp->txb_nfrags, flags | tbp->txb_flag);
		tbp->txb_stime = now;
		ASSERT(tbp->txb_ndescs == 1);

		flags = 0;
		sn++;
		tbp = tbp->txb_next;
	} while (sn != end_slot);
}


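/*
 * Note on the tx strategy: outgoing mblk chains are always copied
 * into per-buffer, pre-bound DMA bounce buffers instead of binding
 * the mblk pages for each packet.  For the short packets handled by
 * NICs of this class, one bcopy() into a cache-aligned buffer is
 * typically cheaper than a per-packet ddi_dma_addr_bind_handle()
 * call, and it guarantees a single-fragment DMA cookie.
 */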
__INLINE__
static void
gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
{
	size_t			min_pkt;
	caddr_t			bp;
	size_t			off;
	mblk_t			*tp;
	size_t			len;
	uint64_t		flag;

	ASSERT(tbp->txb_mp == NULL);

	/* we use bounce buffer for the packet */
	min_pkt = ETHERMIN;
	bp = tbp->txb_buf;
	off = 0;
	tp = mp;

	flag = tbp->txb_flag;
	if (flag & GEM_TXFLAG_SWVTAG) {
		/* need to increase min packet size */
		min_pkt += VTAG_SIZE;
		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
	} else if (flag & GEM_TXFLAG_VTAG) {
		size_t		rest;
		/* we use hardware capability to add vlan tag. */

		/* copy until VTAG + VTAG_SIZE */
		for (rest = VTAG_OFF + VTAG_SIZE; ; tp = tp->b_cont) {
			ASSERT(tp != NULL);
			len = min((long)tp->b_wptr - (long)tp->b_rptr, rest);
			bcopy(tp->b_rptr, &bp[off], len);
			off  += len;
			rest -= len;
			if (rest == 0) {
				tp->b_rptr += len;
				break;
			}
		}
		/* we have just copied the vlan tag; verify it. */
		ASSERT(GET_NET16(&bp[off - VTAG_SIZE]) == VTAG_TPID);

		/* remove the vlan tag */
		off -= VTAG_SIZE;
	}

	/* copy the rest */
	for (; tp; tp = tp->b_cont) {
		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
			bcopy(tp->b_rptr, &bp[off], len);
			off += len;
		}
	}

	if (off < min_pkt &&
	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
		/*
		 * Extend the packet explicitly to the minimum packet size.
		 * For software vlan packets, we must not rely on the tx
		 * autopad function because the nic may not be vlan-aware
		 * and we have to keep 46 octets of payload even when a
		 * vlan tag is present.
		 */
		bzero(&bp[off], min_pkt - off);
		off = min_pkt;
	}

	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);

	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
	tbp->txb_dmacookie[0].dmac_size = off;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
	    dp->name, __func__,
	    tbp->txb_dmacookie[0].dmac_laddress,
	    tbp->txb_dmacookie[0].dmac_size,
	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
	    min_pkt));

	/* save misc info */
	tbp->txb_mp = mp;
	tbp->txb_nfrags = 1;
#ifdef DEBUG_MULTIFRAGS
	if (dp->gc.gc_tx_max_frags >= 3 &&
	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
		tbp->txb_dmacookie[1].dmac_laddress =
		    tbp->txb_dmacookie[0].dmac_laddress + 16;
		tbp->txb_dmacookie[2].dmac_laddress =
		    tbp->txb_dmacookie[1].dmac_laddress + 16;

		tbp->txb_dmacookie[2].dmac_size =
		    tbp->txb_dmacookie[0].dmac_size - 16*2;
		tbp->txb_dmacookie[1].dmac_size = 16;
		tbp->txb_dmacookie[0].dmac_size = 16;
		tbp->txb_nfrags  = 3;
	}
#endif
}
#pragma inline(gem_setup_txbuf_copy)

__INLINE__
static void
gem_tx_start_unit(struct gem_dev *dp)
{
	seqnum_t	head;
	seqnum_t	tail;
	struct txbuf	*tbp_head;
	struct txbuf	*tbp_tail;

	/* update HW descriptors from soft queue */
	ASSERT(mutex_owned(&dp->xmitlock));
	ASSERT(dp->tx_softq_head == dp->tx_active_tail);

	head = dp->tx_softq_head;
	tail = dp->tx_softq_tail;

	DPRINTF(1, (CE_CONT,
	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
	    dp->name, __func__, head, tail, tail - head,
	    dp->tx_desc_head, dp->tx_desc_tail,
	    dp->tx_desc_tail - dp->tx_desc_head));

	ASSERT(tail - head > 0);

	dp->tx_desc_tail = tail;

	tbp_head = GET_TXBUF(dp, head);
	tbp_tail = GET_TXBUF(dp, tail - 1);

	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);

	dp->gc.gc_tx_start(dp,
	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);

	/* advance softq head and active tail */
	dp->tx_softq_head = dp->tx_active_tail = tail;
}
#pragma inline(gem_tx_start_unit)

#ifdef GEM_DEBUG_LEVEL
static int gem_send_cnt[10];
#endif

/*
 * gem_send_common is an exported function because hardware-dependent
 * routines may use it to send control frames, such as setup frames
 * for the 2114x chipset.
 */
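/*
 * The send path below works in three stages: a window of free tx
 * buffers is allocated under xmitlock, the packets are copied and
 * examined with the lock dropped, and the descriptors are then
 * written and the softq tail advanced under the lock again.
 * tx_busy counts the senders currently loading buffers; only the
 * last one to finish publishes the new softq tail.
 */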
1288*5748Sduboff mblk_t *
1289*5748Sduboff gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1290*5748Sduboff {
1291*5748Sduboff 	int			nmblk;
1292*5748Sduboff 	int			avail;
1293*5748Sduboff 	mblk_t			*tp;
1294*5748Sduboff 	mblk_t			*mp;
1295*5748Sduboff 	int			i = 0;
1296*5748Sduboff 	struct txbuf		*tbp;
1297*5748Sduboff 	seqnum_t		head;
1298*5748Sduboff 	seqnum_t		intr;
1299*5748Sduboff 	uint64_t		load_flags;
1300*5748Sduboff 	uint64_t		len_total = 0;
1301*5748Sduboff 	uint64_t		packets = 0;
1302*5748Sduboff 	uint32_t		vtag;
1303*5748Sduboff 
1304*5748Sduboff 	ASSERT(mp_head != NULL);
1305*5748Sduboff 
1306*5748Sduboff 	mp = mp_head;
1307*5748Sduboff 	nmblk = 1;
1308*5748Sduboff 	while ((mp = mp->b_next) != NULL) {
1309*5748Sduboff 		nmblk++;
1310*5748Sduboff 	}
1311*5748Sduboff #ifdef GEM_DEBUG_LEVEL
1312*5748Sduboff 	gem_send_cnt[0]++;
1313*5748Sduboff 	gem_send_cnt[min(nmblk, 9)]++;
1314*5748Sduboff #endif
1315*5748Sduboff 	/*
1316*5748Sduboff 	 * Aquire resources
1317*5748Sduboff 	 */
1318*5748Sduboff 	mutex_enter(&dp->xmitlock);
1319*5748Sduboff 
1320*5748Sduboff 	if (dp->mac_suspended) {
1321*5748Sduboff 		mutex_exit(&dp->xmitlock);
1322*5748Sduboff 		mp = mp_head;
1323*5748Sduboff 		while (mp) {
1324*5748Sduboff 			tp = mp->b_next;
1325*5748Sduboff 			freemsg(mp);
1326*5748Sduboff 			mp = tp;
1327*5748Sduboff 		}
1328*5748Sduboff 		return (NULL);
1329*5748Sduboff 	}
1330*5748Sduboff 
1331*5748Sduboff 	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1332*5748Sduboff 		/* don't send data packets while mac isn't active */
1333*5748Sduboff 		mutex_exit(&dp->xmitlock);
1334*5748Sduboff 		return (mp_head);
1335*5748Sduboff 	}
1336*5748Sduboff 
1337*5748Sduboff 	/* allocate free slots */
1338*5748Sduboff 	head = dp->tx_free_head;
1339*5748Sduboff 	avail = dp->tx_free_tail - head;
1340*5748Sduboff 
1341*5748Sduboff 	DPRINTF(2, (CE_CONT,
1342*5748Sduboff 	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1343*5748Sduboff 	    dp->name, __func__,
1344*5748Sduboff 	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1345*5748Sduboff 
1346*5748Sduboff 	if ((dp->misc_flag & GEM_CTRL_PKT) &&
1347*5748Sduboff 	    (flags & GEM_SEND_CTRL) == 0 && avail > 0) {
1348*5748Sduboff 		/* reserve a txbuffer for sending control packets */
1349*5748Sduboff 		avail--;
1350*5748Sduboff 	}
1351*5748Sduboff 
1352*5748Sduboff 	if (nmblk > avail) {
1353*5748Sduboff 		if (avail == 0) {
1354*5748Sduboff 			/* no resources; short cut */
1355*5748Sduboff 			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1356*5748Sduboff 			goto done;
1357*5748Sduboff 		}
1358*5748Sduboff 		nmblk = avail;
1359*5748Sduboff 	}
1360*5748Sduboff 
1361*5748Sduboff 	dp->tx_free_head = head + nmblk;
1362*5748Sduboff 	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1363*5748Sduboff 
1364*5748Sduboff 	/* calculate next interrupt position */
1365*5748Sduboff 	intr = head + avail;	/* free tail */
1366*5748Sduboff 
1367*5748Sduboff 	/*
1368*5748Sduboff 	 * update the interrupt position if it is within the range of
1369*5748Sduboff 	 * allocated tx buffers and we are using the out-of-order method.
1370*5748Sduboff 	 */
1371*5748Sduboff 	if ((head + nmblk) - intr >= 0 &&
1372*5748Sduboff 	    intr - dp->tx_desc_intr > 0) {
1373*5748Sduboff 		dp->tx_desc_intr = intr;
1374*5748Sduboff 	}
1375*5748Sduboff 	mutex_exit(&dp->xmitlock);
1376*5748Sduboff 
1377*5748Sduboff 	tbp = GET_TXBUF(dp, head);
1378*5748Sduboff 
1379*5748Sduboff 	i = nmblk;
1380*5748Sduboff 	do {
1381*5748Sduboff 		size_t		len;
1382*5748Sduboff 		uint8_t		*bp;
1383*5748Sduboff #define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
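		/*
		 * PKT_MIN_SIZE bounds the header bytes inspected below
		 * (destination address and the 802.1Q TPID/TCI); the
		 * extra 10 bytes are presumably margin beyond the
		 * tagged ethernet header.
		 */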
1384*5748Sduboff 
1385*5748Sduboff 		/* remove one from the mblk list */
1386*5748Sduboff 		ASSERT(mp_head != NULL);
1387*5748Sduboff 		mp = mp_head;
1388*5748Sduboff 		mp_head = mp_head->b_next;
1389*5748Sduboff 		mp->b_next = NULL;
1390*5748Sduboff 
1391*5748Sduboff 		/* save misc info */
1392*5748Sduboff 		tbp->txb_flag =
1393*5748Sduboff 		    (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1394*5748Sduboff 
1395*5748Sduboff 		/*
1396*5748Sduboff 		 * prepare the header of the packet for further analysis
1397*5748Sduboff 		 */
1398*5748Sduboff 		if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1399*5748Sduboff 			int 	off;
1400*5748Sduboff 
1401*5748Sduboff 			/* we use bounce buffer for the packet */
1402*5748Sduboff 			bp = (uint8_t *)tbp->txb_buf;
1403*5748Sduboff 			for (tp = mp, off = 0;
1404*5748Sduboff 			    tp && (off < PKT_MIN_SIZE);
1405*5748Sduboff 			    tp = tp->b_cont, off += len) {
1406*5748Sduboff 				len = min((long)tp->b_wptr - (long)tp->b_rptr,
1407*5748Sduboff 				    PKT_MIN_SIZE - off);
1408*5748Sduboff 				bcopy(tp->b_rptr, &bp[off], len);
1409*5748Sduboff 			}
1410*5748Sduboff 		} else {
1411*5748Sduboff 			bp = mp->b_rptr;
1412*5748Sduboff 		}
1413*5748Sduboff #undef PKT_MIN_SIZE
1414*5748Sduboff 
1415*5748Sduboff 		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1416*5748Sduboff 			/* statistics for non-unicast packets */
1417*5748Sduboff 			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1418*5748Sduboff 			    ETHERADDRL) == 0) {
1419*5748Sduboff 				dp->stats.obcast++;
1420*5748Sduboff 			} else {
1421*5748Sduboff 				dp->stats.omcast++;
1422*5748Sduboff 			}
1423*5748Sduboff 		}
1424*5748Sduboff 
1425*5748Sduboff 		/* process vlan tag for GLD v3 */
1426*5748Sduboff 		if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1427*5748Sduboff 			if (dp->misc_flag & GEM_VLAN_HARD) {
1428*5748Sduboff 				vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1429*5748Sduboff 				ASSERT(vtag);
1430*5748Sduboff 				tbp->txb_flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1431*5748Sduboff 			} else {
1432*5748Sduboff 				tbp->txb_flag |= GEM_TXFLAG_SWVTAG;
1433*5748Sduboff 			}
1434*5748Sduboff 		}
1435*5748Sduboff 
1436*5748Sduboff 		gem_setup_txbuf_copy(dp, mp, tbp);
1437*5748Sduboff 		tbp = tbp->txb_next;
1438*5748Sduboff 	} while (--i > 0);
1439*5748Sduboff 
1440*5748Sduboff 	(void) gem_tx_load_descs_oo(dp,
1441*5748Sduboff 	    head, head + nmblk, intr - 1, load_flags);
1442*5748Sduboff 
1443*5748Sduboff 	/* Append the tbp to the tail of the active tx buffer list */
1444*5748Sduboff 	mutex_enter(&dp->xmitlock);
1445*5748Sduboff 
1446*5748Sduboff 	if ((--dp->tx_busy) == 0) {
1447*5748Sduboff 		/* extend the tail of the softq, as new packets are ready. */
1448*5748Sduboff 		dp->tx_softq_tail = dp->tx_free_head;
1449*5748Sduboff 
1450*5748Sduboff 		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1451*5748Sduboff 			/*
1452*5748Sduboff 			 * The device status changed while we were
1453*5748Sduboff 			 * preparing tx buffers.
1454*5748Sduboff 			 * As we are the last one to make tx non-busy,
1455*5748Sduboff 			 * wake up anyone who may be waiting for us.
1456*5748Sduboff 			 */
1457*5748Sduboff 			cv_broadcast(&dp->tx_drain_cv);
1458*5748Sduboff 		} else {
1459*5748Sduboff 			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1460*5748Sduboff 			gem_tx_start_unit(dp);
1461*5748Sduboff 		}
1462*5748Sduboff 	}
1463*5748Sduboff 	dp->stats.obytes += len_total;
1464*5748Sduboff 	dp->stats.opackets += packets;
1465*5748Sduboff 
1466*5748Sduboff done:
1467*5748Sduboff 	if (mp_head) {
1468*5748Sduboff 		/*
1469*5748Sduboff 		 * We mark the tx side as blocked. The state will be
1470*5748Sduboff 		 * kept until we unblock the tx side explicitly.
1471*5748Sduboff 		 */
1472*5748Sduboff 		dp->tx_blocked = B_TRUE;
1473*5748Sduboff 	}
1474*5748Sduboff 	mutex_exit(&dp->xmitlock);
1475*5748Sduboff 
1476*5748Sduboff 	return (mp_head);
1477*5748Sduboff }
1478*5748Sduboff 
1479*5748Sduboff /* ========================================================== */
1480*5748Sduboff /*
1481*5748Sduboff  * error detection and restart routines
1482*5748Sduboff  */
1483*5748Sduboff /* ========================================================== */
1484*5748Sduboff int
1485*5748Sduboff gem_restart_nic(struct gem_dev *dp, uint_t flags)
1486*5748Sduboff {
1487*5748Sduboff 	ASSERT(mutex_owned(&dp->intrlock));
1488*5748Sduboff 
1489*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called: tx_desc:%d %d %d",
1490*5748Sduboff 	    dp->name, __func__,
1491*5748Sduboff 	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1492*5748Sduboff 
1493*5748Sduboff 	if (dp->mac_suspended) {
1494*5748Sduboff 		/* the device is suspended; fail the restart request */
1495*5748Sduboff 		return (GEM_FAILURE);
1496*5748Sduboff 	}
1497*5748Sduboff 
1498*5748Sduboff 	/*
1499*5748Sduboff 	 * We should avoid calling any routines except xxx_chip_reset
1500*5748Sduboff 	 * when we are resuming the system.
1501*5748Sduboff 	 */
1502*5748Sduboff 	if (dp->mac_active) {
1503*5748Sduboff 		if (flags & GEM_RESTART_KEEP_BUF) {
1504*5748Sduboff 			/* stop rx gracefully */
1505*5748Sduboff 			dp->rxmode &= ~RXMODE_ENABLE;
1506*5748Sduboff 			(void) (*dp->gc.gc_set_rx_filter)(dp);
1507*5748Sduboff 		}
1508*5748Sduboff 		(void) gem_mac_stop(dp, flags);
1509*5748Sduboff 	}
1510*5748Sduboff 
1511*5748Sduboff 	/* reset the chip. */
1512*5748Sduboff 	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1513*5748Sduboff 		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1514*5748Sduboff 		    dp->name, __func__);
1515*5748Sduboff 		goto err;
1516*5748Sduboff 	}
1517*5748Sduboff 
1518*5748Sduboff 	if (gem_mac_init(dp) != GEM_SUCCESS) {
1519*5748Sduboff 		goto err;
1520*5748Sduboff 	}
1521*5748Sduboff 
1522*5748Sduboff 	/* setup media mode if the link has been up */
1523*5748Sduboff 	if (dp->mii_state == MII_STATE_LINKUP) {
1524*5748Sduboff 		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1525*5748Sduboff 			goto err;
1526*5748Sduboff 		}
1527*5748Sduboff 	}
1528*5748Sduboff 
1529*5748Sduboff 	/* setup mac address and enable rx filter */
1530*5748Sduboff 	dp->rxmode |= RXMODE_ENABLE;
1531*5748Sduboff 	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1532*5748Sduboff 		goto err;
1533*5748Sduboff 	}
1534*5748Sduboff 
1535*5748Sduboff 	/*
1536*5748Sduboff 	 * XXX - a panic happened because of linkdown.
1537*5748Sduboff 	 * We must check mii_state here, because the link can be down just
1538*5748Sduboff 	 * before the restart event happens. If the link is down now,
1539*5748Sduboff 	 * gem_mac_start() will be called from gem_mii_link_check() when
1540*5748Sduboff 	 * the link comes up later.
1541*5748Sduboff 	 */
1542*5748Sduboff 	if (dp->mii_state == MII_STATE_LINKUP) {
1543*5748Sduboff 		/* restart the nic */
1544*5748Sduboff 		ASSERT(!dp->mac_active);
1545*5748Sduboff 		(void) gem_mac_start(dp);
1546*5748Sduboff 	}
1547*5748Sduboff 	return (GEM_SUCCESS);
1548*5748Sduboff err:
1549*5748Sduboff 	return (GEM_FAILURE);
1550*5748Sduboff }
1551*5748Sduboff 
1552*5748Sduboff 
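/*
 * gem_tx_timeout is a self-rearming watchdog: it reclaims completed tx
 * buffers, and if the oldest loaded buffer has been pending longer than
 * gc_tx_timeout, it assumes the transmitter is stuck, dumps the tx
 * state, and restarts the chip.
 */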
1553*5748Sduboff static void
1554*5748Sduboff gem_tx_timeout(struct gem_dev *dp)
1555*5748Sduboff {
1556*5748Sduboff 	clock_t		now;
1557*5748Sduboff 	boolean_t	tx_sched;
1558*5748Sduboff 	struct txbuf	*tbp;
1559*5748Sduboff 
1560*5748Sduboff 	mutex_enter(&dp->intrlock);
1561*5748Sduboff 
1562*5748Sduboff 	tx_sched = B_FALSE;
1563*5748Sduboff 	now = ddi_get_lbolt();
1564*5748Sduboff 
1565*5748Sduboff 	mutex_enter(&dp->xmitlock);
1566*5748Sduboff 	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1567*5748Sduboff 		mutex_exit(&dp->xmitlock);
1568*5748Sduboff 		goto schedule_next;
1569*5748Sduboff 	}
1570*5748Sduboff 	mutex_exit(&dp->xmitlock);
1571*5748Sduboff 
1572*5748Sduboff 	/* reclaim transmitted buffers to check whether the transmitter hangs. */
1573*5748Sduboff 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1574*5748Sduboff 		/* tx error happened, reset transmitter in the chip */
1575*5748Sduboff 		(void) gem_restart_nic(dp, 0);
1576*5748Sduboff 		tx_sched = B_TRUE;
1577*5748Sduboff 		dp->tx_blocked = B_FALSE;
1578*5748Sduboff 
1579*5748Sduboff 		goto schedule_next;
1580*5748Sduboff 	}
1581*5748Sduboff 
1582*5748Sduboff 	mutex_enter(&dp->xmitlock);
1583*5748Sduboff 	/* check if the transmitter is stuck */
1584*5748Sduboff 	if (dp->tx_active_head == dp->tx_active_tail) {
1585*5748Sduboff 		/* no tx buffer is loaded to the nic */
1586*5748Sduboff 		mutex_exit(&dp->xmitlock);
1587*5748Sduboff 		goto schedule_next;
1588*5748Sduboff 	}
1589*5748Sduboff 
1590*5748Sduboff 	tbp = GET_TXBUF(dp, dp->tx_active_head);
1591*5748Sduboff 	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1592*5748Sduboff 		mutex_exit(&dp->xmitlock);
1593*5748Sduboff 		goto schedule_next;
1594*5748Sduboff 	}
1595*5748Sduboff 	mutex_exit(&dp->xmitlock);
1596*5748Sduboff 
1597*5748Sduboff 	gem_dump_txbuf(dp, CE_WARN, __func__);
1598*5748Sduboff 
1599*5748Sduboff 	/* discard untransmitted packet and restart tx.  */
1600*5748Sduboff 	(void) gem_restart_nic(dp, 0);
1601*5748Sduboff 	tx_sched = B_TRUE;
1602*5748Sduboff 	dp->tx_blocked = B_FALSE;
1603*5748Sduboff 
1604*5748Sduboff schedule_next:
1605*5748Sduboff 	mutex_exit(&dp->intrlock);
1606*5748Sduboff 
1607*5748Sduboff 	/* restart the downstream if needed */
1608*5748Sduboff 	if (tx_sched) {
1609*5748Sduboff 		mac_tx_update(dp->mh);
1610*5748Sduboff 	}
1611*5748Sduboff 
1612*5748Sduboff 	DPRINTF(4, (CE_CONT,
1613*5748Sduboff 	    "!%s: blocked:%d desc_head:%d desc_tail:%d desc_intr:%d",
1614*5748Sduboff 	    dp->name, dp->tx_blocked,
1615*5748Sduboff 	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1616*5748Sduboff 	dp->timeout_id =
1617*5748Sduboff 	    timeout((void (*)(void *))gem_tx_timeout,
1618*5748Sduboff 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
1619*5748Sduboff }
1620*5748Sduboff 
1621*5748Sduboff /* ================================================================== */
1622*5748Sduboff /*
1623*5748Sduboff  * Interrupt handler
1624*5748Sduboff  */
1625*5748Sduboff /* ================================================================== */
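/*
 * gem_append_rxbuf links freshly prepared rx buffers onto the active
 * rx list and writes an rx descriptor for each of them, advancing
 * rx_active_tail.
 */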
1626*5748Sduboff __INLINE__
1627*5748Sduboff static void
1628*5748Sduboff gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1629*5748Sduboff {
1630*5748Sduboff 	struct rxbuf	*rbp;
1631*5748Sduboff 	seqnum_t	tail;
1632*5748Sduboff 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1633*5748Sduboff 
1634*5748Sduboff 	ASSERT(rbp_head != NULL);
1635*5748Sduboff 	ASSERT(mutex_owned(&dp->intrlock));
1636*5748Sduboff 
1637*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1638*5748Sduboff 	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1639*5748Sduboff 
1640*5748Sduboff 	/*
1641*5748Sduboff 	 * Add new buffers into active rx buffer list
1642*5748Sduboff 	 */
1643*5748Sduboff 	if (dp->rx_buf_head == NULL) {
1644*5748Sduboff 		dp->rx_buf_head = rbp_head;
1645*5748Sduboff 		ASSERT(dp->rx_buf_tail == NULL);
1646*5748Sduboff 	} else {
1647*5748Sduboff 		dp->rx_buf_tail->rxb_next = rbp_head;
1648*5748Sduboff 	}
1649*5748Sduboff 
1650*5748Sduboff 	tail = dp->rx_active_tail;
1651*5748Sduboff 	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1652*5748Sduboff 		/* keep the list tail up to date for the lower layer */
1653*5748Sduboff 		dp->rx_buf_tail = rbp;
1654*5748Sduboff 
1655*5748Sduboff 		dp->gc.gc_rx_desc_write(dp,
1656*5748Sduboff 		    SLOT(tail, rx_ring_size),
1657*5748Sduboff 		    rbp->rxb_dmacookie,
1658*5748Sduboff 		    rbp->rxb_nfrags);
1659*5748Sduboff 
1660*5748Sduboff 		dp->rx_active_tail = tail = tail + 1;
1661*5748Sduboff 	}
1662*5748Sduboff }
1663*5748Sduboff #pragma inline(gem_append_rxbuf)
1664*5748Sduboff 
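/*
 * gem_get_packet_default copies a received frame out of the rx bounce
 * buffer into a fresh mblk.  VTAG_SIZE bytes of headroom are reserved,
 * apparently so that a VLAN tag can later be inserted (see gem_add_vtag
 * in gem_receive) without reallocating the mblk.
 */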
1665*5748Sduboff mblk_t *
1666*5748Sduboff gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1667*5748Sduboff {
1668*5748Sduboff 	int		rx_header_len = dp->gc.gc_rx_header_len;
1669*5748Sduboff 	uint8_t		*bp;
1670*5748Sduboff 	mblk_t		*mp;
1671*5748Sduboff 
1672*5748Sduboff 	/* allocate a new mblk */
1673*5748Sduboff 	if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1674*5748Sduboff 		ASSERT(mp->b_next == NULL);
1675*5748Sduboff 		ASSERT(mp->b_cont == NULL);
1676*5748Sduboff 
1677*5748Sduboff 		mp->b_rptr += VTAG_SIZE;
1678*5748Sduboff 		bp = mp->b_rptr;
1679*5748Sduboff 		mp->b_wptr = bp + len;
1680*5748Sduboff 
1681*5748Sduboff 		(void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1682*5748Sduboff 		    len, DDI_DMA_SYNC_FORKERNEL);
1683*5748Sduboff 
1684*5748Sduboff 		bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1685*5748Sduboff 	}
1686*5748Sduboff 	return (mp);
1687*5748Sduboff }
1688*5748Sduboff 
1689*5748Sduboff #ifdef GEM_DEBUG_LEVEL
1690*5748Sduboff uint_t	gem_rx_pkts[17];
1691*5748Sduboff #endif
1692*5748Sduboff 
1693*5748Sduboff 
1694*5748Sduboff int
1695*5748Sduboff gem_receive(struct gem_dev *dp)
1696*5748Sduboff {
1697*5748Sduboff 	uint64_t	len_total = 0;
1698*5748Sduboff 	struct rxbuf	*rbp;
1699*5748Sduboff 	mblk_t		*mp;
1700*5748Sduboff 	int		cnt = 0;
1701*5748Sduboff 	uint64_t	rxstat;
1702*5748Sduboff 	struct rxbuf	*newbufs;
1703*5748Sduboff 	struct rxbuf	**newbufs_tailp;
1704*5748Sduboff 	mblk_t		*rx_head;
1705*5748Sduboff 	mblk_t 		**rx_tailp;
1706*5748Sduboff 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1707*5748Sduboff 	seqnum_t	active_head;
1708*5748Sduboff 	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
1709*5748Sduboff 	    int slot, int ndesc);
1710*5748Sduboff 	uint16_t	vtag;
1711*5748Sduboff 	int		ethermin = ETHERMIN;
1712*5748Sduboff 	int		ethermax = dp->mtu + sizeof (struct ether_header);
1713*5748Sduboff 
1714*5748Sduboff 	ASSERT(mutex_owned(&dp->intrlock));
1715*5748Sduboff 
1716*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
1717*5748Sduboff 	    dp->name, dp->rx_buf_head));
1718*5748Sduboff 
1719*5748Sduboff 	rx_desc_stat  = dp->gc.gc_rx_desc_stat;
1720*5748Sduboff 	newbufs_tailp = &newbufs;
1721*5748Sduboff 	rx_tailp = &rx_head;
1722*5748Sduboff 	for (active_head = dp->rx_active_head;
1723*5748Sduboff 	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
1724*5748Sduboff 		int		len;
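		/*
		 * Sync rx descriptor status in batches: when the batch
		 * is exhausted, sync up to max(poll_pkt_delay * 2, 10)
		 * slots at once to amortize the ddi_dma_sync() cost.
		 */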
1725*5748Sduboff 		if (cnt == 0) {
1726*5748Sduboff 			cnt = max(dp->poll_pkt_delay*2, 10);
1727*5748Sduboff 			cnt = min(cnt,
1728*5748Sduboff 			    dp->rx_active_tail - active_head);
1729*5748Sduboff 			gem_rx_desc_dma_sync(dp,
1730*5748Sduboff 			    SLOT(active_head, rx_ring_size),
1731*5748Sduboff 			    cnt,
1732*5748Sduboff 			    DDI_DMA_SYNC_FORKERNEL);
1733*5748Sduboff 		}
1734*5748Sduboff 		if (((rxstat = (*rx_desc_stat)(dp,
1735*5748Sduboff 		    SLOT(active_head, rx_ring_size),
1736*5748Sduboff 		    rbp->rxb_nfrags))
1737*5748Sduboff 		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
1738*5748Sduboff 			/* not received yet */
1739*5748Sduboff 			break;
1740*5748Sduboff 		}
1741*5748Sduboff 
1742*5748Sduboff 		/* Remove the head of the rx buffer list */
1743*5748Sduboff 		dp->rx_buf_head = rbp->rxb_next;
1744*5748Sduboff 		cnt--;
1745*5748Sduboff 
1746*5748Sduboff 
1747*5748Sduboff 		if (rxstat & GEM_RX_ERR) {
1748*5748Sduboff 			goto next;
1749*5748Sduboff 		}
1750*5748Sduboff 
1751*5748Sduboff 		len = rxstat & GEM_RX_LEN;
1752*5748Sduboff 		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
1753*5748Sduboff 		    dp->name, __func__, rxstat, len));
1754*5748Sduboff 
1755*5748Sduboff 		/*
1756*5748Sduboff 		 * Copy the packet
1757*5748Sduboff 		 */
1758*5748Sduboff 		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1759*5748Sduboff 			/* no memory, discard the packet */
1760*5748Sduboff 			dp->stats.norcvbuf++;
1761*5748Sduboff 			goto next;
1762*5748Sduboff 		}
1763*5748Sduboff 
1764*5748Sduboff 		/*
1765*5748Sduboff 		 * Process VLAN tag
1766*5748Sduboff 		 */
1767*5748Sduboff 		ethermin = ETHERMIN;
1768*5748Sduboff 		ethermax = dp->mtu + sizeof (struct ether_header);
1769*5748Sduboff 		vtag = (rxstat & GEM_RX_VTAG) >> GEM_RX_VTAG_SHIFT;
1770*5748Sduboff 		if (vtag) {
1771*5748Sduboff 			/* insert the vlan tag extracted by the hardware */
1772*5748Sduboff 			gem_add_vtag(mp, vtag);
1773*5748Sduboff 			len += VTAG_SIZE;
1774*5748Sduboff 			ethermax += VTAG_SIZE;
1775*5748Sduboff 		} else if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
1776*5748Sduboff 
1777*5748Sduboff 			ethermax += VTAG_SIZE;
1778*5748Sduboff 		}
1779*5748Sduboff 
1780*5748Sduboff 		/* check packet size */
1781*5748Sduboff 		if (len < ethermin) {
1782*5748Sduboff 			dp->stats.errrcv++;
1783*5748Sduboff 			dp->stats.runt++;
1784*5748Sduboff 			freemsg(mp);
1785*5748Sduboff 			goto next;
1786*5748Sduboff 		}
1787*5748Sduboff 
1788*5748Sduboff 		if (len > ethermax) {
1789*5748Sduboff 			dp->stats.errrcv++;
1790*5748Sduboff 			dp->stats.frame_too_long++;
1791*5748Sduboff 			freemsg(mp);
1792*5748Sduboff 			goto next;
1793*5748Sduboff 		}
1794*5748Sduboff 
1795*5748Sduboff 		len_total += len;
1796*5748Sduboff 
1797*5748Sduboff 		/* append the received packet to the temporary rx buffer list */
1798*5748Sduboff 		*rx_tailp = mp;
1799*5748Sduboff 		rx_tailp  = &mp->b_next;
1800*5748Sduboff 
1801*5748Sduboff 		if (mp->b_rptr[0] & 1) {
1802*5748Sduboff 			if (bcmp(mp->b_rptr,
1803*5748Sduboff 			    gem_etherbroadcastaddr.ether_addr_octet,
1804*5748Sduboff 			    ETHERADDRL) == 0) {
1805*5748Sduboff 				dp->stats.rbcast++;
1806*5748Sduboff 			} else {
1807*5748Sduboff 				dp->stats.rmcast++;
1808*5748Sduboff 			}
1809*5748Sduboff 		}
1810*5748Sduboff next:
1811*5748Sduboff 		ASSERT(rbp != NULL);
1812*5748Sduboff 
1813*5748Sduboff 		/* append it to the temporary new buffer list */
1814*5748Sduboff 		*newbufs_tailp = rbp;
1815*5748Sduboff 		newbufs_tailp  = &rbp->rxb_next;
1816*5748Sduboff 	}
1817*5748Sduboff 
1818*5748Sduboff 	/* advance rx_active_head */
1819*5748Sduboff 	if ((cnt = active_head - dp->rx_active_head) > 0) {
1820*5748Sduboff 		dp->stats.rbytes += len_total;
1821*5748Sduboff 		dp->stats.rpackets += cnt;
1822*5748Sduboff 	}
1823*5748Sduboff 	dp->rx_active_head = active_head;
1824*5748Sduboff 
1825*5748Sduboff 	/* terminate the working list */
1826*5748Sduboff 	*newbufs_tailp = NULL;
1827*5748Sduboff 	*rx_tailp = NULL;
1828*5748Sduboff 
1829*5748Sduboff 	if (dp->rx_buf_head == NULL) {
1830*5748Sduboff 		dp->rx_buf_tail = NULL;
1831*5748Sduboff 	}
1832*5748Sduboff 
1833*5748Sduboff 	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
1834*5748Sduboff 	    dp->name, __func__, cnt, rx_head));
1835*5748Sduboff 
1836*5748Sduboff 	if (newbufs) {
1837*5748Sduboff 		/*
1838*5748Sduboff 		 * refill the rx list with new buffers
1839*5748Sduboff 		 */
1840*5748Sduboff 		seqnum_t	head;
1841*5748Sduboff 
1842*5748Sduboff 		/* save current tail */
1843*5748Sduboff 		head = dp->rx_active_tail;
1844*5748Sduboff 		gem_append_rxbuf(dp, newbufs);
1845*5748Sduboff 
1846*5748Sduboff 		/* call the hw-dependent rx start routine if we have one. */
1847*5748Sduboff 		dp->gc.gc_rx_start(dp,
1848*5748Sduboff 		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1849*5748Sduboff 	}
1850*5748Sduboff 
1851*5748Sduboff 	if (rx_head) {
1852*5748Sduboff 		/*
1853*5748Sduboff 		 * send up received packets
1854*5748Sduboff 		 */
1855*5748Sduboff 		mutex_exit(&dp->intrlock);
1856*5748Sduboff 		mac_rx(dp->mh, dp->mac_rx_ring_ha, rx_head);
1857*5748Sduboff 		mutex_enter(&dp->intrlock);
1858*5748Sduboff 	}
1859*5748Sduboff 
1860*5748Sduboff #ifdef GEM_DEBUG_LEVEL
1861*5748Sduboff 	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
1862*5748Sduboff #endif
1863*5748Sduboff 	return (cnt);
1864*5748Sduboff }
1865*5748Sduboff 
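/*
 * gem_tx_done reclaims transmitted buffers after a tx interrupt.  On a
 * tx error it restarts the chip while keeping the loaded buffers.  It
 * returns B_TRUE when the (possibly blocked) tx side should be
 * rescheduled.
 */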
1866*5748Sduboff boolean_t
1867*5748Sduboff gem_tx_done(struct gem_dev *dp)
1868*5748Sduboff {
1869*5748Sduboff 	boolean_t		tx_sched = B_FALSE;
1870*5748Sduboff 
1871*5748Sduboff 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1872*5748Sduboff 		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1873*5748Sduboff 		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
1874*5748Sduboff 		    dp->name, dp->tx_active_head, dp->tx_active_tail));
1875*5748Sduboff 		tx_sched = B_TRUE;
1876*5748Sduboff 		goto x;
1877*5748Sduboff 	}
1878*5748Sduboff 
1879*5748Sduboff 	mutex_enter(&dp->xmitlock);
1880*5748Sduboff 
1881*5748Sduboff 	/* XXX - in out-of-order mode, no packets must remain in the soft queue */
1882*5748Sduboff 	ASSERT((!dp->gc.gc_tx_desc_write_oo) ||
1883*5748Sduboff 	    dp->tx_softq_head == dp->tx_softq_tail);
1884*5748Sduboff 	/*
1885*5748Sduboff 	 * If we won't have a chance to get more free tx buffers and tx is
1886*5748Sduboff 	 * blocked, it is worth rescheduling the downstream, i.e. the tx side.
1887*5748Sduboff 	 */
1888*5748Sduboff 	if (dp->tx_blocked && (dp->tx_desc_intr == dp->tx_desc_head)) {
1889*5748Sduboff 		/*
1890*5748Sduboff 		 * As no further tx-done interrupts are scheduled, this
1891*5748Sduboff 		 * is the last chance to kick the tx side, which may be
1892*5748Sduboff 		 * blocked now; otherwise the tx side never works again.
1893*5748Sduboff 		 */
1894*5748Sduboff 		tx_sched = B_TRUE;
1895*5748Sduboff 		dp->tx_blocked = B_FALSE;
1896*5748Sduboff 	}
1897*5748Sduboff 
1898*5748Sduboff 	mutex_exit(&dp->xmitlock);
1899*5748Sduboff 
1900*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: gem_tx_done: ret: blocked:%d",
1901*5748Sduboff 	    dp->name, dp->tx_blocked));
1902*5748Sduboff x:
1903*5748Sduboff 	return (tx_sched);
1904*5748Sduboff }
1905*5748Sduboff 
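/*
 * gem_intr is the common interrupt entry: it rejects interrupts while
 * suspended, marks intr_busy across the hw-dependent handler, and asks
 * the mac layer to restart tx when the handler requests it.
 */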
1906*5748Sduboff static uint_t
1907*5748Sduboff gem_intr(struct gem_dev	*dp)
1908*5748Sduboff {
1909*5748Sduboff 	uint_t		ret;
1910*5748Sduboff 
1911*5748Sduboff 	mutex_enter(&dp->intrlock);
1912*5748Sduboff 	if (dp->mac_suspended) {
1913*5748Sduboff 		mutex_exit(&dp->intrlock);
1914*5748Sduboff 		return (DDI_INTR_UNCLAIMED);
1915*5748Sduboff 	}
1916*5748Sduboff 	dp->intr_busy = B_TRUE;
1917*5748Sduboff 
1918*5748Sduboff 	ret = (*dp->gc.gc_interrupt)(dp);
1919*5748Sduboff 
1920*5748Sduboff 	if (ret == DDI_INTR_UNCLAIMED) {
1921*5748Sduboff 		dp->intr_busy = B_FALSE;
1922*5748Sduboff 		mutex_exit(&dp->intrlock);
1923*5748Sduboff 		return (ret);
1924*5748Sduboff 	}
1925*5748Sduboff 
1926*5748Sduboff 	if (!dp->mac_active) {
1927*5748Sduboff 		cv_broadcast(&dp->tx_drain_cv);
1928*5748Sduboff 	}
1929*5748Sduboff 
1930*5748Sduboff 
1931*5748Sduboff 	dp->stats.intr++;
1932*5748Sduboff 	dp->intr_busy = B_FALSE;
1933*5748Sduboff 
1934*5748Sduboff 	mutex_exit(&dp->intrlock);
1935*5748Sduboff 
1936*5748Sduboff 	if (ret & INTR_RESTART_TX) {
1937*5748Sduboff 		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
1938*5748Sduboff 		mac_tx_update(dp->mh);
1939*5748Sduboff 		ret &= ~INTR_RESTART_TX;
1940*5748Sduboff 	}
1941*5748Sduboff 	return (ret);
1942*5748Sduboff }
1943*5748Sduboff 
1944*5748Sduboff static void
1945*5748Sduboff gem_intr_watcher(struct gem_dev *dp)
1946*5748Sduboff {
1947*5748Sduboff 	(void) gem_intr(dp);
1948*5748Sduboff 
1949*5748Sduboff 	/* schedule the next call of gem_intr_watcher */
1950*5748Sduboff 	dp->intr_watcher_id =
1951*5748Sduboff 	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
1952*5748Sduboff }
1953*5748Sduboff 
1954*5748Sduboff /* ======================================================================== */
1955*5748Sduboff /*
1956*5748Sduboff  * MII support routines
1957*5748Sduboff  */
1958*5748Sduboff /* ======================================================================== */
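/*
 * gem_choose_forcedmode selects the forced speed and duplex mode from
 * the highest advertised capability, for use when auto-negotiation is
 * disabled or has been bypassed.
 */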
1959*5748Sduboff static void
1960*5748Sduboff gem_choose_forcedmode(struct gem_dev *dp)
1961*5748Sduboff {
1962*5748Sduboff 	/* choose media mode */
1963*5748Sduboff 	if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
1964*5748Sduboff 		dp->speed = GEM_SPD_1000;
1965*5748Sduboff 		dp->full_duplex = dp->anadv_1000fdx;
1966*5748Sduboff 	} else if (dp->anadv_100fdx || dp->anadv_100t4) {
1967*5748Sduboff 		dp->speed = GEM_SPD_100;
1968*5748Sduboff 		dp->full_duplex = B_TRUE;
1969*5748Sduboff 	} else if (dp->anadv_100hdx) {
1970*5748Sduboff 		dp->speed = GEM_SPD_100;
1971*5748Sduboff 		dp->full_duplex = B_FALSE;
1972*5748Sduboff 	} else {
1973*5748Sduboff 		dp->speed = GEM_SPD_10;
1974*5748Sduboff 		dp->full_duplex = dp->anadv_10fdx;
1975*5748Sduboff 	}
1976*5748Sduboff }
1977*5748Sduboff 
1978*5748Sduboff uint16_t
1979*5748Sduboff gem_mii_read(struct gem_dev *dp, uint_t reg)
1980*5748Sduboff {
1981*5748Sduboff 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
1982*5748Sduboff 		(*dp->gc.gc_mii_sync)(dp);
1983*5748Sduboff 	}
1984*5748Sduboff 	return ((*dp->gc.gc_mii_read)(dp, reg));
1985*5748Sduboff }
1986*5748Sduboff 
1987*5748Sduboff void
1988*5748Sduboff gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
1989*5748Sduboff {
1990*5748Sduboff 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
1991*5748Sduboff 		(*dp->gc.gc_mii_sync)(dp);
1992*5748Sduboff 	}
1993*5748Sduboff 	(*dp->gc.gc_mii_write)(dp, reg, val);
1994*5748Sduboff }
1995*5748Sduboff 
1996*5748Sduboff #define	fc_cap_decode(x)	\
1997*5748Sduboff 	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
1998*5748Sduboff 	(((x) & MII_ABILITY_ASM_DIR) ? 2 : 0))
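/*
 * fc_cap_decode maps the MII pause ability bits to a 2-bit index:
 * bit 0 = symmetric PAUSE, bit 1 = asymmetric direction (ASM_DIR).
 * fc_cap_encode[] below is its inverse, and gem_fc_result[][] is
 * indexed by these values.
 */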
1999*5748Sduboff 
2000*5748Sduboff int
2001*5748Sduboff gem_mii_config_default(struct gem_dev *dp)
2002*5748Sduboff {
2003*5748Sduboff 	uint16_t	mii_stat;
2004*5748Sduboff 	uint16_t	val;
2005*5748Sduboff 	static uint16_t fc_cap_encode[4] = {
2006*5748Sduboff 		/* none */		0,
2007*5748Sduboff 		/* symmetric */		MII_ABILITY_PAUSE,
2008*5748Sduboff 		/* tx */		MII_ABILITY_ASM_DIR,
2009*5748Sduboff 		/* rx-symmetric */	MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR,
2010*5748Sduboff 	};
2011*5748Sduboff 
2012*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2013*5748Sduboff 
2014*5748Sduboff 	/*
2015*5748Sduboff 	 * Configure bits in advertisement register
2016*5748Sduboff 	 */
2017*5748Sduboff 	mii_stat = dp->mii_status;
2018*5748Sduboff 
2019*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2020*5748Sduboff 	    dp->name, __func__, mii_stat, MII_STATUS_BITS));
2021*5748Sduboff 
2022*5748Sduboff 	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2023*5748Sduboff 		/* odd: the PHY reports no technology ability bits */
2024*5748Sduboff 		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2025*5748Sduboff 		    dp->name, mii_stat, MII_STATUS_BITS);
2026*5748Sduboff 		return (GEM_FAILURE);
2027*5748Sduboff 	}
2028*5748Sduboff 
2029*5748Sduboff 	/* Do not change the rest of the ability bits in the advert reg */
2030*5748Sduboff 	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2031*5748Sduboff 
2032*5748Sduboff 	DPRINTF(0, (CE_CONT,
2033*5748Sduboff 	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2034*5748Sduboff 	    dp->name, __func__,
2035*5748Sduboff 	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2036*5748Sduboff 	    dp->anadv_10fdx, dp->anadv_10hdx));
2037*5748Sduboff 
2038*5748Sduboff 	if (dp->anadv_100t4) {
2039*5748Sduboff 		val |= MII_ABILITY_100BASE_T4;
2040*5748Sduboff 	}
2041*5748Sduboff 	if (dp->anadv_100fdx) {
2042*5748Sduboff 		val |= MII_ABILITY_100BASE_TX_FD;
2043*5748Sduboff 	}
2044*5748Sduboff 	if (dp->anadv_100hdx) {
2045*5748Sduboff 		val |= MII_ABILITY_100BASE_TX;
2046*5748Sduboff 	}
2047*5748Sduboff 	if (dp->anadv_10fdx) {
2048*5748Sduboff 		val |= MII_ABILITY_10BASE_T_FD;
2049*5748Sduboff 	}
2050*5748Sduboff 	if (dp->anadv_10hdx) {
2051*5748Sduboff 		val |= MII_ABILITY_10BASE_T;
2052*5748Sduboff 	}
2053*5748Sduboff 
2054*5748Sduboff 	/* set flow control capability */
2055*5748Sduboff 	val |= fc_cap_encode[dp->anadv_flow_control];
2056*5748Sduboff 
2057*5748Sduboff 	DPRINTF(0, (CE_CONT,
2058*5748Sduboff 	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2059*5748Sduboff 	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2060*5748Sduboff 	    dp->anadv_flow_control));
2061*5748Sduboff 
2062*5748Sduboff 	gem_mii_write(dp, MII_AN_ADVERT, val);
2063*5748Sduboff 
2064*5748Sduboff 	if (mii_stat & MII_STATUS_XSTATUS) {
2065*5748Sduboff 		/*
2066*5748Sduboff 		 * 1000Base-T GMII support
2067*5748Sduboff 		 */
2068*5748Sduboff 		if (!dp->anadv_autoneg) {
2069*5748Sduboff 			/* enable manual configuration */
2070*5748Sduboff 			val = MII_1000TC_CFG_EN;
2071*5748Sduboff 		} else {
2072*5748Sduboff 			val = 0;
2073*5748Sduboff 			if (dp->anadv_1000fdx) {
2074*5748Sduboff 				val |= MII_1000TC_ADV_FULL;
2075*5748Sduboff 			}
2076*5748Sduboff 			if (dp->anadv_1000hdx) {
2077*5748Sduboff 				val |= MII_1000TC_ADV_HALF;
2078*5748Sduboff 			}
2079*5748Sduboff 		}
2080*5748Sduboff 		DPRINTF(0, (CE_CONT,
2081*5748Sduboff 		    "!%s: %s: setting MII_1000TC reg:%b",
2082*5748Sduboff 		    dp->name, __func__, val, MII_1000TC_BITS));
2083*5748Sduboff 
2084*5748Sduboff 		gem_mii_write(dp, MII_1000TC, val);
2085*5748Sduboff 	}
2086*5748Sduboff 
2087*5748Sduboff 	return (GEM_SUCCESS);
2088*5748Sduboff }
2089*5748Sduboff 
2090*5748Sduboff #define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
2091*5748Sduboff #define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)
2092*5748Sduboff 
2093*5748Sduboff static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2094*5748Sduboff /*	 none	symm	tx	rx/symm */
2095*5748Sduboff /* none */
2096*5748Sduboff 	{FLOW_CONTROL_NONE,
2097*5748Sduboff 		FLOW_CONTROL_NONE,
2098*5748Sduboff 			FLOW_CONTROL_NONE,
2099*5748Sduboff 				FLOW_CONTROL_NONE},
2100*5748Sduboff /* sym */
2101*5748Sduboff 	{FLOW_CONTROL_NONE,
2102*5748Sduboff 		FLOW_CONTROL_SYMMETRIC,
2103*5748Sduboff 			FLOW_CONTROL_NONE,
2104*5748Sduboff 				FLOW_CONTROL_SYMMETRIC},
2105*5748Sduboff /* tx */
2106*5748Sduboff 	{FLOW_CONTROL_NONE,
2107*5748Sduboff 		FLOW_CONTROL_NONE,
2108*5748Sduboff 			FLOW_CONTROL_NONE,
2109*5748Sduboff 				FLOW_CONTROL_TX_PAUSE},
2110*5748Sduboff /* rx/symm */
2111*5748Sduboff 	{FLOW_CONTROL_NONE,
2112*5748Sduboff 		FLOW_CONTROL_SYMMETRIC,
2113*5748Sduboff 			FLOW_CONTROL_RX_PAUSE,
2114*5748Sduboff 				FLOW_CONTROL_SYMMETRIC},
2115*5748Sduboff };
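/*
 * gem_fc_result[local][partner] resolves the flow control mode to use
 * after auto-negotiation; the table appears to follow the pause
 * resolution rules of IEEE 802.3 Annex 28B.
 */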
2116*5748Sduboff 
2117*5748Sduboff static char *gem_fc_type[] = {
2118*5748Sduboff 	"without",
2119*5748Sduboff 	"with symmetric",
2120*5748Sduboff 	"with tx",
2121*5748Sduboff 	"with rx",
2122*5748Sduboff };
2123*5748Sduboff 
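/*
 * gem_mii_link_check drives the MII state machine:
 *   UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE -> MEDIA_SETUP
 *   -> LINKDOWN <-> LINKUP
 * It is called from the link watcher timer and returns B_TRUE when the
 * tx side should be rescheduled.
 */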
2124*5748Sduboff boolean_t
2125*5748Sduboff gem_mii_link_check(struct gem_dev *dp)
2126*5748Sduboff {
2127*5748Sduboff 	uint16_t	old_mii_state;
2128*5748Sduboff 	boolean_t	tx_sched = B_FALSE;
2129*5748Sduboff 	uint16_t	status;
2130*5748Sduboff 	uint16_t	advert;
2131*5748Sduboff 	uint16_t	lpable;
2132*5748Sduboff 	uint16_t	exp;
2133*5748Sduboff 	uint16_t	ctl1000;
2134*5748Sduboff 	uint16_t	stat1000;
2135*5748Sduboff 	uint16_t	val;
2136*5748Sduboff 	clock_t		now;
2137*5748Sduboff 	clock_t		diff;
2138*5748Sduboff 	int		linkdown_action;
2139*5748Sduboff 	boolean_t	fix_phy = B_FALSE;
2140*5748Sduboff 
2141*5748Sduboff 	now = ddi_get_lbolt();
2142*5748Sduboff 	old_mii_state = dp->mii_state;
2143*5748Sduboff 
2144*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2145*5748Sduboff 	    dp->name, __func__, now, dp->mii_state));
2146*5748Sduboff 
2147*5748Sduboff 	diff = now - dp->mii_last_check;
2148*5748Sduboff 	dp->mii_last_check = now;
2149*5748Sduboff 
2150*5748Sduboff next_nowait:
2151*5748Sduboff 	switch (dp->mii_state) {
2152*5748Sduboff 	case MII_STATE_UNKNOWN:
2153*5748Sduboff 		/* power-up, DP83840 requires 32 sync bits */
2154*5748Sduboff 		(*dp->gc.gc_mii_sync)(dp);
2155*5748Sduboff 		goto reset_phy;
2156*5748Sduboff 
2157*5748Sduboff 	case MII_STATE_RESETTING:
2158*5748Sduboff 		dp->mii_timer -= diff;
2159*5748Sduboff 		if (dp->mii_timer > 0) {
2160*5748Sduboff 			/* don't read phy registers while resetting */
2161*5748Sduboff 			dp->mii_interval = WATCH_INTERVAL_FAST;
2162*5748Sduboff 			goto next;
2163*5748Sduboff 		}
2164*5748Sduboff 
2165*5748Sduboff 		/* Timer expired, ensure reset bit is not set */
2166*5748Sduboff 
2167*5748Sduboff 		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2168*5748Sduboff 			/* some phys need sync bits after reset */
2169*5748Sduboff 			(*dp->gc.gc_mii_sync)(dp);
2170*5748Sduboff 		}
2171*5748Sduboff 		val = gem_mii_read(dp, MII_CONTROL);
2172*5748Sduboff 		if (val & MII_CONTROL_RESET) {
2173*5748Sduboff 			cmn_err(CE_NOTE,
2174*5748Sduboff 			    "!%s: time:%ld resetting phy not complete."
2175*5748Sduboff 			    " mii_control:0x%b",
2176*5748Sduboff 			    dp->name, ddi_get_lbolt(),
2177*5748Sduboff 			    val, MII_CONTROL_BITS);
2178*5748Sduboff 		}
2179*5748Sduboff 
2180*5748Sduboff 		/* ensure neither isolated nor pwrdown nor auto-nego mode */
2181*5748Sduboff 		/* XXX -- this operation is required for NS DP83840A. */
2182*5748Sduboff 		gem_mii_write(dp, MII_CONTROL, 0);
2183*5748Sduboff 
2184*5748Sduboff 		/* As resetting PHY has completed, configure PHY registers */
2185*5748Sduboff 		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2186*5748Sduboff 			/* we failed to configure PHY. */
2187*5748Sduboff 			goto reset_phy;
2188*5748Sduboff 		}
2189*5748Sduboff 
2190*5748Sduboff 		/* mii_config may disable autonegotiation */
2191*5748Sduboff 		gem_choose_forcedmode(dp);
2192*5748Sduboff 
2193*5748Sduboff 		dp->mii_lpable = 0;
2194*5748Sduboff 		dp->mii_advert = 0;
2195*5748Sduboff 		dp->mii_exp = 0;
2196*5748Sduboff 		dp->mii_ctl1000 = 0;
2197*5748Sduboff 		dp->mii_stat1000 = 0;
2198*5748Sduboff 		dp->flow_control = FLOW_CONTROL_NONE;
2199*5748Sduboff 
2200*5748Sduboff 		if (!dp->anadv_autoneg) {
2201*5748Sduboff 			/* skip auto-negotiation phase */
2202*5748Sduboff 			dp->mii_state = MII_STATE_MEDIA_SETUP;
2203*5748Sduboff 			dp->mii_timer = 0;
2204*5748Sduboff 			dp->mii_interval = 0;
2205*5748Sduboff 			goto next_nowait;
2206*5748Sduboff 		}
2207*5748Sduboff 
2208*5748Sduboff 		/* Issue auto-negotiation command */
2209*5748Sduboff 		goto autonego;
2210*5748Sduboff 
2211*5748Sduboff 	case MII_STATE_AUTONEGOTIATING:
2212*5748Sduboff 		/*
2213*5748Sduboff 		 * Autonegotiation is in progress
2214*5748Sduboff 		 */
2215*5748Sduboff 		dp->mii_timer -= diff;
2216*5748Sduboff 		if (dp->mii_timer -
2217*5748Sduboff 		    (dp->gc.gc_mii_an_timeout
2218*5748Sduboff 		    - dp->gc.gc_mii_an_wait) > 0) {
2219*5748Sduboff 			/*
2220*5748Sduboff 			 * wait for a while, typically autonegotiation
2221*5748Sduboff 			 * completes in 2.3 - 2.5 sec.
2222*5748Sduboff 			 */
2223*5748Sduboff 			dp->mii_interval = WATCH_INTERVAL_FAST;
2224*5748Sduboff 			goto next;
2225*5748Sduboff 		}
2226*5748Sduboff 
2227*5748Sduboff 		/* read PHY status */
2228*5748Sduboff 		status = gem_mii_read(dp, MII_STATUS);
2229*5748Sduboff 		DPRINTF(4, (CE_CONT,
2230*5748Sduboff 		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2231*5748Sduboff 		    dp->name, __func__, dp->mii_state,
2232*5748Sduboff 		    status, MII_STATUS_BITS));
2233*5748Sduboff 
2234*5748Sduboff 		if (status & MII_STATUS_REMFAULT) {
2235*5748Sduboff 			/*
2236*5748Sduboff 			 * The link partner told us something wrong happened.
2237*5748Sduboff 			 * What do we do?
2238*5748Sduboff 			 */
2239*5748Sduboff 			cmn_err(CE_CONT,
2240*5748Sduboff 			    "!%s: auto-negotiation failed: remote fault",
2241*5748Sduboff 			    dp->name);
2242*5748Sduboff 			goto autonego;
2243*5748Sduboff 		}
2244*5748Sduboff 
2245*5748Sduboff 		if ((status & MII_STATUS_ANDONE) == 0) {
2246*5748Sduboff 			if (dp->mii_timer <= 0) {
2247*5748Sduboff 				/*
2248*5748Sduboff 				 * Auto-negotiation timed out;
2249*5748Sduboff 				 * try again without resetting the phy.
2250*5748Sduboff 				 */
2251*5748Sduboff 				if (!dp->mii_supress_msg) {
2252*5748Sduboff 					cmn_err(CE_WARN,
2253*5748Sduboff 				    "!%s: auto-negotiation failed: timeout",
2254*5748Sduboff 					    dp->name);
2255*5748Sduboff 					dp->mii_supress_msg = B_TRUE;
2256*5748Sduboff 				}
2257*5748Sduboff 				goto autonego;
2258*5748Sduboff 			}
2259*5748Sduboff 			/*
2260*5748Sduboff 			 * Auto-negotiation is in progress. Wait.
2261*5748Sduboff 			 */
2262*5748Sduboff 			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2263*5748Sduboff 			goto next;
2264*5748Sduboff 		}
2265*5748Sduboff 
2266*5748Sduboff 		/*
2267*5748Sduboff 		 * Auto-negotiation has completed.
2268*5748Sduboff 		 * Assume linkdown and fall through.
2269*5748Sduboff 		 */
2270*5748Sduboff 		dp->mii_supress_msg = B_FALSE;
2271*5748Sduboff 		dp->mii_state = MII_STATE_AN_DONE;
2272*5748Sduboff 		DPRINTF(0, (CE_CONT,
2273*5748Sduboff 		    "!%s: auto-negotiation completed, MII_STATUS:%b",
2274*5748Sduboff 		    dp->name, status, MII_STATUS_BITS));
2275*5748Sduboff 
2276*5748Sduboff 		if (dp->gc.gc_mii_an_delay > 0) {
2277*5748Sduboff 			dp->mii_timer = dp->gc.gc_mii_an_delay;
2278*5748Sduboff 			dp->mii_interval = drv_usectohz(20*1000);
2279*5748Sduboff 			goto next;
2280*5748Sduboff 		}
2281*5748Sduboff 
2282*5748Sduboff 		dp->mii_timer = 0;
2283*5748Sduboff 		diff = 0;
2284*5748Sduboff 		goto next_nowait;
2285*5748Sduboff 
2286*5748Sduboff 	case MII_STATE_AN_DONE:
2287*5748Sduboff 		/*
2288*5748Sduboff 		 * Auto-negotiation is done. Now we can set up the media.
2289*5748Sduboff 		 */
2290*5748Sduboff 		dp->mii_timer -= diff;
2291*5748Sduboff 		if (dp->mii_timer > 0) {
2292*5748Sduboff 			/* wait for a while */
2293*5748Sduboff 			dp->mii_interval = WATCH_INTERVAL_FAST;
2294*5748Sduboff 			goto next;
2295*5748Sduboff 		}
2296*5748Sduboff 
2297*5748Sduboff 		/*
2298*5748Sduboff 		 * set up the result of auto negotiation
2299*5748Sduboff 		 */
2300*5748Sduboff 
2301*5748Sduboff 		/*
2302*5748Sduboff 		 * Read registers required to determine the current
2303*5748Sduboff 		 * duplex mode and media speed.
2304*5748Sduboff 		 */
2305*5748Sduboff 		if (dp->gc.gc_mii_an_delay > 0) {
2306*5748Sduboff 			/*
2307*5748Sduboff 			 * As the link watcher context has been suspended,
2308*5748Sduboff 			 * 'status' is invalid. We must read the status register here.
2309*5748Sduboff 			 */
2310*5748Sduboff 			status = gem_mii_read(dp, MII_STATUS);
2311*5748Sduboff 		}
2312*5748Sduboff 		advert = gem_mii_read(dp, MII_AN_ADVERT);
2313*5748Sduboff 		lpable = gem_mii_read(dp, MII_AN_LPABLE);
2314*5748Sduboff 		exp = gem_mii_read(dp, MII_AN_EXPANSION);
2315*5748Sduboff 		if (exp == 0xffff) {
2316*5748Sduboff 			/* some phys don't have exp register */
2317*5748Sduboff 			exp = 0;
2318*5748Sduboff 		}
2319*5748Sduboff 		ctl1000  = 0;
2320*5748Sduboff 		stat1000 = 0;
2321*5748Sduboff 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2322*5748Sduboff 			ctl1000  = gem_mii_read(dp, MII_1000TC);
2323*5748Sduboff 			stat1000 = gem_mii_read(dp, MII_1000TS);
2324*5748Sduboff 		}
2325*5748Sduboff 		dp->mii_lpable = lpable;
2326*5748Sduboff 		dp->mii_advert = advert;
2327*5748Sduboff 		dp->mii_exp = exp;
2328*5748Sduboff 		dp->mii_ctl1000  = ctl1000;
2329*5748Sduboff 		dp->mii_stat1000 = stat1000;
2330*5748Sduboff 
2331*5748Sduboff 		cmn_err(CE_CONT,
2332*5748Sduboff 		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2333*5748Sduboff 		    dp->name,
2334*5748Sduboff 		    advert, MII_ABILITY_BITS,
2335*5748Sduboff 		    lpable, MII_ABILITY_BITS,
2336*5748Sduboff 		    exp, MII_AN_EXP_BITS);
2337*5748Sduboff 
2338*5748Sduboff 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2339*5748Sduboff 			cmn_err(CE_CONT,
2340*5748Sduboff 			    "! MII_1000TC:%b, MII_1000TS:%b",
2341*5748Sduboff 			    ctl1000, MII_1000TC_BITS,
2342*5748Sduboff 			    stat1000, MII_1000TS_BITS);
2343*5748Sduboff 		}
2344*5748Sduboff 
2345*5748Sduboff 		if (gem_population(lpable) <= 1 &&
2346*5748Sduboff 		    (exp & MII_AN_EXP_LPCANAN) == 0) {
2347*5748Sduboff 			if ((advert & MII_ABILITY_TECH) != lpable) {
2348*5748Sduboff 				cmn_err(CE_WARN,
2349*5748Sduboff 				    "!%s: but the link partner doesn't seem"
2350*5748Sduboff 				    " to have auto-negotiation capability."
2351*5748Sduboff 				    " Please check the link configuration.",
2352*5748Sduboff 				    dp->name);
2353*5748Sduboff 			}
2354*5748Sduboff 			/*
2355*5748Sduboff 			 * it should be the result of parallel detection, which
2356*5748Sduboff 			 * cannot detect the duplex mode.
2357*5748Sduboff 			 */
2358*5748Sduboff 			if (lpable & MII_ABILITY_100BASE_TX) {
2359*5748Sduboff 				/*
2360*5748Sduboff 				 * we prefer full duplex mode for 100Mbps
2361*5748Sduboff 				 * connection, if we can.
2362*5748Sduboff 				 */
2363*5748Sduboff 				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2364*5748Sduboff 			}
2365*5748Sduboff 
2366*5748Sduboff 			if ((advert & lpable) == 0 &&
2367*5748Sduboff 			    lpable & MII_ABILITY_10BASE_T) {
2368*5748Sduboff 				lpable |= advert & MII_ABILITY_10BASE_T_FD;
2369*5748Sduboff 			}
2370*5748Sduboff 			/*
2371*5748Sduboff 			 * as the link partner cannot auto-negotiate, use
2372*5748Sduboff 			 * a fixed mode temporarily.
2373*5748Sduboff 			 */
2374*5748Sduboff 			fix_phy = B_TRUE;
2375*5748Sduboff 		} else if (lpable == 0) {
2376*5748Sduboff 			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2377*5748Sduboff 			goto reset_phy;
2378*5748Sduboff 		}
2379*5748Sduboff 		/*
2380*5748Sduboff 		 * configure current link mode according to AN priority.
2381*5748Sduboff 		 */
2382*5748Sduboff 		val = advert & lpable;
2383*5748Sduboff 		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2384*5748Sduboff 		    (stat1000 & MII_1000TS_LP_FULL)) {
2385*5748Sduboff 			/* 1000BaseT & full duplex */
2386*5748Sduboff 			dp->speed	 = GEM_SPD_1000;
2387*5748Sduboff 			dp->full_duplex  = B_TRUE;
2388*5748Sduboff 		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2389*5748Sduboff 		    (stat1000 & MII_1000TS_LP_HALF)) {
2390*5748Sduboff 			/* 1000BaseT & half duplex */
2391*5748Sduboff 			dp->speed = GEM_SPD_1000;
2392*5748Sduboff 			dp->full_duplex = B_FALSE;
2393*5748Sduboff 		} else if (val & MII_ABILITY_100BASE_TX_FD) {
2394*5748Sduboff 			/* 100BaseTx & full duplex */
2395*5748Sduboff 			dp->speed = GEM_SPD_100;
2396*5748Sduboff 			dp->full_duplex = B_TRUE;
2397*5748Sduboff 		} else if (val & MII_ABILITY_100BASE_T4) {
2398*5748Sduboff 			/* 100BaseT4 & full duplex */
2399*5748Sduboff 			dp->speed = GEM_SPD_100;
2400*5748Sduboff 			dp->full_duplex = B_TRUE;
2401*5748Sduboff 		} else if (val & MII_ABILITY_100BASE_TX) {
2402*5748Sduboff 			/* 100BaseTx & half duplex */
2403*5748Sduboff 			dp->speed	 = GEM_SPD_100;
2404*5748Sduboff 			dp->full_duplex  = B_FALSE;
2405*5748Sduboff 		} else if (val & MII_ABILITY_10BASE_T_FD) {
2406*5748Sduboff 			/* 10BaseT & full duplex */
2407*5748Sduboff 			dp->speed	 = GEM_SPD_10;
2408*5748Sduboff 			dp->full_duplex  = B_TRUE;
2409*5748Sduboff 		} else if (val & MII_ABILITY_10BASE_T) {
2410*5748Sduboff 			/* 10BaseT & half duplex */
2411*5748Sduboff 			dp->speed	 = GEM_SPD_10;
2412*5748Sduboff 			dp->full_duplex  = B_FALSE;
2413*5748Sduboff 		} else {
2414*5748Sduboff 			/*
2415*5748Sduboff 			 * It seems that the link partner doesn't have
2416*5748Sduboff 			 * auto-negotiation capability and our PHY
2417*5748Sduboff 			 * could not report the correct current mode.
2418*5748Sduboff 			 * We guess the current mode from mii_control.
2419*5748Sduboff 			 */
2420*5748Sduboff 			val = gem_mii_read(dp, MII_CONTROL);
2421*5748Sduboff 
2422*5748Sduboff 			/* select 100m full or 10m half */
2423*5748Sduboff 			dp->speed = (val & MII_CONTROL_100MB) ?
2424*5748Sduboff 			    GEM_SPD_100 : GEM_SPD_10;
2425*5748Sduboff 			dp->full_duplex = dp->speed != GEM_SPD_10;
2426*5748Sduboff 			fix_phy = B_TRUE;
2427*5748Sduboff 
2428*5748Sduboff 			cmn_err(CE_NOTE,
2429*5748Sduboff 			    "!%s: auto-negotiation done but "
2430*5748Sduboff 			    "common ability not found.\n"
2431*5748Sduboff 			    "PHY state: control:%b advert:%b lpable:%b\n"
2432*5748Sduboff 			    "guessing %d Mbps %s duplex mode",
2433*5748Sduboff 			    dp->name,
2434*5748Sduboff 			    val, MII_CONTROL_BITS,
2435*5748Sduboff 			    advert, MII_ABILITY_BITS,
2436*5748Sduboff 			    lpable, MII_ABILITY_BITS,
2437*5748Sduboff 			    gem_speed_value[dp->speed],
2438*5748Sduboff 			    dp->full_duplex ? "full" : "half");
2439*5748Sduboff 		}
2440*5748Sduboff 
2441*5748Sduboff 		if (dp->full_duplex) {
2442*5748Sduboff 			dp->flow_control =
2443*5748Sduboff 			    gem_fc_result[fc_cap_decode(advert)]
2444*5748Sduboff 			    [fc_cap_decode(lpable)];
2445*5748Sduboff 		} else {
2446*5748Sduboff 			dp->flow_control = FLOW_CONTROL_NONE;
2447*5748Sduboff 		}
2448*5748Sduboff 		dp->mii_state = MII_STATE_MEDIA_SETUP;
2449*5748Sduboff 		/* FALLTHROUGH */
2450*5748Sduboff 
2451*5748Sduboff 	case MII_STATE_MEDIA_SETUP:
2452*5748Sduboff 		dp->mii_state = MII_STATE_LINKDOWN;
2453*5748Sduboff 		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2454*5748Sduboff 		DPRINTF(2, (CE_CONT, "!%s: media mode setup done", dp->name));
2455*5748Sduboff 		dp->mii_supress_msg = B_FALSE;
2456*5748Sduboff 
2457*5748Sduboff 		/* use short interval */
2458*5748Sduboff 		dp->mii_interval = WATCH_INTERVAL_FAST;
2459*5748Sduboff 
2460*5748Sduboff 		if ((!dp->anadv_autoneg) ||
2461*5748Sduboff 		    dp->gc.gc_mii_an_oneshot || fix_phy) {
2462*5748Sduboff 
2463*5748Sduboff 			/*
2464*5748Sduboff 			 * write specified mode to phy.
2465*5748Sduboff 			 */
2466*5748Sduboff 			val = gem_mii_read(dp, MII_CONTROL);
2467*5748Sduboff 			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2468*5748Sduboff 			    MII_CONTROL_ANE | MII_CONTROL_RSAN);
2469*5748Sduboff 
2470*5748Sduboff 			if (dp->full_duplex) {
2471*5748Sduboff 				val |= MII_CONTROL_FDUPLEX;
2472*5748Sduboff 			}
2473*5748Sduboff 
2474*5748Sduboff 			switch (dp->speed) {
2475*5748Sduboff 			case GEM_SPD_1000:
2476*5748Sduboff 				val |= MII_CONTROL_1000MB;
2477*5748Sduboff 				break;
2478*5748Sduboff 
2479*5748Sduboff 			case GEM_SPD_100:
2480*5748Sduboff 				val |= MII_CONTROL_100MB;
2481*5748Sduboff 				break;
2482*5748Sduboff 
2483*5748Sduboff 			default:
2484*5748Sduboff 				cmn_err(CE_WARN, "%s: unknown speed:%d",
2485*5748Sduboff 				    dp->name, dp->speed);
2486*5748Sduboff 				/* FALLTHROUGH */
2487*5748Sduboff 			case GEM_SPD_10:
2488*5748Sduboff 				/* for GEM_SPD_10, do nothing */
2489*5748Sduboff 				break;
2490*5748Sduboff 			}
2491*5748Sduboff 
2492*5748Sduboff 			if (dp->mii_status & MII_STATUS_XSTATUS) {
2493*5748Sduboff 				gem_mii_write(dp,
2494*5748Sduboff 				    MII_1000TC, MII_1000TC_CFG_EN);
2495*5748Sduboff 			}
2496*5748Sduboff 			gem_mii_write(dp, MII_CONTROL, val);
2497*5748Sduboff 		}
2498*5748Sduboff 
2499*5748Sduboff 		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2500*5748Sduboff 			/* notify the result of auto-negotiation to mac */
2501*5748Sduboff 			(*dp->gc.gc_set_media)(dp);
2502*5748Sduboff 		}
2503*5748Sduboff 
2504*5748Sduboff 		if ((void *)dp->gc.gc_mii_tune_phy) {
2505*5748Sduboff 			/* for built-in sis900 */
2506*5748Sduboff 			/* XXX - this code should be removed.  */
2507*5748Sduboff 			(*dp->gc.gc_mii_tune_phy)(dp);
2508*5748Sduboff 		}
2509*5748Sduboff 
2510*5748Sduboff 		goto next_nowait;
2511*5748Sduboff 
2512*5748Sduboff 	case MII_STATE_LINKDOWN:
2513*5748Sduboff 		status = gem_mii_read(dp, MII_STATUS);
2514*5748Sduboff 		if (status & MII_STATUS_LINKUP) {
2515*5748Sduboff 			/*
2516*5748Sduboff 			 * Link going up
2517*5748Sduboff 			 */
2518*5748Sduboff 			dp->mii_state = MII_STATE_LINKUP;
2519*5748Sduboff 			dp->mii_supress_msg = B_FALSE;
2520*5748Sduboff 
2521*5748Sduboff 			DPRINTF(0, (CE_CONT,
2522*5748Sduboff 			    "!%s: link up detected: mii_stat:%b",
2523*5748Sduboff 			    dp->name, status, MII_STATUS_BITS));
2524*5748Sduboff 
2525*5748Sduboff 			/*
2526*5748Sduboff 			 * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
2527*5748Sduboff 			 * ignored when MII_CONTROL_ANE is set.
2528*5748Sduboff 			 */
2529*5748Sduboff 			cmn_err(CE_CONT,
2530*5748Sduboff 			    "!%s: Link up: %d Mbps %s duplex %s flow control",
2531*5748Sduboff 			    dp->name,
2532*5748Sduboff 			    gem_speed_value[dp->speed],
2533*5748Sduboff 			    dp->full_duplex ? "full" : "half",
2534*5748Sduboff 			    gem_fc_type[dp->flow_control]);
2535*5748Sduboff 
2536*5748Sduboff 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2537*5748Sduboff 
2538*5748Sduboff 			/* XXX - we need another timer to watch statistics */
2539*5748Sduboff 			if (dp->gc.gc_mii_hw_link_detection &&
2540*5748Sduboff 			    dp->nic_state == NIC_STATE_ONLINE) {
2541*5748Sduboff 				dp->mii_interval = 0;
2542*5748Sduboff 			}
2543*5748Sduboff 
2544*5748Sduboff 			if (dp->nic_state == NIC_STATE_ONLINE) {
2545*5748Sduboff 				if (!dp->mac_active) {
2546*5748Sduboff 					(void) gem_mac_start(dp);
2547*5748Sduboff 				}
2548*5748Sduboff 				tx_sched = B_TRUE;
2549*5748Sduboff 				if (dp->tx_blocked) {
2550*5748Sduboff 					dp->tx_blocked = B_FALSE;
2551*5748Sduboff 				}
2552*5748Sduboff 			}
2553*5748Sduboff 			goto next;
2554*5748Sduboff 		}
2555*5748Sduboff 
2556*5748Sduboff 		dp->mii_supress_msg = B_TRUE;
2557*5748Sduboff 		if (dp->anadv_autoneg) {
2558*5748Sduboff 			dp->mii_timer -= diff;
2559*5748Sduboff 			if (dp->mii_timer <= 0) {
2560*5748Sduboff 				/*
2561*5748Sduboff 				 * The link down timer expired;
2562*5748Sduboff 				 * we need to restart auto-negotiation.
2563*5748Sduboff 				 */
2564*5748Sduboff 				linkdown_action =
2565*5748Sduboff 				    dp->gc.gc_mii_linkdown_timeout_action;
2566*5748Sduboff 				goto restart_autonego;
2567*5748Sduboff 			}
2568*5748Sduboff 		}
2569*5748Sduboff 		/* don't change mii_state */
2570*5748Sduboff 		break;
2571*5748Sduboff 
2572*5748Sduboff 	case MII_STATE_LINKUP:
2573*5748Sduboff 		status = gem_mii_read(dp, MII_STATUS);
2574*5748Sduboff 		if ((status & MII_STATUS_LINKUP) == 0) {
2575*5748Sduboff 			/*
2576*5748Sduboff 			 * Link going down
2577*5748Sduboff 			 */
2578*5748Sduboff 			cmn_err(CE_NOTE,
2579*5748Sduboff 			    "!%s: link down detected: mii_stat:%b",
2580*5748Sduboff 			    dp->name, status, MII_STATUS_BITS);
2581*5748Sduboff 
2582*5748Sduboff 			if (dp->nic_state == NIC_STATE_ONLINE &&
2583*5748Sduboff 			    dp->mac_active &&
2584*5748Sduboff 			    dp->gc.gc_mii_stop_mac_on_linkdown) {
2585*5748Sduboff 				(void) gem_mac_stop(dp, 0);
2586*5748Sduboff 			}
2587*5748Sduboff 
2588*5748Sduboff 			if (dp->anadv_autoneg) {
2589*5748Sduboff 				/* need to restart auto-negotiation */
2590*5748Sduboff 				linkdown_action = dp->gc.gc_mii_linkdown_action;
2591*5748Sduboff 				goto restart_autonego;
2592*5748Sduboff 			}
2593*5748Sduboff 
2594*5748Sduboff 			dp->mii_state = MII_STATE_LINKDOWN;
2595*5748Sduboff 			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2596*5748Sduboff 
2597*5748Sduboff 			if ((void *)dp->gc.gc_mii_tune_phy) {
2598*5748Sduboff 				/* for built-in sis900 */
2599*5748Sduboff 				(*dp->gc.gc_mii_tune_phy)(dp);
2600*5748Sduboff 			}
2601*5748Sduboff 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2602*5748Sduboff 			goto next;
2603*5748Sduboff 		}
2604*5748Sduboff 
2605*5748Sduboff 		/* don't change mii_state */
2606*5748Sduboff 		if (dp->gc.gc_mii_hw_link_detection &&
2607*5748Sduboff 		    dp->nic_state == NIC_STATE_ONLINE) {
2608*5748Sduboff 			dp->mii_interval = 0;
2609*5748Sduboff 			goto next;
2610*5748Sduboff 		}
2611*5748Sduboff 		break;
2612*5748Sduboff 	}
2613*5748Sduboff 	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2614*5748Sduboff 	goto next;
2615*5748Sduboff 
2616*5748Sduboff 	/* Actions at the end of the state routine */
2617*5748Sduboff 
2618*5748Sduboff restart_autonego:
2619*5748Sduboff 	switch (linkdown_action) {
2620*5748Sduboff 	case MII_ACTION_RESET:
2621*5748Sduboff 		if (!dp->mii_supress_msg) {
2622*5748Sduboff 			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2623*5748Sduboff 		}
2624*5748Sduboff 		dp->mii_supress_msg = B_TRUE;
2625*5748Sduboff 		goto reset_phy;
2626*5748Sduboff 
2627*5748Sduboff 	case MII_ACTION_NONE:
2628*5748Sduboff 		dp->mii_supress_msg = B_TRUE;
2629*5748Sduboff 		if (dp->gc.gc_mii_an_oneshot) {
2630*5748Sduboff 			goto autonego;
2631*5748Sduboff 		}
2632*5748Sduboff 		/* PHY will restart autonego automatically */
2633*5748Sduboff 		dp->mii_state = MII_STATE_AUTONEGOTIATING;
2634*5748Sduboff 		dp->mii_timer = dp->gc.gc_mii_an_timeout;
2635*5748Sduboff 		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2636*5748Sduboff 		goto next;
2637*5748Sduboff 
2638*5748Sduboff 	case MII_ACTION_RSA:
2639*5748Sduboff 		if (!dp->mii_supress_msg) {
2640*5748Sduboff 			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2641*5748Sduboff 			    dp->name);
2642*5748Sduboff 		}
2643*5748Sduboff 		dp->mii_supress_msg = B_TRUE;
2644*5748Sduboff 		goto autonego;
2645*5748Sduboff 
2646*5748Sduboff 	default:
2647*5748Sduboff 		cmn_err(CE_WARN, "!%s: unknown linkdown action: %d",
2648*5748Sduboff 		    dp->name, dp->gc.gc_mii_linkdown_action);
2649*5748Sduboff 		dp->mii_supress_msg = B_TRUE;
2650*5748Sduboff 	}
2651*5748Sduboff 	/* NOTREACHED */
2652*5748Sduboff 
2653*5748Sduboff reset_phy:
2654*5748Sduboff 	if (!dp->mii_supress_msg) {
2655*5748Sduboff 		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2656*5748Sduboff 	}
2657*5748Sduboff 	dp->mii_state = MII_STATE_RESETTING;
2658*5748Sduboff 	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2659*5748Sduboff 	if (!dp->gc.gc_mii_dont_reset) {
2660*5748Sduboff 		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2661*5748Sduboff 	}
2662*5748Sduboff 	dp->mii_interval = WATCH_INTERVAL_FAST;
2663*5748Sduboff 	goto next;
2664*5748Sduboff 
2665*5748Sduboff autonego:
2666*5748Sduboff 	if (!dp->mii_supress_msg) {
2667*5748Sduboff 		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2668*5748Sduboff 	}
2669*5748Sduboff 	dp->mii_state = MII_STATE_AUTONEGOTIATING;
2670*5748Sduboff 	dp->mii_timer = dp->gc.gc_mii_an_timeout;
2671*5748Sduboff 
2672*5748Sduboff 	/* start/restart auto nego */
2673*5748Sduboff 	val = gem_mii_read(dp, MII_CONTROL) &
2674*5748Sduboff 	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2675*5748Sduboff 
2676*5748Sduboff 	if (val & MII_CONTROL_ANE) {
2677*5748Sduboff 		/* restart auto nego */
2678*5748Sduboff 		gem_mii_write(dp, MII_CONTROL, val | MII_CONTROL_RSAN);
2679*5748Sduboff 	} else {
2680*5748Sduboff 		/* enable auto nego */
2681*5748Sduboff 		/* XXX - it doesn't work for mx98315 */
2682*5748Sduboff 		gem_mii_write(dp, MII_CONTROL, val | MII_CONTROL_ANE);
2683*5748Sduboff 	}
2684*5748Sduboff 
2685*5748Sduboff 	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2686*5748Sduboff 
2687*5748Sduboff next:
2688*5748Sduboff 	if (dp->link_watcher_id == 0 && dp->mii_interval) {
2689*5748Sduboff 		/* we must schedule next mii_watcher */
2690*5748Sduboff 		dp->link_watcher_id =
2691*5748Sduboff 		    timeout((void (*)(void *))& gem_mii_link_watcher,
2692*5748Sduboff 		    (void *)dp, dp->mii_interval);
2693*5748Sduboff 	}
2694*5748Sduboff 
2695*5748Sduboff 	if (old_mii_state == MII_STATE_UNKNOWN ||
2696*5748Sduboff 	    old_mii_state != dp->mii_state) {
2697*5748Sduboff 		/* notify new mii link state */
2698*5748Sduboff 		if (dp->mii_state == MII_STATE_LINKUP) {
2699*5748Sduboff 			GEM_LINKUP(dp);
2700*5748Sduboff 		} else {
2701*5748Sduboff 			GEM_LINKDOWN(dp);
2702*5748Sduboff 		}
2703*5748Sduboff 	}
2704*5748Sduboff 	return (tx_sched);
2705*5748Sduboff }
2706*5748Sduboff 
2707*5748Sduboff static void
2708*5748Sduboff gem_mii_link_watcher(struct gem_dev *dp)
2709*5748Sduboff {
2710*5748Sduboff 	boolean_t	tx_sched;
2711*5748Sduboff 
2712*5748Sduboff 	mutex_enter(&dp->intrlock);
2713*5748Sduboff 
2714*5748Sduboff 	dp->link_watcher_id = 0;
2715*5748Sduboff 	tx_sched = gem_mii_link_check(dp);
2716*5748Sduboff #if GEM_DEBUG_LEVEL > 2
2717*5748Sduboff 	if (dp->link_watcher_id == 0) {
2718*5748Sduboff 		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2719*5748Sduboff 	}
2720*5748Sduboff #endif
2721*5748Sduboff 	mutex_exit(&dp->intrlock);
2722*5748Sduboff 
2723*5748Sduboff 	if (tx_sched) {
2724*5748Sduboff 		/* kick potentially stopped downstream */
2725*5748Sduboff 		mac_tx_update(dp->mh);
2726*5748Sduboff 	}
2727*5748Sduboff }
2728*5748Sduboff 
2729*5748Sduboff int
2730*5748Sduboff gem_mii_probe_default(struct gem_dev *dp)
2731*5748Sduboff {
2732*5748Sduboff 	int8_t		phy;
2733*5748Sduboff 	uint16_t	status;
2734*5748Sduboff 	uint16_t	adv;
2735*5748Sduboff 	uint16_t	adv_org;
2736*5748Sduboff 
2737*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2738*5748Sduboff 
2739*5748Sduboff 	/*
2740*5748Sduboff 	 * Scan PHY
2741*5748Sduboff 	 */
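	/*
	 * Strategy: try the configured default address first, then scan
	 * gc_mii_addr_min..31 reading MII_STATUS, and finally rescan
	 * after clearing MII_CONTROL, presumably for phys that answer
	 * only after a write.  A status of 0x0000 or 0xffff means no
	 * phy responds at that address.
	 */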
2742*5748Sduboff 	/* ensure that sync bits are sent */
2743*5748Sduboff 	dp->mii_status = 0;
2744*5748Sduboff 
2745*5748Sduboff 	/* Try default phy first */
2746*5748Sduboff 	if (dp->mii_phy_addr) {
2747*5748Sduboff 		status = gem_mii_read(dp, MII_STATUS);
2748*5748Sduboff 		if (status != 0xffff && status != 0) {
2749*5748Sduboff 			gem_mii_write(dp, MII_CONTROL, 0);
2750*5748Sduboff 			goto PHY_found;
2751*5748Sduboff 		}
2752*5748Sduboff 
2753*5748Sduboff 		if (dp->mii_phy_addr < 0) {
2754*5748Sduboff 			cmn_err(CE_NOTE,
2755*5748Sduboff 	    "!%s: failed to probe default internal and/or non-MII PHY",
2756*5748Sduboff 			    dp->name);
2757*5748Sduboff 			return (GEM_FAILURE);
2758*5748Sduboff 		}
2759*5748Sduboff 
2760*5748Sduboff 		cmn_err(CE_NOTE,
2761*5748Sduboff 		    "!%s: failed to probe default MII PHY at %d",
2762*5748Sduboff 		    dp->name, dp->mii_phy_addr);
2763*5748Sduboff 	}
2764*5748Sduboff 
2765*5748Sduboff 	/* Try all possible addresses */
2766*5748Sduboff 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2767*5748Sduboff 		dp->mii_phy_addr = phy;
2768*5748Sduboff 		status = gem_mii_read(dp, MII_STATUS);
2769*5748Sduboff 
2770*5748Sduboff 		if (status != 0xffff && status != 0) {
2771*5748Sduboff 			gem_mii_write(dp, MII_CONTROL, 0);
2772*5748Sduboff 			goto PHY_found;
2773*5748Sduboff 		}
2774*5748Sduboff 	}
2775*5748Sduboff 
2776*5748Sduboff 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2777*5748Sduboff 		dp->mii_phy_addr = phy;
2778*5748Sduboff 		gem_mii_write(dp, MII_CONTROL, 0);
2779*5748Sduboff 		status = gem_mii_read(dp, MII_STATUS);
2780*5748Sduboff 
2781*5748Sduboff 		if (status != 0xffff && status != 0) {
2782*5748Sduboff 			goto PHY_found;
2783*5748Sduboff 		}
2784*5748Sduboff 	}
2785*5748Sduboff 
2786*5748Sduboff 	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2787*5748Sduboff 	dp->mii_phy_addr = -1;
2788*5748Sduboff 
2789*5748Sduboff 	return (GEM_FAILURE);
2790*5748Sduboff 
2791*5748Sduboff PHY_found:
2792*5748Sduboff 	dp->mii_status = status;
2793*5748Sduboff 	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2794*5748Sduboff 	    gem_mii_read(dp, MII_PHYIDL);
2795*5748Sduboff 
2796*5748Sduboff 	if (dp->mii_phy_addr < 0) {
2797*5748Sduboff 		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2798*5748Sduboff 		    dp->name, dp->mii_phy_id);
2799*5748Sduboff 	} else {
2800*5748Sduboff 		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2801*5748Sduboff 		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2802*5748Sduboff 	}
2803*5748Sduboff 
2804*5748Sduboff 	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2805*5748Sduboff 	    dp->name,
2806*5748Sduboff 	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2807*5748Sduboff 	    status, MII_STATUS_BITS,
2808*5748Sduboff 	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2809*5748Sduboff 	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2810*5748Sduboff 
2811*5748Sduboff 	dp->mii_xstatus = 0;
2812*5748Sduboff 	if (status & MII_STATUS_XSTATUS) {
2813*5748Sduboff 		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2814*5748Sduboff 
2815*5748Sduboff 		cmn_err(CE_CONT, "!%s: xstatus:%b",
2816*5748Sduboff 		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2817*5748Sduboff 	}
2818*5748Sduboff 
2819*5748Sduboff 	/* check if the phy can advertise pause abilities */
2820*5748Sduboff 	adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2821*5748Sduboff 
2822*5748Sduboff 	gem_mii_write(dp, MII_AN_ADVERT,
2823*5748Sduboff 	    MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR);
2824*5748Sduboff 
2825*5748Sduboff 	adv = gem_mii_read(dp, MII_AN_ADVERT);
2826*5748Sduboff 
2827*5748Sduboff 	if ((adv & MII_ABILITY_PAUSE) == 0) {
2828*5748Sduboff 		dp->gc.gc_flow_control &= ~1;
2829*5748Sduboff 	}
2830*5748Sduboff 
2831*5748Sduboff 	if ((adv & MII_ABILITY_ASM_DIR) == 0) {
2832*5748Sduboff 		dp->gc.gc_flow_control &= ~2;
2833*5748Sduboff 	}
2834*5748Sduboff 
2835*5748Sduboff 	gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2836*5748Sduboff 
2837*5748Sduboff 	return (GEM_SUCCESS);
2838*5748Sduboff }
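/*
 * Note: mii_phy_id above is the concatenation of the two IEEE 802.3
 * clause 22 identifier registers: PHYIDH (OUI bits 3-18) in the upper
 * 16 bits and PHYIDL (OUI bits 19-24, 6-bit model, 4-bit revision) in
 * the lower 16 bits.  A hedged sketch of how the combined id could be
 * split (field layout per the standard, not defined in this file):
 *
 *	model = (dp->mii_phy_id >> 4) & 0x3f;
 *	rev   = dp->mii_phy_id & 0xf;
 */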
2839*5748Sduboff 
2840*5748Sduboff static void
2841*5748Sduboff gem_mii_start(struct gem_dev *dp)
2842*5748Sduboff {
2843*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2844*5748Sduboff 
2845*5748Sduboff 	/* make a first call of check link */
2846*5748Sduboff 	dp->mii_state = MII_STATE_UNKNOWN;
2847*5748Sduboff 	dp->mii_last_check = ddi_get_lbolt();
2848*5748Sduboff 	(void) gem_mii_link_watcher(dp);
2849*5748Sduboff }
2850*5748Sduboff 
2851*5748Sduboff static void
2852*5748Sduboff gem_mii_stop(struct gem_dev *dp)
2853*5748Sduboff {
2854*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2855*5748Sduboff 
2856*5748Sduboff 	/* Ensure timer routine stopped */
2857*5748Sduboff 	mutex_enter(&dp->intrlock);
2858*5748Sduboff 	if (dp->link_watcher_id) {
2859*5748Sduboff 		while (untimeout(dp->link_watcher_id) == -1)
2860*5748Sduboff 			;
2861*5748Sduboff 		dp->link_watcher_id = 0;
2862*5748Sduboff 	}
2863*5748Sduboff 	mutex_exit(&dp->intrlock);
2864*5748Sduboff }
2865*5748Sduboff 
2866*5748Sduboff boolean_t
2867*5748Sduboff gem_get_mac_addr_conf(struct gem_dev *dp)
2868*5748Sduboff {
2869*5748Sduboff 	char		propname[32];
2870*5748Sduboff 	char		*valstr;
2871*5748Sduboff 	uint8_t		mac[ETHERADDRL];
2872*5748Sduboff 	char		*cp;
2873*5748Sduboff 	int		c;
2874*5748Sduboff 	int		i;
2875*5748Sduboff 	int		j;
2876*5748Sduboff 	uint8_t		v;
2877*5748Sduboff 	uint8_t		d;
2878*5748Sduboff 	uint8_t		ored;
2879*5748Sduboff 
2880*5748Sduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2881*5748Sduboff 	/*
2882*5748Sduboff 	 * Get ethernet address from .conf file
2883*5748Sduboff 	 */
2884*5748Sduboff 	(void) sprintf(propname, "mac-addr");
2885*5748Sduboff 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
2886*5748Sduboff 	    DDI_PROP_DONTPASS, propname, &valstr)) !=
2887*5748Sduboff 	    DDI_PROP_SUCCESS) {
2888*5748Sduboff 		return (B_FALSE);
2889*5748Sduboff 	}
2890*5748Sduboff 
2891*5748Sduboff 	if (strlen(valstr) != ETHERADDRL*3-1) {
2892*5748Sduboff 		goto syntax_err;
2893*5748Sduboff 	}
2894*5748Sduboff 
2895*5748Sduboff 	cp = valstr;
2896*5748Sduboff 	j  = 0;
2897*5748Sduboff 	ored = 0;
2898*5748Sduboff 	for (;;) {
2899*5748Sduboff 		v = 0;
2900*5748Sduboff 		for (i = 0; i < 2; i++) {
2901*5748Sduboff 			c = *cp++;
2902*5748Sduboff 
2903*5748Sduboff 			if (c >= 'a' && c <= 'f') {
2904*5748Sduboff 				d = c - 'a' + 10;
2905*5748Sduboff 			} else if (c >= 'A' && c <= 'F') {
2906*5748Sduboff 				d = c - 'A' + 10;
2907*5748Sduboff 			} else if (c >= '0' && c <= '9') {
2908*5748Sduboff 				d = c - '0';
2909*5748Sduboff 			} else {
2910*5748Sduboff 				goto syntax_err;
2911*5748Sduboff 			}
2912*5748Sduboff 			v = (v << 4) | d;
2913*5748Sduboff 		}
2914*5748Sduboff 
2915*5748Sduboff 		mac[j++] = v;
2916*5748Sduboff 		ored |= v;
2917*5748Sduboff 		if (j == ETHERADDRL) {
2918*5748Sduboff 			/* done */
2919*5748Sduboff 			break;
2920*5748Sduboff 		}
2921*5748Sduboff 
2922*5748Sduboff 		c = *cp++;
2923*5748Sduboff 		if (c != ':') {
2924*5748Sduboff 			goto syntax_err;
2925*5748Sduboff 		}
2926*5748Sduboff 	}
2927*5748Sduboff 
2928*5748Sduboff 	if (ored == 0) {
2929*5748Sduboff 		goto err;
2930*5748Sduboff 	}
2931*5748Sduboff 	for (i = 0; i < ETHERADDRL; i++) {
2932*5748Sduboff 		dp->dev_addr.ether_addr_octet[i] = mac[i];
2933*5748Sduboff 	}
2934*5748Sduboff 	ddi_prop_free(valstr);
2935*5748Sduboff 	return (B_TRUE);
2936*5748Sduboff 
2937*5748Sduboff syntax_err:
2938*5748Sduboff 	cmn_err(CE_CONT,
2939*5748Sduboff 	    "!%s: read mac addr: trying .conf: syntax err %s",
2940*5748Sduboff 	    dp->name, valstr);
2941*5748Sduboff err:
2942*5748Sduboff 	ddi_prop_free(valstr);
2943*5748Sduboff 
2944*5748Sduboff 	return (B_FALSE);
2945*5748Sduboff }
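/*
 * For illustration, a hypothetical driver.conf entry accepted by the
 * parser above; the value must be exactly 17 characters (ETHERADDRL*3-1)
 * of colon-separated hex pairs, and an all-zero address is rejected:
 *
 *	mac-addr="00:11:22:33:44:55";
 */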
2946*5748Sduboff 
2947*5748Sduboff 
2948*5748Sduboff /* ============================================================== */
2949*5748Sduboff /*
2950*5748Sduboff  * internal start/stop interface
2951*5748Sduboff  */
2952*5748Sduboff /* ============================================================== */
2953*5748Sduboff static int
2954*5748Sduboff gem_mac_set_rx_filter(struct gem_dev *dp)
2955*5748Sduboff {
2956*5748Sduboff 	return ((*dp->gc.gc_set_rx_filter)(dp));
2957*5748Sduboff }
2958*5748Sduboff 
2959*5748Sduboff /*
2960*5748Sduboff  * gem_mac_init: cold start
2961*5748Sduboff  */
2962*5748Sduboff static int
2963*5748Sduboff gem_mac_init(struct gem_dev *dp)
2964*5748Sduboff {
2965*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2966*5748Sduboff 
2967*5748Sduboff 	if (dp->mac_suspended) {
2968*5748Sduboff 		return (GEM_FAILURE);
2969*5748Sduboff 	}
2970*5748Sduboff 
2971*5748Sduboff 	dp->mac_active = B_FALSE;
2972*5748Sduboff 
2973*5748Sduboff 	gem_init_rx_ring(dp);
2974*5748Sduboff 	gem_init_tx_ring(dp);
2975*5748Sduboff 
2976*5748Sduboff 	/* reset transmitter state */
2977*5748Sduboff 	dp->tx_blocked  = B_FALSE;
2978*5748Sduboff 	dp->tx_busy = 0;
2979*5748Sduboff 	dp->tx_reclaim_busy = 0;
2980*5748Sduboff 
2981*5748Sduboff 	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
2982*5748Sduboff 		return (GEM_FAILURE);
2983*5748Sduboff 	}
2984*5748Sduboff 
2985*5748Sduboff 	gem_prepare_rx_buf(dp);
2986*5748Sduboff 
2987*5748Sduboff 	return (GEM_SUCCESS);
2988*5748Sduboff }
2989*5748Sduboff /*
2990*5748Sduboff  * gem_mac_start: warm start
2991*5748Sduboff  */
2992*5748Sduboff static int
2993*5748Sduboff gem_mac_start(struct gem_dev *dp)
2994*5748Sduboff {
2995*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2996*5748Sduboff 
2997*5748Sduboff 	ASSERT(mutex_owned(&dp->intrlock));
2998*5748Sduboff 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
2999*5748Sduboff 	ASSERT(dp->mii_state == MII_STATE_LINKUP);
3000*5748Sduboff 
3001*5748Sduboff 	/* enable tx and rx */
3002*5748Sduboff 	mutex_enter(&dp->xmitlock);
3003*5748Sduboff 	if (dp->mac_suspended) {
3004*5748Sduboff 		mutex_exit(&dp->xmitlock);
3005*5748Sduboff 		return (GEM_FAILURE);
3006*5748Sduboff 	}
3007*5748Sduboff 	dp->mac_active = B_TRUE;
3008*5748Sduboff 	mutex_exit(&dp->xmitlock);
3009*5748Sduboff 
3010*5748Sduboff 	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3011*5748Sduboff 		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3012*5748Sduboff 		    dp->name, __func__);
3013*5748Sduboff 		return (GEM_FAILURE);
3014*5748Sduboff 	}
3015*5748Sduboff 
3016*5748Sduboff 	/* kick rx */
3017*5748Sduboff 	dp->gc.gc_rx_start(dp,
3018*5748Sduboff 	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3019*5748Sduboff 	    dp->rx_active_tail - dp->rx_active_head);
3020*5748Sduboff 
3021*5748Sduboff 	mutex_enter(&dp->xmitlock);
3022*5748Sduboff 
3023*5748Sduboff 	/* load untransmitted packets into the nic */
3024*5748Sduboff 	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3025*5748Sduboff 	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3026*5748Sduboff 		gem_tx_load_descs_oo(dp,
3027*5748Sduboff 		    dp->tx_softq_head, dp->tx_softq_tail,
3028*5748Sduboff 		    dp->tx_free_tail - 1,
3029*5748Sduboff 		    GEM_TXFLAG_HEAD);
3030*5748Sduboff 		/* issue preloaded tx buffers */
3031*5748Sduboff 		gem_tx_start_unit(dp);
3032*5748Sduboff 	}
3033*5748Sduboff 
3034*5748Sduboff 	mutex_exit(&dp->xmitlock);
3035*5748Sduboff 
3036*5748Sduboff 	return (GEM_SUCCESS);
3037*5748Sduboff }
3038*5748Sduboff 
3039*5748Sduboff static int
3040*5748Sduboff gem_mac_stop(struct gem_dev *dp, uint_t flags)
3041*5748Sduboff {
3042*5748Sduboff 	int		i;
3043*5748Sduboff 	int		wait_time; /* in uS */
3044*5748Sduboff #ifdef GEM_DEBUG_LEVEL
3045*5748Sduboff 	clock_t		now;
3046*5748Sduboff #endif
3047*5748Sduboff 	int		ret = GEM_SUCCESS;
3048*5748Sduboff 
3049*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3050*5748Sduboff 	    dp->name, __func__, dp->rx_buf_freecnt));
3051*5748Sduboff 
3052*5748Sduboff 	ASSERT(mutex_owned(&dp->intrlock));
3053*5748Sduboff 	ASSERT(!mutex_owned(&dp->xmitlock));
3054*5748Sduboff 
3055*5748Sduboff 	/*
3056*5748Sduboff 	 * Block transmits
3057*5748Sduboff 	 */
3058*5748Sduboff 	mutex_enter(&dp->xmitlock);
3059*5748Sduboff 	if (dp->mac_suspended) {
3060*5748Sduboff 		mutex_exit(&dp->xmitlock);
3061*5748Sduboff 		return (GEM_SUCCESS);
3062*5748Sduboff 	}
3063*5748Sduboff 	dp->mac_active = B_FALSE;
3064*5748Sduboff 
3065*5748Sduboff 	while (dp->tx_busy > 0) {
3066*5748Sduboff 		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3067*5748Sduboff 	}
3068*5748Sduboff 	mutex_exit(&dp->xmitlock);
3069*5748Sduboff 
3070*5748Sduboff 	if ((flags & GEM_RESTART_NOWAIT) == 0) {
3071*5748Sduboff 		/*
3072*5748Sduboff 		 * Wait for all tx buffers to be sent.
3073*5748Sduboff 		 */
3074*5748Sduboff 		wait_time =
3075*5748Sduboff 		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3076*5748Sduboff 		    (dp->tx_active_tail - dp->tx_active_head);
3077*5748Sduboff 
3078*5748Sduboff 		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3079*5748Sduboff 		    dp->name, __func__, wait_time));
3080*5748Sduboff 		i = 0;
3081*5748Sduboff #ifdef GEM_DEBUG_LEVEL
3082*5748Sduboff 		now = ddi_get_lbolt();
3083*5748Sduboff #endif
3084*5748Sduboff 		while (dp->tx_active_tail != dp->tx_active_head) {
3085*5748Sduboff 			if (i > wait_time) {
3086*5748Sduboff 				/* timeout */
3087*5748Sduboff 				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3088*5748Sduboff 				    dp->name, __func__);
3089*5748Sduboff 				break;
3090*5748Sduboff 			}
3091*5748Sduboff 			(void) gem_reclaim_txbuf(dp);
3092*5748Sduboff 			drv_usecwait(100);
3093*5748Sduboff 			i += 100;
3094*5748Sduboff 		}
3095*5748Sduboff 		DPRINTF(0, (CE_NOTE,
3096*5748Sduboff 		    "!%s: %s: the nic has drained in %d uS, real %d mS",
3097*5748Sduboff 		    dp->name, __func__, i,
3098*5748Sduboff 		    10*((int)(ddi_get_lbolt() - now))));
3099*5748Sduboff 	}
3100*5748Sduboff 
3101*5748Sduboff 	/*
3102*5748Sduboff 	 * Now we can stop the nic safely.
3103*5748Sduboff 	 */
3104*5748Sduboff 	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3105*5748Sduboff 		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3106*5748Sduboff 		    dp->name, __func__);
3107*5748Sduboff 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3108*5748Sduboff 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
3109*5748Sduboff 			    dp->name, __func__);
3110*5748Sduboff 		}
3111*5748Sduboff 	}
3112*5748Sduboff 
3113*5748Sduboff 	/*
3114*5748Sduboff 	 * Clear all rx buffers
3115*5748Sduboff 	 */
3116*5748Sduboff 	if (flags & GEM_RESTART_KEEP_BUF) {
3117*5748Sduboff 		(void) gem_receive(dp);
3118*5748Sduboff 	}
3119*5748Sduboff 	gem_clean_rx_buf(dp);
3120*5748Sduboff 
3121*5748Sduboff 	/*
3122*5748Sduboff 	 * Update final statistics
3123*5748Sduboff 	 */
3124*5748Sduboff 	(*dp->gc.gc_get_stats)(dp);
3125*5748Sduboff 
3126*5748Sduboff 	/*
3127*5748Sduboff 	 * Clear all pending tx packets
3128*5748Sduboff 	 */
3129*5748Sduboff 	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3130*5748Sduboff 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3131*5748Sduboff 	if (flags & GEM_RESTART_KEEP_BUF) {
3132*5748Sduboff 		/* restore active tx buffers */
3133*5748Sduboff 		dp->tx_active_tail = dp->tx_active_head;
3134*5748Sduboff 		dp->tx_softq_head  = dp->tx_active_head;
3135*5748Sduboff 	} else {
3136*5748Sduboff 		gem_clean_tx_buf(dp);
3137*5748Sduboff 	}
3138*5748Sduboff 
3139*5748Sduboff 	return (ret);
3140*5748Sduboff }
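/*
 * The drain timeout used above is derived from wire time: 8 *
 * MAXPKTBUF bits per packet divided by the link speed (in Mbps, per
 * gem_speed_value) gives uS per packet, doubled for margin and
 * multiplied by the number of outstanding descriptors.  For example,
 * assuming a hypothetical 1518 byte packet limit at 100 Mbps,
 * 2 * (8 * 1518 / 100) is about 242 uS per pending packet.
 */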
3141*5748Sduboff 
3142*5748Sduboff static int
3143*5748Sduboff gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3144*5748Sduboff {
3145*5748Sduboff 	int		cnt;
3146*5748Sduboff 	int		err;
3147*5748Sduboff 
3148*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3149*5748Sduboff 
3150*5748Sduboff 	mutex_enter(&dp->intrlock);
3151*5748Sduboff 	if (dp->mac_suspended) {
3152*5748Sduboff 		mutex_exit(&dp->intrlock);
3153*5748Sduboff 		return (GEM_FAILURE);
3154*5748Sduboff 	}
3155*5748Sduboff 
3156*5748Sduboff 	if (dp->mc_count_req++ < GEM_MAXMC) {
3157*5748Sduboff 		/* append the new address to the end of the mclist */
3158*5748Sduboff 		cnt = dp->mc_count;
3159*5748Sduboff 		bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3160*5748Sduboff 		    ETHERADDRL);
3161*5748Sduboff 		if (dp->gc.gc_multicast_hash) {
3162*5748Sduboff 			dp->mc_list[cnt].hash =
3163*5748Sduboff 			    (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3164*5748Sduboff 		}
3165*5748Sduboff 		dp->mc_count = cnt + 1;
3166*5748Sduboff 	}
3167*5748Sduboff 
3168*5748Sduboff 	if (dp->mc_count_req != dp->mc_count) {
3169*5748Sduboff 		/* multicast address list overflow */
3170*5748Sduboff 		dp->rxmode |= RXMODE_MULTI_OVF;
3171*5748Sduboff 	} else {
3172*5748Sduboff 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3173*5748Sduboff 	}
3174*5748Sduboff 
3175*5748Sduboff 	/* pass the new multicast list to the hardware */
3176*5748Sduboff 	err = gem_mac_set_rx_filter(dp);
3177*5748Sduboff 
3178*5748Sduboff 	mutex_exit(&dp->intrlock);
3179*5748Sduboff 
3180*5748Sduboff 	return (err);
3181*5748Sduboff }
3182*5748Sduboff 
3183*5748Sduboff static int
3184*5748Sduboff gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3185*5748Sduboff {
3186*5748Sduboff 	size_t		len;
3187*5748Sduboff 	int		i;
3188*5748Sduboff 	int		cnt;
3189*5748Sduboff 	int		err;
3190*5748Sduboff 
3191*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3192*5748Sduboff 
3193*5748Sduboff 	mutex_enter(&dp->intrlock);
3194*5748Sduboff 	if (dp->mac_suspended) {
3195*5748Sduboff 		mutex_exit(&dp->intrlock);
3196*5748Sduboff 		return (GEM_FAILURE);
3197*5748Sduboff 	}
3198*5748Sduboff 
3199*5748Sduboff 	dp->mc_count_req--;
3200*5748Sduboff 	cnt = dp->mc_count;
3201*5748Sduboff 	for (i = 0; i < cnt; i++) {
3202*5748Sduboff 		if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3203*5748Sduboff 			continue;
3204*5748Sduboff 		}
3205*5748Sduboff 		/* shrink the mclist by copying forward */
3206*5748Sduboff 		len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3207*5748Sduboff 		if (len > 0) {
3208*5748Sduboff 			bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3209*5748Sduboff 		}
3210*5748Sduboff 		dp->mc_count--;
3211*5748Sduboff 		break;
3212*5748Sduboff 	}
3213*5748Sduboff 
3214*5748Sduboff 	if (dp->mc_count_req != dp->mc_count) {
3215*5748Sduboff 		/* multicast address list overflow */
3216*5748Sduboff 		dp->rxmode |= RXMODE_MULTI_OVF;
3217*5748Sduboff 	} else {
3218*5748Sduboff 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3219*5748Sduboff 	}
3220*5748Sduboff 	/* In gem v2, don't hold xmitlock on calling set_rx_filter */
3221*5748Sduboff 	err = gem_mac_set_rx_filter(dp);
3222*5748Sduboff 
3223*5748Sduboff 	mutex_exit(&dp->intrlock);
3224*5748Sduboff 
3225*5748Sduboff 	return (err);
3226*5748Sduboff }
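/*
 * In both paths above, mc_count_req counts the addresses the stack
 * has requested while mc_count is the number actually kept in
 * mc_list (at most GEM_MAXMC).  When they differ, RXMODE_MULTI_OVF is
 * set so that the chip-specific gc_set_rx_filter callback can fall
 * back to a wider filter mode (presumably all-multicast; that
 * decision is left to the callback).
 */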
3227*5748Sduboff 
3228*5748Sduboff /* ============================================================== */
3229*5748Sduboff /*
3230*5748Sduboff  * ND interface
3231*5748Sduboff  */
3232*5748Sduboff /* ============================================================== */
3233*5748Sduboff enum {
3234*5748Sduboff 	PARAM_AUTONEG_CAP,
3235*5748Sduboff 	PARAM_PAUSE_CAP,
3236*5748Sduboff 	PARAM_ASYM_PAUSE_CAP,
3237*5748Sduboff 	PARAM_1000FDX_CAP,
3238*5748Sduboff 	PARAM_1000HDX_CAP,
3239*5748Sduboff 	PARAM_100T4_CAP,
3240*5748Sduboff 	PARAM_100FDX_CAP,
3241*5748Sduboff 	PARAM_100HDX_CAP,
3242*5748Sduboff 	PARAM_10FDX_CAP,
3243*5748Sduboff 	PARAM_10HDX_CAP,
3244*5748Sduboff 
3245*5748Sduboff 	PARAM_ADV_AUTONEG_CAP,
3246*5748Sduboff 	PARAM_ADV_PAUSE_CAP,
3247*5748Sduboff 	PARAM_ADV_ASYM_PAUSE_CAP,
3248*5748Sduboff 	PARAM_ADV_1000FDX_CAP,
3249*5748Sduboff 	PARAM_ADV_1000HDX_CAP,
3250*5748Sduboff 	PARAM_ADV_100T4_CAP,
3251*5748Sduboff 	PARAM_ADV_100FDX_CAP,
3252*5748Sduboff 	PARAM_ADV_100HDX_CAP,
3253*5748Sduboff 	PARAM_ADV_10FDX_CAP,
3254*5748Sduboff 	PARAM_ADV_10HDX_CAP,
3255*5748Sduboff 
3256*5748Sduboff 	PARAM_LP_AUTONEG_CAP,
3257*5748Sduboff 	PARAM_LP_PAUSE_CAP,
3258*5748Sduboff 	PARAM_LP_ASYM_PAUSE_CAP,
3259*5748Sduboff 	PARAM_LP_1000FDX_CAP,
3260*5748Sduboff 	PARAM_LP_1000HDX_CAP,
3261*5748Sduboff 	PARAM_LP_100T4_CAP,
3262*5748Sduboff 	PARAM_LP_100FDX_CAP,
3263*5748Sduboff 	PARAM_LP_100HDX_CAP,
3264*5748Sduboff 	PARAM_LP_10FDX_CAP,
3265*5748Sduboff 	PARAM_LP_10HDX_CAP,
3266*5748Sduboff 
3267*5748Sduboff 	PARAM_LINK_STATUS,
3268*5748Sduboff 	PARAM_LINK_SPEED,
3269*5748Sduboff 	PARAM_LINK_DUPLEX,
3270*5748Sduboff 
3271*5748Sduboff 	PARAM_LINK_AUTONEG,
3272*5748Sduboff 	PARAM_LINK_RX_PAUSE,
3273*5748Sduboff 	PARAM_LINK_TX_PAUSE,
3274*5748Sduboff 
3275*5748Sduboff 	PARAM_LOOP_MODE,
3276*5748Sduboff 	PARAM_MSI_CNT,
3277*5748Sduboff 
3278*5748Sduboff #ifdef DEBUG_RESUME
3279*5748Sduboff 	PARAM_RESUME_TEST,
3280*5748Sduboff #endif
3281*5748Sduboff 	PARAM_COUNT
3282*5748Sduboff };
3283*5748Sduboff 
3284*5748Sduboff enum ioc_reply {
3285*5748Sduboff 	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
3286*5748Sduboff 	IOC_DONE,				/* OK, reply sent	*/
3287*5748Sduboff 	IOC_ACK,				/* OK, just send ACK	*/
3288*5748Sduboff 	IOC_REPLY,				/* OK, just send reply	*/
3289*5748Sduboff 	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
3290*5748Sduboff 	IOC_RESTART_REPLY			/* OK, restart & reply	*/
3291*5748Sduboff };
3292*5748Sduboff 
3293*5748Sduboff struct gem_nd_arg {
3294*5748Sduboff 	struct gem_dev	*dp;
3295*5748Sduboff 	int		item;
3296*5748Sduboff };
3297*5748Sduboff 
3298*5748Sduboff static int
3299*5748Sduboff gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3300*5748Sduboff {
3301*5748Sduboff 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3302*5748Sduboff 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3303*5748Sduboff 	long		val;
3304*5748Sduboff 
3305*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3306*5748Sduboff 	    dp->name, __func__, item));
3307*5748Sduboff 
3308*5748Sduboff 	switch (item) {
3309*5748Sduboff 	case PARAM_AUTONEG_CAP:
3310*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3311*5748Sduboff 		DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3312*5748Sduboff 		break;
3313*5748Sduboff 
3314*5748Sduboff 	case PARAM_PAUSE_CAP:
3315*5748Sduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
3316*5748Sduboff 		break;
3317*5748Sduboff 
3318*5748Sduboff 	case PARAM_ASYM_PAUSE_CAP:
3319*5748Sduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
3320*5748Sduboff 		break;
3321*5748Sduboff 
3322*5748Sduboff 	case PARAM_1000FDX_CAP:
3323*5748Sduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3324*5748Sduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3325*5748Sduboff 		break;
3326*5748Sduboff 
3327*5748Sduboff 	case PARAM_1000HDX_CAP:
3328*5748Sduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3329*5748Sduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3330*5748Sduboff 		break;
3331*5748Sduboff 
3332*5748Sduboff 	case PARAM_100T4_CAP:
3333*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3334*5748Sduboff 		break;
3335*5748Sduboff 
3336*5748Sduboff 	case PARAM_100FDX_CAP:
3337*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3338*5748Sduboff 		break;
3339*5748Sduboff 
3340*5748Sduboff 	case PARAM_100HDX_CAP:
3341*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3342*5748Sduboff 		break;
3343*5748Sduboff 
3344*5748Sduboff 	case PARAM_10FDX_CAP:
3345*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3346*5748Sduboff 		break;
3347*5748Sduboff 
3348*5748Sduboff 	case PARAM_10HDX_CAP:
3349*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3350*5748Sduboff 		break;
3351*5748Sduboff 
3352*5748Sduboff 	case PARAM_ADV_AUTONEG_CAP:
3353*5748Sduboff 		val = dp->anadv_autoneg;
3354*5748Sduboff 		break;
3355*5748Sduboff 
3356*5748Sduboff 	case PARAM_ADV_PAUSE_CAP:
3357*5748Sduboff 		val = BOOLEAN(dp->anadv_flow_control & 1);
3358*5748Sduboff 		break;
3359*5748Sduboff 
3360*5748Sduboff 	case PARAM_ADV_ASYM_PAUSE_CAP:
3361*5748Sduboff 		val = BOOLEAN(dp->anadv_flow_control & 2);
3362*5748Sduboff 		break;
3363*5748Sduboff 
3364*5748Sduboff 	case PARAM_ADV_1000FDX_CAP:
3365*5748Sduboff 		val = dp->anadv_1000fdx;
3366*5748Sduboff 		break;
3367*5748Sduboff 
3368*5748Sduboff 	case PARAM_ADV_1000HDX_CAP:
3369*5748Sduboff 		val = dp->anadv_1000hdx;
3370*5748Sduboff 		break;
3371*5748Sduboff 
3372*5748Sduboff 	case PARAM_ADV_100T4_CAP:
3373*5748Sduboff 		val = dp->anadv_100t4;
3374*5748Sduboff 		break;
3375*5748Sduboff 
3376*5748Sduboff 	case PARAM_ADV_100FDX_CAP:
3377*5748Sduboff 		val = dp->anadv_100fdx;
3378*5748Sduboff 		break;
3379*5748Sduboff 
3380*5748Sduboff 	case PARAM_ADV_100HDX_CAP:
3381*5748Sduboff 		val = dp->anadv_100hdx;
3382*5748Sduboff 		break;
3383*5748Sduboff 
3384*5748Sduboff 	case PARAM_ADV_10FDX_CAP:
3385*5748Sduboff 		val = dp->anadv_10fdx;
3386*5748Sduboff 		break;
3387*5748Sduboff 
3388*5748Sduboff 	case PARAM_ADV_10HDX_CAP:
3389*5748Sduboff 		val = dp->anadv_10hdx;
3390*5748Sduboff 		break;
3391*5748Sduboff 
3392*5748Sduboff 	case PARAM_LP_AUTONEG_CAP:
3393*5748Sduboff 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3394*5748Sduboff 		break;
3395*5748Sduboff 
3396*5748Sduboff 	case PARAM_LP_PAUSE_CAP:
3397*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3398*5748Sduboff 		break;
3399*5748Sduboff 
3400*5748Sduboff 	case PARAM_LP_ASYM_PAUSE_CAP:
3401*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
3402*5748Sduboff 		break;
3403*5748Sduboff 
3404*5748Sduboff 	case PARAM_LP_1000FDX_CAP:
3405*5748Sduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3406*5748Sduboff 		break;
3407*5748Sduboff 
3408*5748Sduboff 	case PARAM_LP_1000HDX_CAP:
3409*5748Sduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3410*5748Sduboff 		break;
3411*5748Sduboff 
3412*5748Sduboff 	case PARAM_LP_100T4_CAP:
3413*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3414*5748Sduboff 		break;
3415*5748Sduboff 
3416*5748Sduboff 	case PARAM_LP_100FDX_CAP:
3417*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3418*5748Sduboff 		break;
3419*5748Sduboff 
3420*5748Sduboff 	case PARAM_LP_100HDX_CAP:
3421*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3422*5748Sduboff 		break;
3423*5748Sduboff 
3424*5748Sduboff 	case PARAM_LP_10FDX_CAP:
3425*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3426*5748Sduboff 		break;
3427*5748Sduboff 
3428*5748Sduboff 	case PARAM_LP_10HDX_CAP:
3429*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3430*5748Sduboff 		break;
3431*5748Sduboff 
3432*5748Sduboff 	case PARAM_LINK_STATUS:
3433*5748Sduboff 		val = (dp->mii_state == MII_STATE_LINKUP);
3434*5748Sduboff 		break;
3435*5748Sduboff 
3436*5748Sduboff 	case PARAM_LINK_SPEED:
3437*5748Sduboff 		val = gem_speed_value[dp->speed];
3438*5748Sduboff 		break;
3439*5748Sduboff 
3440*5748Sduboff 	case PARAM_LINK_DUPLEX:
3441*5748Sduboff 		val = 0;
3442*5748Sduboff 		if (dp->mii_state == MII_STATE_LINKUP) {
3443*5748Sduboff 			val = dp->full_duplex ? 2 : 1;
3444*5748Sduboff 		}
3445*5748Sduboff 		break;
3446*5748Sduboff 
3447*5748Sduboff 	case PARAM_LINK_AUTONEG:
3448*5748Sduboff 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3449*5748Sduboff 		break;
3450*5748Sduboff 
3451*5748Sduboff 	case PARAM_LINK_RX_PAUSE:
3452*5748Sduboff 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3453*5748Sduboff 		    (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3454*5748Sduboff 		break;
3455*5748Sduboff 
3456*5748Sduboff 	case PARAM_LINK_TX_PAUSE:
3457*5748Sduboff 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3458*5748Sduboff 		    (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3459*5748Sduboff 		break;
3460*5748Sduboff 
3461*5748Sduboff #ifdef DEBUG_RESUME
3462*5748Sduboff 	case PARAM_RESUME_TEST:
3463*5748Sduboff 		val = 0;
3464*5748Sduboff 		break;
3465*5748Sduboff #endif
3466*5748Sduboff 	default:
3467*5748Sduboff 		cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3468*5748Sduboff 		    dp->name, item);
3469*5748Sduboff 		break;
3470*5748Sduboff 	}
3471*5748Sduboff 
3472*5748Sduboff 	(void) mi_mpprintf(mp, "%ld", val);
3473*5748Sduboff 
3474*5748Sduboff 	return (0);
3475*5748Sduboff }
3476*5748Sduboff 
3477*5748Sduboff static int
3478*5748Sduboff gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
3479*5748Sduboff {
3480*5748Sduboff 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3481*5748Sduboff 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3482*5748Sduboff 	long		val;
3483*5748Sduboff 	char		*end;
3484*5748Sduboff 
3485*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3486*5748Sduboff 	if (ddi_strtol(value, &end, 10, &val)) {
3487*5748Sduboff 		return (EINVAL);
3488*5748Sduboff 	}
3489*5748Sduboff 	if (end == value) {
3490*5748Sduboff 		return (EINVAL);
3491*5748Sduboff 	}
3492*5748Sduboff 
3493*5748Sduboff 	switch (item) {
3494*5748Sduboff 	case PARAM_ADV_AUTONEG_CAP:
3495*5748Sduboff 		if (val != 0 && val != 1) {
3496*5748Sduboff 			goto err;
3497*5748Sduboff 		}
3498*5748Sduboff 		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3499*5748Sduboff 			goto err;
3500*5748Sduboff 		}
3501*5748Sduboff 		dp->anadv_autoneg = (int)val;
3502*5748Sduboff 		break;
3503*5748Sduboff 
3504*5748Sduboff 	case PARAM_ADV_PAUSE_CAP:
3505*5748Sduboff 		if (val != 0 && val != 1) {
3506*5748Sduboff 			goto err;
3507*5748Sduboff 		}
3508*5748Sduboff 		if (val) {
3509*5748Sduboff 			dp->anadv_flow_control |= 1;
3510*5748Sduboff 		} else {
3511*5748Sduboff 			dp->anadv_flow_control &= ~1;
3512*5748Sduboff 		}
3513*5748Sduboff 		break;
3514*5748Sduboff 
3515*5748Sduboff 	case PARAM_ADV_ASYM_PAUSE_CAP:
3516*5748Sduboff 		if (val != 0 && val != 1) {
3517*5748Sduboff 			goto err;
3518*5748Sduboff 		}
3519*5748Sduboff 		if (val) {
3520*5748Sduboff 			dp->anadv_flow_control |= 2;
3521*5748Sduboff 		} else {
3522*5748Sduboff 			dp->anadv_flow_control &= ~2;
3523*5748Sduboff 		}
3524*5748Sduboff 		break;
3525*5748Sduboff 
3526*5748Sduboff 	case PARAM_ADV_1000FDX_CAP:
3527*5748Sduboff 		if (val != 0 && val != 1) {
3528*5748Sduboff 			goto err;
3529*5748Sduboff 		}
3530*5748Sduboff 		if (val && (dp->mii_xstatus &
3531*5748Sduboff 		    (MII_XSTATUS_1000BASET_FD |
3532*5748Sduboff 		    MII_XSTATUS_1000BASEX_FD)) == 0) {
3533*5748Sduboff 			goto err;
3534*5748Sduboff 		}
3535*5748Sduboff 		dp->anadv_1000fdx = (int)val;
3536*5748Sduboff 		break;
3537*5748Sduboff 
3538*5748Sduboff 	case PARAM_ADV_1000HDX_CAP:
3539*5748Sduboff 		if (val != 0 && val != 1) {
3540*5748Sduboff 			goto err;
3541*5748Sduboff 		}
3542*5748Sduboff 		if (val && (dp->mii_xstatus &
3543*5748Sduboff 		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
3544*5748Sduboff 			goto err;
3545*5748Sduboff 		}
3546*5748Sduboff 		dp->anadv_1000hdx = (int)val;
3547*5748Sduboff 		break;
3548*5748Sduboff 
3549*5748Sduboff 	case PARAM_ADV_100T4_CAP:
3550*5748Sduboff 		if (val != 0 && val != 1) {
3551*5748Sduboff 			goto err;
3552*5748Sduboff 		}
3553*5748Sduboff 		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3554*5748Sduboff 			goto err;
3555*5748Sduboff 		}
3556*5748Sduboff 		dp->anadv_100t4 = (int)val;
3557*5748Sduboff 		break;
3558*5748Sduboff 
3559*5748Sduboff 	case PARAM_ADV_100FDX_CAP:
3560*5748Sduboff 		if (val != 0 && val != 1) {
3561*5748Sduboff 			goto err;
3562*5748Sduboff 		}
3563*5748Sduboff 		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3564*5748Sduboff 			goto err;
3565*5748Sduboff 		}
3566*5748Sduboff 		dp->anadv_100fdx = (int)val;
3567*5748Sduboff 		break;
3568*5748Sduboff 
3569*5748Sduboff 	case PARAM_ADV_100HDX_CAP:
3570*5748Sduboff 		if (val != 0 && val != 1) {
3571*5748Sduboff 			goto err;
3572*5748Sduboff 		}
3573*5748Sduboff 		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3574*5748Sduboff 			goto err;
3575*5748Sduboff 		}
3576*5748Sduboff 		dp->anadv_100hdx = (int)val;
3577*5748Sduboff 		break;
3578*5748Sduboff 
3579*5748Sduboff 	case PARAM_ADV_10FDX_CAP:
3580*5748Sduboff 		if (val != 0 && val != 1) {
3581*5748Sduboff 			goto err;
3582*5748Sduboff 		}
3583*5748Sduboff 		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3584*5748Sduboff 			goto err;
3585*5748Sduboff 		}
3586*5748Sduboff 		dp->anadv_10fdx = (int)val;
3587*5748Sduboff 		break;
3588*5748Sduboff 
3589*5748Sduboff 	case PARAM_ADV_10HDX_CAP:
3590*5748Sduboff 		if (val != 0 && val != 1) {
3591*5748Sduboff 			goto err;
3592*5748Sduboff 		}
3593*5748Sduboff 		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3594*5748Sduboff 			goto err;
3595*5748Sduboff 		}
3596*5748Sduboff 		dp->anadv_10hdx = (int)val;
3597*5748Sduboff 		break;
3598*5748Sduboff 	}
3599*5748Sduboff 
3600*5748Sduboff 	/* sync with PHY */
3601*5748Sduboff 	gem_choose_forcedmode(dp);
3602*5748Sduboff 
3603*5748Sduboff 	dp->mii_state = MII_STATE_UNKNOWN;
3604*5748Sduboff 	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3605*5748Sduboff 		/* XXX - Can we ignore the return code ? */
3606*5748Sduboff 		(void) gem_mii_link_check(dp);
3607*5748Sduboff 	}
3608*5748Sduboff 
3609*5748Sduboff 	return (0);
3610*5748Sduboff err:
3611*5748Sduboff 	return (EINVAL);
3612*5748Sduboff }
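/*
 * The parameters above are exposed through the ND interface and can
 * be driven with ndd(1M).  A hypothetical example (the exact device
 * node depends on the system configuration):
 *
 *	# ndd -set /dev/sfe0 adv_100fdx_cap 0
 *
 * After a successful set, the code above resets the link state
 * machine to MII_STATE_UNKNOWN so that renegotiation picks up the
 * new advertisement.
 */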
3613*5748Sduboff 
3614*5748Sduboff static void
3615*5748Sduboff gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3616*5748Sduboff {
3617*5748Sduboff 	struct gem_nd_arg	*arg;
3618*5748Sduboff 
3619*5748Sduboff 	ASSERT(item >= 0);
3620*5748Sduboff 	ASSERT(item < PARAM_COUNT);
3621*5748Sduboff 
3622*5748Sduboff 	arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3623*5748Sduboff 	arg->dp = dp;
3624*5748Sduboff 	arg->item = item;
3625*5748Sduboff 
3626*5748Sduboff 	DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3627*5748Sduboff 	    dp->name, __func__, name, item));
3628*5748Sduboff 	(void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3629*5748Sduboff }
3630*5748Sduboff 
3631*5748Sduboff static void
3632*5748Sduboff gem_nd_setup(struct gem_dev *dp)
3633*5748Sduboff {
3634*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3635*5748Sduboff 	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3636*5748Sduboff 
3637*5748Sduboff 	ASSERT(dp->nd_arg_p == NULL);
3638*5748Sduboff 
3639*5748Sduboff 	dp->nd_arg_p =
3640*5748Sduboff 	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3641*5748Sduboff 
3642*5748Sduboff #define	SETFUNC(x)	((x) ? gem_param_set : NULL)
3643*5748Sduboff 
3644*5748Sduboff 	gem_nd_load(dp, "autoneg_cap",
3645*5748Sduboff 	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
3646*5748Sduboff 	gem_nd_load(dp, "pause_cap",
3647*5748Sduboff 	    gem_param_get, NULL, PARAM_PAUSE_CAP);
3648*5748Sduboff 	gem_nd_load(dp, "asym_pause_cap",
3649*5748Sduboff 	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3650*5748Sduboff 	gem_nd_load(dp, "1000fdx_cap",
3651*5748Sduboff 	    gem_param_get, NULL, PARAM_1000FDX_CAP);
3652*5748Sduboff 	gem_nd_load(dp, "1000hdx_cap",
3653*5748Sduboff 	    gem_param_get, NULL, PARAM_1000HDX_CAP);
3654*5748Sduboff 	gem_nd_load(dp, "100T4_cap",
3655*5748Sduboff 	    gem_param_get, NULL, PARAM_100T4_CAP);
3656*5748Sduboff 	gem_nd_load(dp, "100fdx_cap",
3657*5748Sduboff 	    gem_param_get, NULL, PARAM_100FDX_CAP);
3658*5748Sduboff 	gem_nd_load(dp, "100hdx_cap",
3659*5748Sduboff 	    gem_param_get, NULL, PARAM_100HDX_CAP);
3660*5748Sduboff 	gem_nd_load(dp, "10fdx_cap",
3661*5748Sduboff 	    gem_param_get, NULL, PARAM_10FDX_CAP);
3662*5748Sduboff 	gem_nd_load(dp, "10hdx_cap",
3663*5748Sduboff 	    gem_param_get, NULL, PARAM_10HDX_CAP);
3664*5748Sduboff 
3665*5748Sduboff 	/* Our advertised capabilities */
3666*5748Sduboff 	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3667*5748Sduboff 	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3668*5748Sduboff 	    PARAM_ADV_AUTONEG_CAP);
3669*5748Sduboff 	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3670*5748Sduboff 	    SETFUNC(dp->gc.gc_flow_control & 1),
3671*5748Sduboff 	    PARAM_ADV_PAUSE_CAP);
3672*5748Sduboff 	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3673*5748Sduboff 	    SETFUNC(dp->gc.gc_flow_control & 2),
3674*5748Sduboff 	    PARAM_ADV_ASYM_PAUSE_CAP);
3675*5748Sduboff 	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3676*5748Sduboff 	    SETFUNC(dp->mii_xstatus &
3677*5748Sduboff 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3678*5748Sduboff 	    PARAM_ADV_1000FDX_CAP);
3679*5748Sduboff 	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3680*5748Sduboff 	    SETFUNC(dp->mii_xstatus &
3681*5748Sduboff 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3682*5748Sduboff 	    PARAM_ADV_1000HDX_CAP);
3683*5748Sduboff 	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3684*5748Sduboff 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3685*5748Sduboff 	    !dp->mii_advert_ro),
3686*5748Sduboff 	    PARAM_ADV_100T4_CAP);
3687*5748Sduboff 	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3688*5748Sduboff 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3689*5748Sduboff 	    !dp->mii_advert_ro),
3690*5748Sduboff 	    PARAM_ADV_100FDX_CAP);
3691*5748Sduboff 	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3692*5748Sduboff 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3693*5748Sduboff 	    !dp->mii_advert_ro),
3694*5748Sduboff 	    PARAM_ADV_100HDX_CAP);
3695*5748Sduboff 	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3696*5748Sduboff 	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3697*5748Sduboff 	    !dp->mii_advert_ro),
3698*5748Sduboff 	    PARAM_ADV_10FDX_CAP);
3699*5748Sduboff 	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3700*5748Sduboff 	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
3701*5748Sduboff 	    !dp->mii_advert_ro),
3702*5748Sduboff 	    PARAM_ADV_10HDX_CAP);
3703*5748Sduboff 
3704*5748Sduboff 	/* Partner's advertised capabilities */
3705*5748Sduboff 	gem_nd_load(dp, "lp_autoneg_cap",
3706*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3707*5748Sduboff 	gem_nd_load(dp, "lp_pause_cap",
3708*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3709*5748Sduboff 	gem_nd_load(dp, "lp_asym_pause_cap",
3710*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3711*5748Sduboff 	gem_nd_load(dp, "lp_1000fdx_cap",
3712*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3713*5748Sduboff 	gem_nd_load(dp, "lp_1000hdx_cap",
3714*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3715*5748Sduboff 	gem_nd_load(dp, "lp_100T4_cap",
3716*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
3717*5748Sduboff 	gem_nd_load(dp, "lp_100fdx_cap",
3718*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3719*5748Sduboff 	gem_nd_load(dp, "lp_100hdx_cap",
3720*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3721*5748Sduboff 	gem_nd_load(dp, "lp_10fdx_cap",
3722*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3723*5748Sduboff 	gem_nd_load(dp, "lp_10hdx_cap",
3724*5748Sduboff 	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3725*5748Sduboff 
3726*5748Sduboff 	/* Current operating modes */
3727*5748Sduboff 	gem_nd_load(dp, "link_status",
3728*5748Sduboff 	    gem_param_get, NULL, PARAM_LINK_STATUS);
3729*5748Sduboff 	gem_nd_load(dp, "link_speed",
3730*5748Sduboff 	    gem_param_get, NULL, PARAM_LINK_SPEED);
3731*5748Sduboff 	gem_nd_load(dp, "link_duplex",
3732*5748Sduboff 	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
3733*5748Sduboff 	gem_nd_load(dp, "link_autoneg",
3734*5748Sduboff 	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
3735*5748Sduboff 	gem_nd_load(dp, "link_rx_pause",
3736*5748Sduboff 	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3737*5748Sduboff 	gem_nd_load(dp, "link_tx_pause",
3738*5748Sduboff 	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3739*5748Sduboff #ifdef DEBUG_RESUME
3740*5748Sduboff 	gem_nd_load(dp, "resume_test",
3741*5748Sduboff 	    gem_param_get, NULL, PARAM_RESUME_TEST);
3742*5748Sduboff #endif
3743*5748Sduboff #undef	SETFUNC
3744*5748Sduboff }
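/*
 * With the table above loaded, current link state can be read back
 * the same way, e.g. (hypothetical device node):
 *
 *	# ndd -get /dev/sfe0 link_speed
 */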
3745*5748Sduboff 
3746*5748Sduboff static
3747*5748Sduboff enum ioc_reply
3748*5748Sduboff gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3749*5748Sduboff {
3750*5748Sduboff 	boolean_t	ok;
3751*5748Sduboff 
3752*5748Sduboff 	ASSERT(mutex_owned(&dp->intrlock));
3753*5748Sduboff 
3754*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3755*5748Sduboff 
3756*5748Sduboff 	switch (iocp->ioc_cmd) {
3757*5748Sduboff 	case ND_GET:
3758*5748Sduboff 		ok = nd_getset(wq, dp->nd_data_p, mp);
3759*5748Sduboff 		DPRINTF(0, (CE_CONT,
3760*5748Sduboff 		    "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3761*5748Sduboff 		return (ok ? IOC_REPLY : IOC_INVAL);
3762*5748Sduboff 
3763*5748Sduboff 	case ND_SET:
3764*5748Sduboff 		ok = nd_getset(wq, dp->nd_data_p, mp);
3765*5748Sduboff 
3766*5748Sduboff 		DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3767*5748Sduboff 		    dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3768*5748Sduboff 
3769*5748Sduboff 		if (!ok) {
3770*5748Sduboff 			return (IOC_INVAL);
3771*5748Sduboff 		}
3772*5748Sduboff 
3773*5748Sduboff 		if (iocp->ioc_error) {
3774*5748Sduboff 			return (IOC_REPLY);
3775*5748Sduboff 		}
3776*5748Sduboff 
3777*5748Sduboff 		return (IOC_RESTART_REPLY);
3778*5748Sduboff 	}
3779*5748Sduboff 
3780*5748Sduboff 	cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3781*5748Sduboff 
3782*5748Sduboff 	return (IOC_INVAL);
3783*5748Sduboff }
3784*5748Sduboff 
3785*5748Sduboff static void
3786*5748Sduboff gem_nd_cleanup(struct gem_dev *dp)
3787*5748Sduboff {
3788*5748Sduboff 	ASSERT(dp->nd_data_p != NULL);
3789*5748Sduboff 	ASSERT(dp->nd_arg_p != NULL);
3790*5748Sduboff 
3791*5748Sduboff 	nd_free(&dp->nd_data_p);
3792*5748Sduboff 
3793*5748Sduboff 	kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3794*5748Sduboff 	dp->nd_arg_p = NULL;
3795*5748Sduboff }
3796*5748Sduboff 
3797*5748Sduboff static void
3798*5748Sduboff gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3799*5748Sduboff {
3800*5748Sduboff 	struct iocblk	*iocp;
3801*5748Sduboff 	enum ioc_reply	status;
3802*5748Sduboff 	int		cmd;
3803*5748Sduboff 
3804*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3805*5748Sduboff 
3806*5748Sduboff 	/*
3807*5748Sduboff 	 * Validate the command before bothering with the mutex ...
3808*5748Sduboff 	 */
3809*5748Sduboff 	iocp = (void *)mp->b_rptr;
3810*5748Sduboff 	iocp->ioc_error = 0;
3811*5748Sduboff 	cmd = iocp->ioc_cmd;
3812*5748Sduboff 
3813*5748Sduboff 	DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3814*5748Sduboff 
3815*5748Sduboff 	mutex_enter(&dp->intrlock);
3816*5748Sduboff 	mutex_enter(&dp->xmitlock);
3817*5748Sduboff 
3818*5748Sduboff 	switch (cmd) {
3819*5748Sduboff 	default:
3820*5748Sduboff 		_NOTE(NOTREACHED)
3821*5748Sduboff 		status = IOC_INVAL;
3822*5748Sduboff 		break;
3823*5748Sduboff 
3824*5748Sduboff 	case ND_GET:
3825*5748Sduboff 	case ND_SET:
3826*5748Sduboff 		status = gem_nd_ioctl(dp, wq, mp, iocp);
3827*5748Sduboff 		break;
3828*5748Sduboff 	}
3829*5748Sduboff 
3830*5748Sduboff 	mutex_exit(&dp->xmitlock);
3831*5748Sduboff 	mutex_exit(&dp->intrlock);
3832*5748Sduboff 
3833*5748Sduboff #ifdef DEBUG_RESUME
3834*5748Sduboff 	if (cmd == ND_GET)  {
3835*5748Sduboff 		gem_suspend(dp->dip);
3836*5748Sduboff 		gem_resume(dp->dip);
3837*5748Sduboff 	}
3838*5748Sduboff #endif
3839*5748Sduboff 	/*
3840*5748Sduboff 	 * Finally, decide how to reply
3841*5748Sduboff 	 */
3842*5748Sduboff 	switch (status) {
3843*5748Sduboff 	default:
3844*5748Sduboff 	case IOC_INVAL:
3845*5748Sduboff 		/*
3846*5748Sduboff 		 * Error, reply with a NAK and EINVAL or the specified error
3847*5748Sduboff 		 */
3848*5748Sduboff 		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3849*5748Sduboff 		    EINVAL : iocp->ioc_error);
3850*5748Sduboff 		break;
3851*5748Sduboff 
3852*5748Sduboff 	case IOC_DONE:
3853*5748Sduboff 		/*
3854*5748Sduboff 		 * OK, reply already sent
3855*5748Sduboff 		 */
3856*5748Sduboff 		break;
3857*5748Sduboff 
3858*5748Sduboff 	case IOC_RESTART_ACK:
3859*5748Sduboff 	case IOC_ACK:
3860*5748Sduboff 		/*
3861*5748Sduboff 		 * OK, reply with an ACK
3862*5748Sduboff 		 */
3863*5748Sduboff 		miocack(wq, mp, 0, 0);
3864*5748Sduboff 		break;
3865*5748Sduboff 
3866*5748Sduboff 	case IOC_RESTART_REPLY:
3867*5748Sduboff 	case IOC_REPLY:
3868*5748Sduboff 		/*
3869*5748Sduboff 		 * OK, send prepared reply as ACK or NAK
3870*5748Sduboff 		 */
3871*5748Sduboff 		mp->b_datap->db_type =
3872*5748Sduboff 		    iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
3873*5748Sduboff 		qreply(wq, mp);
3874*5748Sduboff 		break;
3875*5748Sduboff 	}
3876*5748Sduboff }
3877*5748Sduboff 
3878*5748Sduboff #ifndef SYS_MAC_H
3879*5748Sduboff #define	XCVR_UNDEFINED	0
3880*5748Sduboff #define	XCVR_NONE	1
3881*5748Sduboff #define	XCVR_10		2
3882*5748Sduboff #define	XCVR_100T4	3
3883*5748Sduboff #define	XCVR_100X	4
3884*5748Sduboff #define	XCVR_100T2	5
3885*5748Sduboff #define	XCVR_1000X	6
3886*5748Sduboff #define	XCVR_1000T	7
3887*5748Sduboff #endif
3888*5748Sduboff static int
3889*5748Sduboff gem_mac_xcvr_inuse(struct gem_dev *dp)
3890*5748Sduboff {
3891*5748Sduboff 	int	val = XCVR_UNDEFINED;
3892*5748Sduboff 
3893*5748Sduboff 	if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
3894*5748Sduboff 		if (dp->mii_status & MII_STATUS_100_BASE_T4) {
3895*5748Sduboff 			val = XCVR_100T4;
3896*5748Sduboff 		} else if (dp->mii_status &
3897*5748Sduboff 		    (MII_STATUS_100_BASEX_FD |
3898*5748Sduboff 		    MII_STATUS_100_BASEX)) {
3899*5748Sduboff 			val = XCVR_100X;
3900*5748Sduboff 		} else if (dp->mii_status &
3901*5748Sduboff 		    (MII_STATUS_100_BASE_T2_FD |
3902*5748Sduboff 		    MII_STATUS_100_BASE_T2)) {
3903*5748Sduboff 			val = XCVR_100T2;
3904*5748Sduboff 		} else if (dp->mii_status &
3905*5748Sduboff 		    (MII_STATUS_10_FD | MII_STATUS_10)) {
3906*5748Sduboff 			val = XCVR_10;
3907*5748Sduboff 		}
3908*5748Sduboff 	} else if (dp->mii_xstatus &
3909*5748Sduboff 	    (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
3910*5748Sduboff 		val = XCVR_1000T;
3911*5748Sduboff 	} else if (dp->mii_xstatus &
3912*5748Sduboff 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
3913*5748Sduboff 		val = XCVR_1000X;
3914*5748Sduboff 	}
3915*5748Sduboff 
3916*5748Sduboff 	return (val);
3917*5748Sduboff }
3918*5748Sduboff 
3919*5748Sduboff /* ============================================================== */
3920*5748Sduboff /*
3921*5748Sduboff  * GLDv3 interface
3922*5748Sduboff  */
3923*5748Sduboff /* ============================================================== */
3924*5748Sduboff static int		gem_m_getstat(void *, uint_t, uint64_t *);
3925*5748Sduboff static int		gem_m_start(void *);
3926*5748Sduboff static void		gem_m_stop(void *);
3927*5748Sduboff static int		gem_m_setpromisc(void *, boolean_t);
3928*5748Sduboff static int		gem_m_multicst(void *, boolean_t, const uint8_t *);
3929*5748Sduboff static int		gem_m_unicst(void *, const uint8_t *);
3930*5748Sduboff static mblk_t		*gem_m_tx(void *, mblk_t *);
3931*5748Sduboff static void		gem_m_resources(void *);
3932*5748Sduboff static void		gem_m_ioctl(void *, queue_t *, mblk_t *);
3933*5748Sduboff static boolean_t	gem_m_getcapab(void *, mac_capab_t, void *);
3934*5748Sduboff 
3935*5748Sduboff #define	GEM_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
3936*5748Sduboff 
3937*5748Sduboff static mac_callbacks_t gem_m_callbacks = {
3938*5748Sduboff 	GEM_M_CALLBACK_FLAGS,
3939*5748Sduboff 	gem_m_getstat,
3940*5748Sduboff 	gem_m_start,
3941*5748Sduboff 	gem_m_stop,
3942*5748Sduboff 	gem_m_setpromisc,
3943*5748Sduboff 	gem_m_multicst,
3944*5748Sduboff 	gem_m_unicst,
3945*5748Sduboff 	gem_m_tx,
3946*5748Sduboff 	gem_m_resources,
3947*5748Sduboff 	gem_m_ioctl,
3948*5748Sduboff 	gem_m_getcapab,
3949*5748Sduboff };
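/*
 * This callback table is handed to the GLDv3 framework when the
 * driver registers its mac instance (done elsewhere in this file).
 * A minimal sketch of that registration, assuming the standard
 * mac_alloc()/mac_register() sequence; dp->mtu is a hypothetical
 * field name used only for illustration:
 *
 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = dp;
 *	macp->m_dip = dp->dip;
 *	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
 *	macp->m_callbacks = &gem_m_callbacks;
 *	macp->m_min_sdu = 0;
 *	macp->m_max_sdu = dp->mtu;
 *	(void) mac_register(macp, &dp->mh);
 *	mac_free(macp);
 */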
3950*5748Sduboff 
3951*5748Sduboff static int
3952*5748Sduboff gem_m_start(void *arg)
3953*5748Sduboff {
3954*5748Sduboff 	int		err = 0;
3955*5748Sduboff 	struct gem_dev *dp = arg;
3956*5748Sduboff 
3957*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3958*5748Sduboff 
3959*5748Sduboff 	mutex_enter(&dp->intrlock);
3960*5748Sduboff 	if (dp->mac_suspended) {
3961*5748Sduboff 		err = EIO;
3962*5748Sduboff 		goto x;
3963*5748Sduboff 	}
3964*5748Sduboff 	if (gem_mac_init(dp) != GEM_SUCCESS) {
3965*5748Sduboff 		err = EIO;
3966*5748Sduboff 		goto x;
3967*5748Sduboff 	}
3968*5748Sduboff 	dp->nic_state = NIC_STATE_INITIALIZED;
3969*5748Sduboff 
3970*5748Sduboff 	/* reset rx filter state */
3971*5748Sduboff 	dp->mc_count = 0;
3972*5748Sduboff 	dp->mc_count_req = 0;
3973*5748Sduboff 
3974*5748Sduboff 	/* set up media mode if the link has been up */
3975*5748Sduboff 	if (dp->mii_state == MII_STATE_LINKUP) {
3976*5748Sduboff 		(dp->gc.gc_set_media)(dp);
3977*5748Sduboff 	}
3978*5748Sduboff 
3979*5748Sduboff 	/* setup initial rx filter */
3980*5748Sduboff 	bcopy(dp->dev_addr.ether_addr_octet,
3981*5748Sduboff 	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
3982*5748Sduboff 	dp->rxmode |= RXMODE_ENABLE;
3983*5748Sduboff 
3984*5748Sduboff 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
3985*5748Sduboff 		err = EIO;
3986*5748Sduboff 		goto x;
3987*5748Sduboff 	}
3988*5748Sduboff 
3989*5748Sduboff 	dp->nic_state = NIC_STATE_ONLINE;
3990*5748Sduboff 	if (dp->mii_state == MII_STATE_LINKUP) {
3991*5748Sduboff 		if (gem_mac_start(dp) != GEM_SUCCESS) {
3992*5748Sduboff 			err = EIO;
3993*5748Sduboff 			goto x;
3994*5748Sduboff 		}
3995*5748Sduboff 	}
3996*5748Sduboff 
3997*5748Sduboff 	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
3998*5748Sduboff 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
3999*5748Sduboff 	mutex_exit(&dp->intrlock);
4000*5748Sduboff 
4001*5748Sduboff 	return (0);
4002*5748Sduboff x:
4003*5748Sduboff 	dp->nic_state = NIC_STATE_STOPPED;
4004*5748Sduboff 	mutex_exit(&dp->intrlock);
4005*5748Sduboff 	return (err);
4006*5748Sduboff }
4007*5748Sduboff 
4008*5748Sduboff static void
4009*5748Sduboff gem_m_stop(void *arg)
4010*5748Sduboff {
4011*5748Sduboff 	struct gem_dev	*dp = arg;
4012*5748Sduboff 
4013*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4014*5748Sduboff 
4015*5748Sduboff 	/* stop rx */
4016*5748Sduboff 	mutex_enter(&dp->intrlock);
4017*5748Sduboff 	if (dp->mac_suspended) {
4018*5748Sduboff 		mutex_exit(&dp->intrlock);
4019*5748Sduboff 		return;
4020*5748Sduboff 	}
4021*5748Sduboff 	dp->rxmode &= ~RXMODE_ENABLE;
4022*5748Sduboff 	(void) gem_mac_set_rx_filter(dp);
4023*5748Sduboff 	mutex_exit(&dp->intrlock);
4024*5748Sduboff 
4025*5748Sduboff 	/* stop tx timeout watcher */
4026*5748Sduboff 	if (dp->timeout_id) {
4027*5748Sduboff 		while (untimeout(dp->timeout_id) == -1)
4028*5748Sduboff 			;
4029*5748Sduboff 		dp->timeout_id = 0;
4030*5748Sduboff 	}
4031*5748Sduboff 
4032*5748Sduboff 	/* make the nic state inactive */
4033*5748Sduboff 	mutex_enter(&dp->intrlock);
4034*5748Sduboff 	if (dp->mac_suspended) {
4035*5748Sduboff 		mutex_exit(&dp->intrlock);
4036*5748Sduboff 		return;
4037*5748Sduboff 	}
4038*5748Sduboff 	dp->nic_state = NIC_STATE_STOPPED;
4039*5748Sduboff 
4040*5748Sduboff 	/* we need to deassert mac_active to block the interrupt handler */
4041*5748Sduboff 	mutex_enter(&dp->xmitlock);
4042*5748Sduboff 	dp->mac_active = B_FALSE;
4043*5748Sduboff 	mutex_exit(&dp->xmitlock);
4044*5748Sduboff 
4045*5748Sduboff 	/* block interrupts */
4046*5748Sduboff 	while (dp->intr_busy) {
4047*5748Sduboff 		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4048*5748Sduboff 	}
4049*5748Sduboff 	(void) gem_mac_stop(dp, 0);
4050*5748Sduboff 	mutex_exit(&dp->intrlock);
4051*5748Sduboff }
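/*
 * Shutdown order in gem_m_stop: disable the rx filter first so no
 * new frames arrive, cancel the tx timeout watcher, deassert
 * mac_active so the interrupt handler stops scheduling work, wait
 * for any handler still inside the driver (intr_busy), and only then
 * stop the chip.
 */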
4052*5748Sduboff 
4053*5748Sduboff static int
4054*5748Sduboff gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4055*5748Sduboff {
4056*5748Sduboff 	int		err;
4057*5748Sduboff 	int		ret;
4058*5748Sduboff 	struct gem_dev	*dp = arg;
4059*5748Sduboff 
4060*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4061*5748Sduboff 
4062*5748Sduboff 	if (add) {
4063*5748Sduboff 		ret = gem_add_multicast(dp, ep);
4064*5748Sduboff 	} else {
4065*5748Sduboff 		ret = gem_remove_multicast(dp, ep);
4066*5748Sduboff 	}
4067*5748Sduboff 
4068*5748Sduboff 	err = 0;
4069*5748Sduboff 	if (ret != GEM_SUCCESS) {
4070*5748Sduboff 		err = EIO;
4071*5748Sduboff 	}
4072*5748Sduboff 
4073*5748Sduboff 	return (err);
4074*5748Sduboff }
4075*5748Sduboff 
4076*5748Sduboff static int
4077*5748Sduboff gem_m_setpromisc(void *arg, boolean_t on)
4078*5748Sduboff {
4079*5748Sduboff 	int		err = 0;	/* no error */
4080*5748Sduboff 	struct gem_dev	*dp = arg;
4081*5748Sduboff 
4082*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4083*5748Sduboff 
4084*5748Sduboff 	mutex_enter(&dp->intrlock);
4085*5748Sduboff 	if (dp->mac_suspended) {
4086*5748Sduboff 		mutex_exit(&dp->intrlock);
4087*5748Sduboff 		return (EIO);
4088*5748Sduboff 	}
4089*5748Sduboff 	if (on) {
4090*5748Sduboff 		dp->rxmode |= RXMODE_PROMISC;
4091*5748Sduboff 	} else {
4092*5748Sduboff 		dp->rxmode &= ~RXMODE_PROMISC;
4093*5748Sduboff 	}
4094*5748Sduboff 
4095*5748Sduboff 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4096*5748Sduboff 		err = EIO;
4097*5748Sduboff 	}
4098*5748Sduboff 	mutex_exit(&dp->intrlock);
4099*5748Sduboff 
4100*5748Sduboff 	return (err);
4101*5748Sduboff }
4102*5748Sduboff 
4103*5748Sduboff int
4104*5748Sduboff gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4105*5748Sduboff {
4106*5748Sduboff 	struct gem_dev		*dp = arg;
4107*5748Sduboff 	struct gem_stats	*gstp = &dp->stats;
4108*5748Sduboff 	uint64_t		val = 0;
4109*5748Sduboff 
4110*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4111*5748Sduboff 
4112*5748Sduboff 	mutex_enter(&dp->intrlock);
4113*5748Sduboff 	if (dp->mac_suspended) {
4114*5748Sduboff 		mutex_exit(&dp->intrlock);
4115*5748Sduboff 		return (EIO);
4116*5748Sduboff 	}
4117*5748Sduboff 	mutex_exit(&dp->intrlock);
4118*5748Sduboff 
4119*5748Sduboff 	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4120*5748Sduboff 		return (EIO);
4121*5748Sduboff 	}
4122*5748Sduboff 
4123*5748Sduboff 	switch (stat) {
4124*5748Sduboff 	case MAC_STAT_IFSPEED:
4125*5748Sduboff 		val = gem_speed_value[dp->speed] * 1000000ull;
4126*5748Sduboff 		break;
4127*5748Sduboff 
4128*5748Sduboff 	case MAC_STAT_MULTIRCV:
4129*5748Sduboff 		val = gstp->rmcast;
4130*5748Sduboff 		break;
4131*5748Sduboff 
4132*5748Sduboff 	case MAC_STAT_BRDCSTRCV:
4133*5748Sduboff 		val = gstp->rbcast;
4134*5748Sduboff 		break;
4135*5748Sduboff 
4136*5748Sduboff 	case MAC_STAT_MULTIXMT:
4137*5748Sduboff 		val = gstp->omcast;
4138*5748Sduboff 		break;
4139*5748Sduboff 
4140*5748Sduboff 	case MAC_STAT_BRDCSTXMT:
4141*5748Sduboff 		val = gstp->obcast;
4142*5748Sduboff 		break;
4143*5748Sduboff 
4144*5748Sduboff 	case MAC_STAT_NORCVBUF:
4145*5748Sduboff 		val = gstp->norcvbuf + gstp->missed;
4146*5748Sduboff 		break;
4147*5748Sduboff 
4148*5748Sduboff 	case MAC_STAT_IERRORS:
4149*5748Sduboff 		val = gstp->errrcv;
4150*5748Sduboff 		break;
4151*5748Sduboff 
4152*5748Sduboff 	case MAC_STAT_NOXMTBUF:
4153*5748Sduboff 		val = gstp->noxmtbuf;
4154*5748Sduboff 		break;
4155*5748Sduboff 
4156*5748Sduboff 	case MAC_STAT_OERRORS:
4157*5748Sduboff 		val = gstp->errxmt;
4158*5748Sduboff 		break;
4159*5748Sduboff 
4160*5748Sduboff 	case MAC_STAT_COLLISIONS:
4161*5748Sduboff 		val = gstp->collisions;
4162*5748Sduboff 		break;
4163*5748Sduboff 
4164*5748Sduboff 	case MAC_STAT_RBYTES:
4165*5748Sduboff 		val = gstp->rbytes;
4166*5748Sduboff 		break;
4167*5748Sduboff 
4168*5748Sduboff 	case MAC_STAT_IPACKETS:
4169*5748Sduboff 		val = gstp->rpackets;
4170*5748Sduboff 		break;
4171*5748Sduboff 
4172*5748Sduboff 	case MAC_STAT_OBYTES:
4173*5748Sduboff 		val = gstp->obytes;
4174*5748Sduboff 		break;
4175*5748Sduboff 
4176*5748Sduboff 	case MAC_STAT_OPACKETS:
4177*5748Sduboff 		val = gstp->opackets;
4178*5748Sduboff 		break;
4179*5748Sduboff 
4180*5748Sduboff 	case MAC_STAT_UNDERFLOWS:
4181*5748Sduboff 		val = gstp->underflow;
4182*5748Sduboff 		break;
4183*5748Sduboff 
4184*5748Sduboff 	case MAC_STAT_OVERFLOWS:
4185*5748Sduboff 		val = gstp->overflow;
4186*5748Sduboff 		break;
4187*5748Sduboff 
4188*5748Sduboff 	case ETHER_STAT_ALIGN_ERRORS:
4189*5748Sduboff 		val = gstp->frame;
4190*5748Sduboff 		break;
4191*5748Sduboff 
4192*5748Sduboff 	case ETHER_STAT_FCS_ERRORS:
4193*5748Sduboff 		val = gstp->crc;
4194*5748Sduboff 		break;
4195*5748Sduboff 
4196*5748Sduboff 	case ETHER_STAT_FIRST_COLLISIONS:
4197*5748Sduboff 		val = gstp->first_coll;
4198*5748Sduboff 		break;
4199*5748Sduboff 
4200*5748Sduboff 	case ETHER_STAT_MULTI_COLLISIONS:
4201*5748Sduboff 		val = gstp->multi_coll;
4202*5748Sduboff 		break;
4203*5748Sduboff 
4204*5748Sduboff 	case ETHER_STAT_SQE_ERRORS:
4205*5748Sduboff 		val = gstp->sqe;
4206*5748Sduboff 		break;
4207*5748Sduboff 
4208*5748Sduboff 	case ETHER_STAT_DEFER_XMTS:
4209*5748Sduboff 		val = gstp->defer;
4210*5748Sduboff 		break;
4211*5748Sduboff 
4212*5748Sduboff 	case ETHER_STAT_TX_LATE_COLLISIONS:
4213*5748Sduboff 		val = gstp->xmtlatecoll;
4214*5748Sduboff 		break;
4215*5748Sduboff 
4216*5748Sduboff 	case ETHER_STAT_EX_COLLISIONS:
4217*5748Sduboff 		val = gstp->excoll;
4218*5748Sduboff 		break;
4219*5748Sduboff 
4220*5748Sduboff 	case ETHER_STAT_MACXMT_ERRORS:
4221*5748Sduboff 		val = gstp->xmit_internal_err;
4222*5748Sduboff 		break;
4223*5748Sduboff 
4224*5748Sduboff 	case ETHER_STAT_CARRIER_ERRORS:
4225*5748Sduboff 		val = gstp->nocarrier;
4226*5748Sduboff 		break;
4227*5748Sduboff 
4228*5748Sduboff 	case ETHER_STAT_TOOLONG_ERRORS:
4229*5748Sduboff 		val = gstp->frame_too_long;
4230*5748Sduboff 		break;
4231*5748Sduboff 
4232*5748Sduboff 	case ETHER_STAT_MACRCV_ERRORS:
4233*5748Sduboff 		val = gstp->rcv_internal_err;
4234*5748Sduboff 		break;
4235*5748Sduboff 
4236*5748Sduboff 	case ETHER_STAT_XCVR_ADDR:
4237*5748Sduboff 		val = dp->mii_phy_addr;
4238*5748Sduboff 		break;
4239*5748Sduboff 
4240*5748Sduboff 	case ETHER_STAT_XCVR_ID:
4241*5748Sduboff 		val = dp->mii_phy_id;
4242*5748Sduboff 		break;
4243*5748Sduboff 
4244*5748Sduboff 	case ETHER_STAT_XCVR_INUSE:
4245*5748Sduboff 		val = gem_mac_xcvr_inuse(dp);
4246*5748Sduboff 		break;
4247*5748Sduboff 
4248*5748Sduboff 	case ETHER_STAT_CAP_1000FDX:
4249*5748Sduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4250*5748Sduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4251*5748Sduboff 		break;
4252*5748Sduboff 
4253*5748Sduboff 	case ETHER_STAT_CAP_1000HDX:
4254*5748Sduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4255*5748Sduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4256*5748Sduboff 		break;
4257*5748Sduboff 
4258*5748Sduboff 	case ETHER_STAT_CAP_100FDX:
4259*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4260*5748Sduboff 		break;
4261*5748Sduboff 
4262*5748Sduboff 	case ETHER_STAT_CAP_100HDX:
4263*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4264*5748Sduboff 		break;
4265*5748Sduboff 
4266*5748Sduboff 	case ETHER_STAT_CAP_10FDX:
4267*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4268*5748Sduboff 		break;
4269*5748Sduboff 
4270*5748Sduboff 	case ETHER_STAT_CAP_10HDX:
4271*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4272*5748Sduboff 		break;
4273*5748Sduboff 
4274*5748Sduboff 	case ETHER_STAT_CAP_ASMPAUSE:
4275*5748Sduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
4276*5748Sduboff 		break;
4277*5748Sduboff 
4278*5748Sduboff 	case ETHER_STAT_CAP_PAUSE:
4279*5748Sduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
4280*5748Sduboff 		break;
4281*5748Sduboff 
4282*5748Sduboff 	case ETHER_STAT_CAP_AUTONEG:
4283*5748Sduboff 		val = dp->anadv_autoneg;
4284*5748Sduboff 		break;
4285*5748Sduboff 
4286*5748Sduboff 	case ETHER_STAT_ADV_CAP_1000FDX:
4287*5748Sduboff 		val = dp->anadv_1000fdx;
4288*5748Sduboff 		break;
4289*5748Sduboff 
4290*5748Sduboff 	case ETHER_STAT_ADV_CAP_1000HDX:
4291*5748Sduboff 		val = dp->anadv_1000hdx;
4292*5748Sduboff 		break;
4293*5748Sduboff 
4294*5748Sduboff 	case ETHER_STAT_ADV_CAP_100FDX:
4295*5748Sduboff 		val = dp->anadv_100fdx;
4296*5748Sduboff 		break;
4297*5748Sduboff 
4298*5748Sduboff 	case ETHER_STAT_ADV_CAP_100HDX:
4299*5748Sduboff 		val = dp->anadv_100hdx;
4300*5748Sduboff 		break;
4301*5748Sduboff 
4302*5748Sduboff 	case ETHER_STAT_ADV_CAP_10FDX:
4303*5748Sduboff 		val = dp->anadv_10fdx;
4304*5748Sduboff 		break;
4305*5748Sduboff 
4306*5748Sduboff 	case ETHER_STAT_ADV_CAP_10HDX:
4307*5748Sduboff 		val = dp->anadv_10hdx;
4308*5748Sduboff 		break;
4309*5748Sduboff 
4310*5748Sduboff 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
4311*5748Sduboff 		val = BOOLEAN(dp->anadv_flow_control & 2);
4312*5748Sduboff 		break;
4313*5748Sduboff 
4314*5748Sduboff 	case ETHER_STAT_ADV_CAP_PAUSE:
4315*5748Sduboff 		val = BOOLEAN(dp->anadv_flow_control & 1);
4316*5748Sduboff 		break;
4317*5748Sduboff 
4318*5748Sduboff 	case ETHER_STAT_ADV_CAP_AUTONEG:
4319*5748Sduboff 		val = dp->anadv_autoneg;
4320*5748Sduboff 		break;
4321*5748Sduboff 
4322*5748Sduboff 	case ETHER_STAT_LP_CAP_1000FDX:
4323*5748Sduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4324*5748Sduboff 		break;
4325*5748Sduboff 
4326*5748Sduboff 	case ETHER_STAT_LP_CAP_1000HDX:
4327*5748Sduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4328*5748Sduboff 		break;
4329*5748Sduboff 
4330*5748Sduboff 	case ETHER_STAT_LP_CAP_100FDX:
4331*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4332*5748Sduboff 		break;
4333*5748Sduboff 
4334*5748Sduboff 	case ETHER_STAT_LP_CAP_100HDX:
4335*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4336*5748Sduboff 		break;
4337*5748Sduboff 
4338*5748Sduboff 	case ETHER_STAT_LP_CAP_10FDX:
4339*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4340*5748Sduboff 		break;
4341*5748Sduboff 
4342*5748Sduboff 	case ETHER_STAT_LP_CAP_10HDX:
4343*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4344*5748Sduboff 		break;
4345*5748Sduboff 
4346*5748Sduboff 	case ETHER_STAT_LP_CAP_ASMPAUSE:
4347*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
4348*5748Sduboff 		break;
4349*5748Sduboff 
4350*5748Sduboff 	case ETHER_STAT_LP_CAP_PAUSE:
4351*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4352*5748Sduboff 		break;
4353*5748Sduboff 
4354*5748Sduboff 	case ETHER_STAT_LP_CAP_AUTONEG:
4355*5748Sduboff 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4356*5748Sduboff 		break;
4357*5748Sduboff 
4358*5748Sduboff 	case ETHER_STAT_LINK_ASMPAUSE:
4359*5748Sduboff 		val = BOOLEAN(dp->flow_control & 2);
4360*5748Sduboff 		break;
4361*5748Sduboff 
4362*5748Sduboff 	case ETHER_STAT_LINK_PAUSE:
4363*5748Sduboff 		val = BOOLEAN(dp->flow_control & 1);
4364*5748Sduboff 		break;
4365*5748Sduboff 
4366*5748Sduboff 	case ETHER_STAT_LINK_AUTONEG:
4367*5748Sduboff 		val = dp->anadv_autoneg &&
4368*5748Sduboff 		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4369*5748Sduboff 		break;
4370*5748Sduboff 
4371*5748Sduboff 	case ETHER_STAT_LINK_DUPLEX:
4372*5748Sduboff 		val = (dp->mii_state == MII_STATE_LINKUP) ?
4373*5748Sduboff 		    (dp->full_duplex ? 2 : 1) : 0;
4374*5748Sduboff 		break;
4375*5748Sduboff 
4376*5748Sduboff 	case ETHER_STAT_TOOSHORT_ERRORS:
4377*5748Sduboff 		val = gstp->runt;
4378*5748Sduboff 		break;
4379*5748Sduboff 	case ETHER_STAT_LP_REMFAULT:
4380*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4381*5748Sduboff 		break;
4382*5748Sduboff 
4383*5748Sduboff 	case ETHER_STAT_JABBER_ERRORS:
4384*5748Sduboff 		val = gstp->jabber;
4385*5748Sduboff 		break;
4386*5748Sduboff 
4387*5748Sduboff 	case ETHER_STAT_CAP_100T4:
4388*5748Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4389*5748Sduboff 		break;
4390*5748Sduboff 
4391*5748Sduboff 	case ETHER_STAT_ADV_CAP_100T4:
4392*5748Sduboff 		val = dp->anadv_100t4;
4393*5748Sduboff 		break;
4394*5748Sduboff 
4395*5748Sduboff 	case ETHER_STAT_LP_CAP_100T4:
4396*5748Sduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4397*5748Sduboff 		break;
4398*5748Sduboff 
4399*5748Sduboff 	default:
4400*5748Sduboff #if GEM_DEBUG_LEVEL > 2
4401*5748Sduboff 		cmn_err(CE_WARN,
4402*5748Sduboff 		    "%s: unrecognized parameter value = %d",
4403*5748Sduboff 		    __func__, stat);
4404*5748Sduboff #endif
4405*5748Sduboff 		return (ENOTSUP);
4406*5748Sduboff 	}
4407*5748Sduboff 
4408*5748Sduboff 	*valp = val;
4409*5748Sduboff 
4410*5748Sduboff 	return (0);
4411*5748Sduboff }
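
/*
 * Illustrative sketch (not from this file): the gc_get_stats callback
 * invoked above is expected to fold the chip's hardware counters into
 * the driver's gem_stats block (read through gstp above, assumed here
 * to be dp->stats). A minimal hypothetical implementation:
 *
 *	static int
 *	mychip_get_stats(struct gem_dev *dp)
 *	{
 *		dp->stats.crc    += INL(dp, MYCHIP_CRC_ERR_COUNTER);
 *		dp->stats.missed += INL(dp, MYCHIP_MISSED_PKT_COUNTER);
 *		return (GEM_SUCCESS);
 *	}
 *
 * The MYCHIP_* registers and the INL() accessor are assumptions for
 * illustration only; real chip drivers use their own register
 * definitions and accessors.
 */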
4412*5748Sduboff 
4413*5748Sduboff static int
4414*5748Sduboff gem_m_unicst(void *arg, const uint8_t *mac)
4415*5748Sduboff {
4416*5748Sduboff 	int		err = 0;
4417*5748Sduboff 	struct gem_dev	*dp = arg;
4418*5748Sduboff 
4419*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4420*5748Sduboff 
4421*5748Sduboff 	mutex_enter(&dp->intrlock);
4422*5748Sduboff 	if (dp->mac_suspended) {
4423*5748Sduboff 		mutex_exit(&dp->intrlock);
4424*5748Sduboff 		return (EIO);
4425*5748Sduboff 	}
4426*5748Sduboff 	bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4427*5748Sduboff 	dp->rxmode |= RXMODE_ENABLE;
4428*5748Sduboff 
4429*5748Sduboff 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4430*5748Sduboff 		err = EIO;
4431*5748Sduboff 	}
4432*5748Sduboff 	mutex_exit(&dp->intrlock);
4433*5748Sduboff 
4434*5748Sduboff 	return (err);
4435*5748Sduboff }
4436*5748Sduboff 
4437*5748Sduboff /*
4438*5748Sduboff  * gem_m_tx is used only for sending data packets onto the ethernet wire.
4439*5748Sduboff  */
4440*5748Sduboff static mblk_t *
4441*5748Sduboff gem_m_tx(void *arg, mblk_t *mp)
4442*5748Sduboff {
4443*5748Sduboff 	uint32_t	flags = 0;
4444*5748Sduboff 	struct gem_dev	*dp = arg;
4445*5748Sduboff 	mblk_t		*tp;
4446*5748Sduboff 
4447*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4448*5748Sduboff 
4449*5748Sduboff 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4450*5748Sduboff 	if (dp->mii_state != MII_STATE_LINKUP) {
4451*5748Sduboff 		/* Some nics hate to send packets when the link is down. */
4452*5748Sduboff 		while (mp) {
4453*5748Sduboff 			tp = mp->b_next;
4454*5748Sduboff 			mp->b_next = NULL;
4455*5748Sduboff 			freemsg(mp);
4456*5748Sduboff 			mp = tp;
4457*5748Sduboff 		}
4458*5748Sduboff 		return (NULL);
4459*5748Sduboff 	}
4460*5748Sduboff 
4461*5748Sduboff 	return (gem_send_common(dp, mp, flags));
4462*5748Sduboff }
4463*5748Sduboff 
4464*5748Sduboff static void
4465*5748Sduboff gem_set_coalease(void *arg, time_t ticks, uint_t count)
4466*5748Sduboff {
4467*5748Sduboff 	struct gem_dev *dp = arg;
4468*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: ticks:%ld count:%u",
4469*5748Sduboff 	    dp->name, __func__, (long)ticks, count));
4470*5748Sduboff 
4471*5748Sduboff 	mutex_enter(&dp->intrlock);
4472*5748Sduboff 	dp->poll_pkt_delay = count;
4473*5748Sduboff 	mutex_exit(&dp->intrlock);
4474*5748Sduboff }
4475*5748Sduboff 
4476*5748Sduboff static void
4477*5748Sduboff gem_m_resources(void *arg)
4478*5748Sduboff {
4479*5748Sduboff 	struct gem_dev		*dp = arg;
4480*5748Sduboff 	mac_rx_fifo_t		mrf;
4481*5748Sduboff 
4482*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4483*5748Sduboff 
4484*5748Sduboff 	mutex_enter(&dp->intrlock);
4485*5748Sduboff 	mutex_enter(&dp->xmitlock);
4486*5748Sduboff 
4487*5748Sduboff 	/*
4488*5748Sduboff 	 * Register Rx rings as resources and save mac
4489*5748Sduboff 	 * resource id for future reference
4490*5748Sduboff 	 */
4491*5748Sduboff 	mrf.mrf_type = MAC_RX_FIFO;
4492*5748Sduboff 	mrf.mrf_blank = gem_set_coalease;
4493*5748Sduboff 	mrf.mrf_arg = (void *)dp;
4494*5748Sduboff 	mrf.mrf_normal_blank_time = 128; /* in uS */
4495*5748Sduboff 	mrf.mrf_normal_pkt_count = dp->poll_pkt_delay;
4496*5748Sduboff 
4497*5748Sduboff 	dp->mac_rx_ring_ha = mac_resource_add(dp->mh, (mac_resource_t *)&mrf);
4498*5748Sduboff 
4499*5748Sduboff 	mutex_exit(&dp->xmitlock);
4500*5748Sduboff 	mutex_exit(&dp->intrlock);
4501*5748Sduboff }
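
/*
 * Note (illustrative): once registered above, the MAC layer may call
 * back through the mrf_blank member to tune rx interrupt blanking,
 * conceptually:
 *
 *	(*mrf.mrf_blank)(mrf.mrf_arg, ticks, count);
 *
 * which lands in gem_set_coalease() above and updates
 * dp->poll_pkt_delay under intrlock; the ticks argument is currently
 * ignored there.
 */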
4502*5748Sduboff 
4503*5748Sduboff static void
4504*5748Sduboff gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4505*5748Sduboff {
4506*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called",
4507*5748Sduboff 	    ((struct gem_dev *)arg)->name, __func__));
4508*5748Sduboff 
4509*5748Sduboff 	gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4510*5748Sduboff }
4511*5748Sduboff 
4512*5748Sduboff static boolean_t
4513*5748Sduboff gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4514*5748Sduboff {
4515*5748Sduboff 	boolean_t	ret;
4516*5748Sduboff 
4517*5748Sduboff 	ret = B_FALSE;
4518*5748Sduboff 	switch (cap) {
4519*5748Sduboff 	case MAC_CAPAB_POLL:
4520*5748Sduboff 		ret = B_TRUE;
4521*5748Sduboff 		break;
4522*5748Sduboff 	}
4523*5748Sduboff 	return (ret);
4524*5748Sduboff }
4525*5748Sduboff 
4526*5748Sduboff static void
4527*5748Sduboff gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4528*5748Sduboff {
4529*5748Sduboff 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4530*5748Sduboff 	macp->m_driver = dp;
4531*5748Sduboff 	macp->m_dip = dp->dip;
4532*5748Sduboff 	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4533*5748Sduboff 	macp->m_callbacks = &gem_m_callbacks;
4534*5748Sduboff 	macp->m_min_sdu = 0;
4535*5748Sduboff 	macp->m_max_sdu = dp->mtu;
4536*5748Sduboff }
4537*5748Sduboff 
4538*5748Sduboff /* ======================================================================== */
4539*5748Sduboff /*
4540*5748Sduboff  * attach/detach support
4541*5748Sduboff  */
4542*5748Sduboff /* ======================================================================== */
4543*5748Sduboff static void
4544*5748Sduboff gem_read_conf(struct gem_dev *dp)
4545*5748Sduboff {
4547*5748Sduboff 	int			val;
4548*5748Sduboff 
4549*5748Sduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4550*5748Sduboff 
4551*5748Sduboff 	/*
4552*5748Sduboff 	 * Get media mode information from .conf file
4553*5748Sduboff 	 */
4554*5748Sduboff 	dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4555*5748Sduboff 	dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4556*5748Sduboff 	dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4557*5748Sduboff 	dp->anadv_100t4   = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4558*5748Sduboff 	dp->anadv_100fdx  = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4559*5748Sduboff 	dp->anadv_100hdx  = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4560*5748Sduboff 	dp->anadv_10fdx   = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4561*5748Sduboff 	dp->anadv_10hdx   = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4562*5748Sduboff 
4563*5748Sduboff 	if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4564*5748Sduboff 	    DDI_PROP_DONTPASS, "full-duplex"))) {
4565*5748Sduboff 		dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4566*5748Sduboff 		dp->anadv_autoneg = B_FALSE;
4567*5748Sduboff 		dp->anadv_1000hdx = B_FALSE;
4568*5748Sduboff 		dp->anadv_100hdx = B_FALSE;
4569*5748Sduboff 		dp->anadv_10hdx = B_FALSE;
4570*5748Sduboff 	}
4571*5748Sduboff 
4572*5748Sduboff 	if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4573*5748Sduboff 		dp->anadv_autoneg = B_FALSE;
4574*5748Sduboff 		switch (val) {
4575*5748Sduboff 		case 1000:
4576*5748Sduboff 			dp->speed = GEM_SPD_1000;
4577*5748Sduboff 			dp->anadv_100t4   = B_FALSE;
4578*5748Sduboff 			dp->anadv_100fdx  = B_FALSE;
4579*5748Sduboff 			dp->anadv_100hdx  = B_FALSE;
4580*5748Sduboff 			dp->anadv_10fdx   = B_FALSE;
4581*5748Sduboff 			dp->anadv_10hdx   = B_FALSE;
4582*5748Sduboff 			break;
4583*5748Sduboff 		case 100:
4584*5748Sduboff 			dp->speed = GEM_SPD_100;
4585*5748Sduboff 			dp->anadv_1000fdx = B_FALSE;
4586*5748Sduboff 			dp->anadv_1000hdx = B_FALSE;
4587*5748Sduboff 			dp->anadv_10fdx   = B_FALSE;
4588*5748Sduboff 			dp->anadv_10hdx   = B_FALSE;
4589*5748Sduboff 			break;
4590*5748Sduboff 		case 10:
4591*5748Sduboff 			dp->speed = GEM_SPD_10;
4592*5748Sduboff 			dp->anadv_1000fdx = B_FALSE;
4593*5748Sduboff 			dp->anadv_1000hdx = B_FALSE;
4594*5748Sduboff 			dp->anadv_100t4   = B_FALSE;
4595*5748Sduboff 			dp->anadv_100fdx  = B_FALSE;
4596*5748Sduboff 			dp->anadv_100hdx  = B_FALSE;
4597*5748Sduboff 			break;
4598*5748Sduboff 		default:
4599*5748Sduboff 			cmn_err(CE_WARN,
4600*5748Sduboff 			    "!%s: property speed: illegal value:%d",
4601*5748Sduboff 			    dp->name, val);
4602*5748Sduboff 			dp->anadv_autoneg = B_TRUE;
4603*5748Sduboff 			break;
4604*5748Sduboff 		}
4605*5748Sduboff 	}
4606*5748Sduboff 
4607*5748Sduboff 	val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4608*5748Sduboff 	if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4609*5748Sduboff 		cmn_err(CE_WARN,
4610*5748Sduboff 		    "!%s: property flow-control: illegal value:%d",
4611*5748Sduboff 		    dp->name, val);
4612*5748Sduboff 		val = dp->gc.gc_flow_control;	/* fall back to chip default */
4613*5748Sduboff 	}
4614*5748Sduboff 	val = min(val, dp->gc.gc_flow_control);
4615*5748Sduboff 	dp->anadv_flow_control = val;
4616*5748Sduboff 
4617*5748Sduboff 	if (gem_prop_get_int(dp, "nointr", 0)) {
4618*5748Sduboff 		dp->misc_flag |= GEM_NOINTR;
4619*5748Sduboff 		cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4620*5748Sduboff 	}
4621*5748Sduboff 
4622*5748Sduboff 	dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4623*5748Sduboff 	dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4624*5748Sduboff 	dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4625*5748Sduboff 	dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4626*5748Sduboff 	dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4627*5748Sduboff 	dp->poll_pkt_delay =
4628*5748Sduboff 	    gem_prop_get_int(dp, "pkt_delay", dp->poll_pkt_delay);
4629*5748Sduboff }
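
/*
 * Example (illustrative): a driver.conf fragment exercising the
 * properties parsed above, forcing 100Mbps full duplex with
 * autonegotiation and flow control disabled:
 *
 *	adv_autoneg_cap=0;
 *	speed=100;
 *	full-duplex=1;
 *	flow-control=0;
 *
 * The property names are those read by gem_read_conf(); the values
 * shown are only an example.
 */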
4630*5748Sduboff 
4631*5748Sduboff 
4632*5748Sduboff /*
4633*5748Sduboff  * Gem attach/detach implementation
4634*5748Sduboff  */
4635*5748Sduboff 
4636*5748Sduboff #define	GEM_LOCAL_DATA_SIZE(gc)	\
4637*5748Sduboff 	(sizeof (struct gem_dev) + \
4638*5748Sduboff 	sizeof (struct mcast_addr) * GEM_MAXMC + \
4639*5748Sduboff 	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4640*5748Sduboff 	sizeof (void *) * ((gc)->gc_tx_buf_size))
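
/*
 * The block allocated with GEM_LOCAL_DATA_SIZE() is carved up by
 * gem_do_attach() below (see the mc_list and tx_buf assignments
 * there):
 *
 *	struct gem_dev		the device softstate itself
 *	struct mcast_addr	dp->mc_list[GEM_MAXMC]
 *	struct txbuf		dp->tx_buf[gc_tx_buf_size]
 *	void *			gc_tx_buf_size slots (assumption: used
 *				for per-txbuf bookkeeping elsewhere in
 *				this file)
 */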
4641*5748Sduboff 
4642*5748Sduboff struct gem_dev *
4643*5748Sduboff gem_do_attach(dev_info_t *dip, int port,
4644*5748Sduboff 	struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4645*5748Sduboff 	void *lp, int lmsize)
4646*5748Sduboff {
4647*5748Sduboff 	struct gem_dev		*dp;
4648*5748Sduboff 	int			i;
4649*5748Sduboff 	ddi_iblock_cookie_t	c;
4650*5748Sduboff 	mac_register_t		*macp = NULL;
4651*5748Sduboff 	int			ret;
4652*5748Sduboff 	int			unit;
4653*5748Sduboff 	int			nports;
4654*5748Sduboff 
4655*5748Sduboff 	unit = ddi_get_instance(dip);
4656*5748Sduboff 	if ((nports = gc->gc_nports) == 0) {
4657*5748Sduboff 		nports = 1;
4658*5748Sduboff 	}
4659*5748Sduboff 	if (nports == 1) {
4660*5748Sduboff 		ddi_set_driver_private(dip, NULL);
4661*5748Sduboff 	}
4662*5748Sduboff 
4663*5748Sduboff 	DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4664*5748Sduboff 	    unit));
4665*5748Sduboff 
4666*5748Sduboff 	/*
4667*5748Sduboff 	 * Allocate soft data structure
4668*5748Sduboff 	 */
4669*5748Sduboff 	dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4670*5748Sduboff 
4671*5748Sduboff 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4672*5748Sduboff 		cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4673*5748Sduboff 		    unit, __func__);
4674*5748Sduboff 		goto err_free_private;
4675*5748Sduboff 	}
4676*5748Sduboff 	/* ddi_set_driver_private(dip, dp); */
4677*5748Sduboff 
4678*5748Sduboff 	/* link to private area */
4679*5748Sduboff 	dp->private   = lp;
4680*5748Sduboff 	dp->priv_size = lmsize;
4681*5748Sduboff 	dp->mc_list = (struct mcast_addr *)&dp[1];
4682*5748Sduboff 
4683*5748Sduboff 	dp->dip = dip;
4684*5748Sduboff 	(void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4685*5748Sduboff 
4686*5748Sduboff 	/*
4687*5748Sduboff 	 * Get iblock cookie
4688*5748Sduboff 	 */
4689*5748Sduboff 	if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4690*5748Sduboff 		cmn_err(CE_CONT,
4691*5748Sduboff 		    "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4692*5748Sduboff 		    dp->name);
4693*5748Sduboff 		goto err_free_private;
4694*5748Sduboff 	}
4695*5748Sduboff 	dp->iblock_cookie = c;
4696*5748Sduboff 
4697*5748Sduboff 	/*
4698*5748Sduboff 	 * Initialize mutex's for this device.
4699*5748Sduboff 	 */
4700*5748Sduboff 	mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4701*5748Sduboff 	mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4702*5748Sduboff 	cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4703*5748Sduboff 
4704*5748Sduboff 	/*
4705*5748Sduboff 	 * configure gem parameter
4706*5748Sduboff 	 */
4707*5748Sduboff 	dp->base_addr   = base;
4708*5748Sduboff 	dp->regs_handle = *regs_handlep;
4709*5748Sduboff 	dp->gc = *gc;
4710*5748Sduboff 	gc = &dp->gc;
4711*5748Sduboff 	if (gc->gc_tx_ring_size == 0) {
4712*5748Sduboff 		/* patch to simplify dma resource management */
4713*5748Sduboff 		gc->gc_tx_max_frags = 1;
4714*5748Sduboff 		gc->gc_tx_max_descs_per_pkt = 1;
4715*5748Sduboff 		gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4716*5748Sduboff 		gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4717*5748Sduboff 		gc->gc_tx_desc_write_oo = B_TRUE;
4718*5748Sduboff 	}
4719*5748Sduboff 	if (gc->gc_tx_desc_write_oo) {
4720*5748Sduboff 		/* double check that tx descriptors may be written out of order */
4721*5748Sduboff 		gc->gc_tx_desc_write_oo =
4722*5748Sduboff 		    gc->gc_tx_max_descs_per_pkt == 1 &&
4723*5748Sduboff 		    gc->gc_tx_buf_size == gc->gc_tx_ring_size &&
4724*5748Sduboff 		    gc->gc_tx_buf_limit == gc->gc_tx_ring_limit;
4725*5748Sduboff 	}
4726*5748Sduboff 
4727*5748Sduboff 	gc->gc_nports = nports;	/* fix nports */
4728*5748Sduboff 
4729*5748Sduboff 	/* fix copy thresholds */
4730*5748Sduboff 	gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4731*5748Sduboff 	gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4732*5748Sduboff 
4733*5748Sduboff 	/* fix rx buffer boundary for iocache line size */
4734*5748Sduboff 	ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4735*5748Sduboff 	ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4736*5748Sduboff 	gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4737*5748Sduboff 	gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4738*5748Sduboff 
4739*5748Sduboff 	/* patch get_packet method */
4740*5748Sduboff 	if (gc->gc_get_packet == NULL) {
4741*5748Sduboff 		gc->gc_get_packet = &gem_get_packet_default;
4742*5748Sduboff 	}
4743*5748Sduboff 
4744*5748Sduboff 	/* patch get_rx_start method */
4745*5748Sduboff 	if (gc->gc_rx_start == NULL) {
4746*5748Sduboff 		gc->gc_rx_start = &gem_rx_start_default;
4747*5748Sduboff 	}
4748*5748Sduboff 
4749*5748Sduboff 	/* calculate descriptor area */
4750*5748Sduboff 	if (gc->gc_rx_desc_unit_shift >= 0) {
4751*5748Sduboff 		dp->rx_desc_size =
4752*5748Sduboff 		    ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4753*5748Sduboff 		    gc->gc_dma_attr_desc.dma_attr_align);
4754*5748Sduboff 	}
4755*5748Sduboff 	if (gc->gc_tx_desc_unit_shift >= 0) {
4756*5748Sduboff 		dp->tx_desc_size =
4757*5748Sduboff 		    ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4758*5748Sduboff 		    gc->gc_dma_attr_desc.dma_attr_align);
4759*5748Sduboff 	}
4760*5748Sduboff 
4761*5748Sduboff 	dp->mtu = ETHERMTU;
4762*5748Sduboff 	dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4763*5748Sduboff 	/* link tx buffers */
4764*5748Sduboff 	for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4765*5748Sduboff 		dp->tx_buf[i].txb_next =
4766*5748Sduboff 		    &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4767*5748Sduboff 	}
4768*5748Sduboff 
4769*5748Sduboff 	dp->rxmode	   = 0;
4770*5748Sduboff 	dp->speed	   = GEM_SPD_10;	/* default is 10Mbps */
4771*5748Sduboff 	dp->full_duplex    = B_FALSE;		/* default is half */
4772*5748Sduboff 	dp->flow_control   = FLOW_CONTROL_NONE;
4773*5748Sduboff 	dp->poll_pkt_delay = 6;
4774*5748Sduboff 	dp->poll_pkt_hiwat = INT32_MAX;
4775*5748Sduboff 
4776*5748Sduboff 	/* performance tuning parameters */
4777*5748Sduboff 	dp->txthr    = ETHERMAX;	/* tx fifo threshold */
4778*5748Sduboff 	dp->txmaxdma = 16*4;		/* tx max dma burst size */
4779*5748Sduboff 	dp->rxthr    = 128;		/* rx fifo threshold */
4780*5748Sduboff 	dp->rxmaxdma = 16*4;		/* rx max dma burst size */
4781*5748Sduboff 
4782*5748Sduboff 	/*
4783*5748Sduboff 	 * Get media mode information from .conf file
4784*5748Sduboff 	 */
4785*5748Sduboff 	gem_read_conf(dp);
4786*5748Sduboff 
4787*5748Sduboff 	/* rx_buf_len: required buffer length, excluding alignment padding */
4788*5748Sduboff 	dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4789*5748Sduboff 
4790*5748Sduboff 	/*
4791*5748Sduboff 	 * Reset the chip
4792*5748Sduboff 	 */
4793*5748Sduboff 	mutex_enter(&dp->intrlock);
4794*5748Sduboff 	dp->nic_state = NIC_STATE_STOPPED;
4795*5748Sduboff 	ret = (*dp->gc.gc_reset_chip)(dp);
4796*5748Sduboff 	mutex_exit(&dp->intrlock);
4797*5748Sduboff 	if (ret != GEM_SUCCESS) {
4798*5748Sduboff 		goto err_free_regs;
4799*5748Sduboff 	}
4800*5748Sduboff 
4801*5748Sduboff 	/*
4802*5748Sduboff 	 * HW dependent parameter initialization
4803*5748Sduboff 	 */
4804*5748Sduboff 	mutex_enter(&dp->intrlock);
4805*5748Sduboff 	ret = (*dp->gc.gc_attach_chip)(dp);
4806*5748Sduboff 	mutex_exit(&dp->intrlock);
4807*5748Sduboff 	if (ret != GEM_SUCCESS) {
4808*5748Sduboff 		goto err_free_regs;
4809*5748Sduboff 	}
4810*5748Sduboff 
4811*5748Sduboff #ifdef DEBUG_MULTIFRAGS
4812*5748Sduboff 	dp->gc.gc_tx_copy_thresh = dp->mtu;
4813*5748Sduboff #endif
4814*5748Sduboff 	/* allocate tx and rx resources */
4815*5748Sduboff 	if (gem_alloc_memory(dp)) {
4816*5748Sduboff 		goto err_free_regs;
4817*5748Sduboff 	}
4818*5748Sduboff 
4819*5748Sduboff 	DPRINTF(0, (CE_CONT,
4820*5748Sduboff 	    "!%s: at %p, %02x:%02x:%02x:%02x:%02x:%02x",
4821*5748Sduboff 	    dp->name, (void *)dp->base_addr,
4822*5748Sduboff 	    dp->dev_addr.ether_addr_octet[0],
4823*5748Sduboff 	    dp->dev_addr.ether_addr_octet[1],
4824*5748Sduboff 	    dp->dev_addr.ether_addr_octet[2],
4825*5748Sduboff 	    dp->dev_addr.ether_addr_octet[3],
4826*5748Sduboff 	    dp->dev_addr.ether_addr_octet[4],
4827*5748Sduboff 	    dp->dev_addr.ether_addr_octet[5]));
4828*5748Sduboff 
4829*5748Sduboff 	/* copy mac address */
4830*5748Sduboff 	dp->cur_addr = dp->dev_addr;
4831*5748Sduboff 
4832*5748Sduboff 	gem_gld3_init(dp, macp);
4833*5748Sduboff 
4834*5748Sduboff 	/* Probe MII phy (scan phy) */
4835*5748Sduboff 	dp->mii_lpable = 0;
4836*5748Sduboff 	dp->mii_advert = 0;
4837*5748Sduboff 	dp->mii_exp = 0;
4838*5748Sduboff 	dp->mii_ctl1000 = 0;
4839*5748Sduboff 	dp->mii_stat1000 = 0;
4840*5748Sduboff 	if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4841*5748Sduboff 		goto err_free_ring;
4842*5748Sduboff 	}
4843*5748Sduboff 
4844*5748Sduboff 	/* mask unsupported abilities */
4845*5748Sduboff 	dp->anadv_1000fdx &=
4846*5748Sduboff 	    BOOLEAN(dp->mii_xstatus &
4847*5748Sduboff 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4848*5748Sduboff 	dp->anadv_1000hdx &=
4849*5748Sduboff 	    BOOLEAN(dp->mii_xstatus &
4850*5748Sduboff 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4851*5748Sduboff 	dp->anadv_100t4  &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4852*5748Sduboff 	dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4853*5748Sduboff 	dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4854*5748Sduboff 	dp->anadv_10fdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4855*5748Sduboff 	dp->anadv_10hdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4856*5748Sduboff 
4857*5748Sduboff 	gem_choose_forcedmode(dp);
4858*5748Sduboff 
4859*5748Sduboff 	/* initialize MII phy if required */
4860*5748Sduboff 	if (dp->gc.gc_mii_init) {
4861*5748Sduboff 		if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4862*5748Sduboff 			goto err_free_ring;
4863*5748Sduboff 		}
4864*5748Sduboff 	}
4865*5748Sduboff 
4866*5748Sduboff 	/*
4867*5748Sduboff 	 * initialize ndd parameters including mii statistics
4868*5748Sduboff 	 */
4869*5748Sduboff 	gem_nd_setup(dp);
4870*5748Sduboff 
4871*5748Sduboff 	/*
4872*5748Sduboff 	 * Add interrupt to system.
4873*5748Sduboff 	 */
4874*5748Sduboff 	if (ret = mac_register(macp, &dp->mh)) {
4875*5748Sduboff 		cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4876*5748Sduboff 		    dp->name, ret);
4877*5748Sduboff 		goto err_release_stats;
4878*5748Sduboff 	}
4879*5748Sduboff 	mac_free(macp);
4880*5748Sduboff 	macp = NULL;
4881*5748Sduboff 
4882*5748Sduboff 	if (dp->misc_flag & GEM_SOFTINTR) {
4883*5748Sduboff 		if (ddi_add_softintr(dip,
4884*5748Sduboff 		    DDI_SOFTINT_LOW, &dp->soft_id,
4885*5748Sduboff 		    NULL, NULL,
4886*5748Sduboff 		    (uint_t (*)(caddr_t))gem_intr,
4887*5748Sduboff 		    (caddr_t)dp) != DDI_SUCCESS) {
4888*5748Sduboff 			cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4889*5748Sduboff 			    dp->name);
4890*5748Sduboff 			goto err_unregister;
4891*5748Sduboff 		}
4892*5748Sduboff 	} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4893*5748Sduboff 		if (ddi_add_intr(dip, 0, NULL, NULL,
4894*5748Sduboff 		    (uint_t (*)(caddr_t))gem_intr,
4895*5748Sduboff 		    (caddr_t)dp) != DDI_SUCCESS) {
4896*5748Sduboff 			cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4897*5748Sduboff 			goto err_unregister;
4898*5748Sduboff 		}
4899*5748Sduboff 	} else {
4900*5748Sduboff 		/*
4901*5748Sduboff 		 * Don't use interrupts.
4902*5748Sduboff 		 * Schedule the first call of gem_intr_watcher.
4903*5748Sduboff 		 */
4904*5748Sduboff 		dp->intr_watcher_id =
4905*5748Sduboff 		    timeout((void (*)(void *))gem_intr_watcher,
4906*5748Sduboff 		    (void *)dp, drv_usectohz(3*1000000));
4907*5748Sduboff 	}
4908*5748Sduboff 
4909*5748Sduboff 	/* link this device to dev_info */
4910*5748Sduboff 	dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4911*5748Sduboff 	ddi_set_driver_private(dip, (caddr_t)dp);
4912*5748Sduboff 
4913*5748Sduboff 	/* reset mii and start the mii link watcher */
4914*5748Sduboff 	gem_mii_start(dp);
4915*5748Sduboff 
4916*5748Sduboff 	DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
4917*5748Sduboff 	return (dp);
4918*5748Sduboff 
4919*5748Sduboff err_unregister:
4920*5748Sduboff 	(void) mac_unregister(dp->mh);
4921*5748Sduboff err_release_stats:
4922*5748Sduboff 	/* release NDD resources */
4923*5748Sduboff 	gem_nd_cleanup(dp);
4924*5748Sduboff 
4925*5748Sduboff err_free_ring:
4926*5748Sduboff 	gem_free_memory(dp);
4927*5748Sduboff err_free_regs:
4928*5748Sduboff 	ddi_regs_map_free(&dp->regs_handle);
4929*5748Sduboff err_free_locks:
4930*5748Sduboff 	mutex_destroy(&dp->xmitlock);
4931*5748Sduboff 	mutex_destroy(&dp->intrlock);
4932*5748Sduboff 	cv_destroy(&dp->tx_drain_cv);
4933*5748Sduboff err_free_private:
4934*5748Sduboff 	if (macp) {
4935*5748Sduboff 		mac_free(macp);
4936*5748Sduboff 	}
4937*5748Sduboff 	kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
4938*5748Sduboff 
4939*5748Sduboff 	return (NULL);
4940*5748Sduboff }
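
/*
 * Illustrative sketch (hypothetical chip driver, not from this file):
 * the expected calling sequence from a leaf driver's attach(9E) entry
 * point. The mychip_* names, the private structure and the register
 * mapping step are assumptions for illustration only.
 *
 *	static int
 *	mychip_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 *	{
 *		struct gem_dev		*dp;
 *		caddr_t			base;
 *		ddi_acc_handle_t	regs_handle;
 *		struct mychip_private	*lp;
 *
 *		switch (cmd) {
 *		case DDI_RESUME:
 *			return (gem_resume(dip));
 *
 *		case DDI_ATTACH:
 *			(map the registers into base/regs_handle and
 *			 allocate lp here, then hand over to gem:)
 *			dp = gem_do_attach(dip, 0, &mychip_gem_conf,
 *			    (void *)base, &regs_handle,
 *			    lp, sizeof (*lp));
 *			return (dp != NULL ? DDI_SUCCESS : DDI_FAILURE);
 *		}
 *		return (DDI_FAILURE);
 *	}
 */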
4941*5748Sduboff 
4942*5748Sduboff int
4943*5748Sduboff gem_do_detach(dev_info_t *dip)
4944*5748Sduboff {
4945*5748Sduboff 	struct gem_dev	*dp;
4946*5748Sduboff 	struct gem_dev	*tmp;
4947*5748Sduboff 	caddr_t		private;
4948*5748Sduboff 	int		priv_size;
4949*5748Sduboff 	ddi_acc_handle_t	rh;
4950*5748Sduboff 
4951*5748Sduboff 	dp = GEM_GET_DEV(dip);
4952*5748Sduboff 	if (dp == NULL) {
4953*5748Sduboff 		return (DDI_SUCCESS);
4954*5748Sduboff 	}
4955*5748Sduboff 
4956*5748Sduboff 	rh = dp->regs_handle;
4957*5748Sduboff 	private = dp->private;
4958*5748Sduboff 	priv_size = dp->priv_size;
4959*5748Sduboff 
4960*5748Sduboff 	while (dp) {
4961*5748Sduboff 		/* ensure no rx buffers are still in use */
4962*5748Sduboff 		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
4963*5748Sduboff 			/* resource is busy */
4964*5748Sduboff 			cmn_err(CE_PANIC,
4965*5748Sduboff 			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
4966*5748Sduboff 			    dp->name, __func__,
4967*5748Sduboff 			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
4968*5748Sduboff 			/* NOT REACHED */
4969*5748Sduboff 		}
4970*5748Sduboff 
4971*5748Sduboff 		/* stop mii link watcher */
4972*5748Sduboff 		gem_mii_stop(dp);
4973*5748Sduboff 
4974*5748Sduboff 		/* unregister interrupt handler */
4975*5748Sduboff 		if (dp->misc_flag & GEM_SOFTINTR) {
4976*5748Sduboff 			ddi_remove_softintr(dp->soft_id);
4977*5748Sduboff 		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4978*5748Sduboff 			ddi_remove_intr(dip, 0, dp->iblock_cookie);
4979*5748Sduboff 		} else {
4980*5748Sduboff 			/* stop interrupt watcher */
4981*5748Sduboff 			if (dp->intr_watcher_id) {
4982*5748Sduboff 				while (untimeout(dp->intr_watcher_id) == -1)
4983*5748Sduboff 					;
4984*5748Sduboff 				dp->intr_watcher_id = 0;
4985*5748Sduboff 			}
4986*5748Sduboff 		}
4987*5748Sduboff 
4988*5748Sduboff 		/* unregister with gld v3 */
4989*5748Sduboff 		(void) mac_unregister(dp->mh);
4990*5748Sduboff 
4991*5748Sduboff 		/* release NDD resources */
4992*5748Sduboff 		gem_nd_cleanup(dp);
4993*5748Sduboff 		/* release buffers, descriptors and dma resources */
4994*5748Sduboff 		gem_free_memory(dp);
4995*5748Sduboff 
4996*5748Sduboff 		/* release locks and condition variables */
4997*5748Sduboff 		mutex_destroy(&dp->xmitlock);
4998*5748Sduboff 		mutex_destroy(&dp->intrlock);
4999*5748Sduboff 		cv_destroy(&dp->tx_drain_cv);
5000*5748Sduboff 
5001*5748Sduboff 		/* release basic memory resources */
5002*5748Sduboff 		tmp = dp->next;
5003*5748Sduboff 		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5004*5748Sduboff 		dp = tmp;
5005*5748Sduboff 	}
5006*5748Sduboff 
5007*5748Sduboff 	/* release common private memory for the nic */
5008*5748Sduboff 	kmem_free(private, priv_size);
5009*5748Sduboff 
5010*5748Sduboff 	/* release register mapping resources */
5011*5748Sduboff 	ddi_regs_map_free(&rh);
5012*5748Sduboff 	ddi_set_driver_private(dip, NULL);
5013*5748Sduboff 
5014*5748Sduboff 	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
5015*5748Sduboff 	    ddi_driver_name(dip), ddi_get_instance(dip)));
5016*5748Sduboff 
5017*5748Sduboff 	return (DDI_SUCCESS);
5018*5748Sduboff }
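
/*
 * Illustrative sketch (hypothetical chip driver): the matching
 * detach(9E) entry point simply dispatches to the two framework
 * routines defined in this file:
 *
 *	static int
 *	mychip_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 *	{
 *		switch (cmd) {
 *		case DDI_SUSPEND:
 *			return (gem_suspend(dip));
 *		case DDI_DETACH:
 *			return (gem_do_detach(dip));
 *		default:
 *			return (DDI_FAILURE);
 *		}
 *	}
 */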
5019*5748Sduboff 
5020*5748Sduboff int
5021*5748Sduboff gem_suspend(dev_info_t *dip)
5022*5748Sduboff {
5023*5748Sduboff 	struct gem_dev	*dp;
5024*5748Sduboff 
5025*5748Sduboff 	/*
5026*5748Sduboff 	 * stop the device
5027*5748Sduboff 	 */
5028*5748Sduboff 	dp = GEM_GET_DEV(dip);
5029*5748Sduboff 	ASSERT(dp);
5030*5748Sduboff 
5031*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5032*5748Sduboff 
5033*5748Sduboff 	for (; dp; dp = dp->next) {
5034*5748Sduboff 
5035*5748Sduboff 		/* stop mii link watcher */
5036*5748Sduboff 		gem_mii_stop(dp);
5037*5748Sduboff 
5038*5748Sduboff 		/* stop interrupt watcher for no-intr mode */
5039*5748Sduboff 		if (dp->misc_flag & GEM_NOINTR) {
5040*5748Sduboff 			if (dp->intr_watcher_id) {
5041*5748Sduboff 				while (untimeout(dp->intr_watcher_id) == -1)
5042*5748Sduboff 					;
5043*5748Sduboff 			}
5044*5748Sduboff 			dp->intr_watcher_id = 0;
5045*5748Sduboff 		}
5046*5748Sduboff 
5047*5748Sduboff 		/* stop tx timeout watcher */
5048*5748Sduboff 		if (dp->timeout_id) {
5049*5748Sduboff 			while (untimeout(dp->timeout_id) == -1)
5050*5748Sduboff 				;
5051*5748Sduboff 			dp->timeout_id = 0;
5052*5748Sduboff 		}
5053*5748Sduboff 
5054*5748Sduboff 		/* make the nic state inactive */
5055*5748Sduboff 		mutex_enter(&dp->intrlock);
5056*5748Sduboff 		(void) gem_mac_stop(dp, 0);
5057*5748Sduboff 		ASSERT(!dp->mac_active);
5058*5748Sduboff 
5059*5748Sduboff 		/* no further register access */
5060*5748Sduboff 		dp->mac_suspended = B_TRUE;
5061*5748Sduboff 		mutex_exit(&dp->intrlock);
5062*5748Sduboff 	}
5063*5748Sduboff 
5064*5748Sduboff 	/* XXX - power down the nic */
5065*5748Sduboff 
5066*5748Sduboff 	return (DDI_SUCCESS);
5067*5748Sduboff }
5068*5748Sduboff 
5069*5748Sduboff int
5070*5748Sduboff gem_resume(dev_info_t *dip)
5071*5748Sduboff {
5072*5748Sduboff 	struct gem_dev	*dp;
5073*5748Sduboff 
5074*5748Sduboff 	/*
5075*5748Sduboff 	 * restart the device
5076*5748Sduboff 	 */
5077*5748Sduboff 	dp = GEM_GET_DEV(dip);
5078*5748Sduboff 	ASSERT(dp);
5079*5748Sduboff 
5080*5748Sduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5081*5748Sduboff 
5082*5748Sduboff 	for (; dp; dp = dp->next) {
5083*5748Sduboff 
5084*5748Sduboff 		/*
5085*5748Sduboff 		 * Bring up the nic after power up
5086*5748Sduboff 		 */
5087*5748Sduboff 
5088*5748Sduboff 		/* gem_xxx.c layer should have set up power management state. */
5089*5748Sduboff 		ASSERT(!dp->mac_active);
5090*5748Sduboff 
5091*5748Sduboff 		/* reset the chip, because we are just after power up. */
5092*5748Sduboff 		mutex_enter(&dp->intrlock);
5093*5748Sduboff 
5094*5748Sduboff 		dp->mac_suspended = B_FALSE;
5095*5748Sduboff 		dp->nic_state = NIC_STATE_STOPPED;
5096*5748Sduboff 
5097*5748Sduboff 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5098*5748Sduboff 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
5099*5748Sduboff 			    dp->name, __func__);
5100*5748Sduboff 			mutex_exit(&dp->intrlock);
5101*5748Sduboff 			goto err;
5102*5748Sduboff 		}
5103*5748Sduboff 		mutex_exit(&dp->intrlock);
5104*5748Sduboff 
5105*5748Sduboff 		/* initialize mii phy because we are just after power up */
5106*5748Sduboff 		if (dp->gc.gc_mii_init) {
5107*5748Sduboff 			(void) (*dp->gc.gc_mii_init)(dp);
5108*5748Sduboff 		}
5109*5748Sduboff 
5110*5748Sduboff 		if (dp->misc_flag & GEM_NOINTR) {
5111*5748Sduboff 			/*
5112*5748Sduboff 			 * schedule first call of gem_intr_watcher
5113*5748Sduboff 			 * instead of interrupts.
5114*5748Sduboff 			 */
5115*5748Sduboff 			dp->intr_watcher_id =
5116*5748Sduboff 			    timeout((void (*)(void *))gem_intr_watcher,
5117*5748Sduboff 			    (void *)dp, drv_usectohz(3*1000000));
5118*5748Sduboff 		}
5119*5748Sduboff 
5120*5748Sduboff 		/* restart mii link watcher */
5121*5748Sduboff 		gem_mii_start(dp);
5122*5748Sduboff 
5123*5748Sduboff 		/* restart mac */
5124*5748Sduboff 		mutex_enter(&dp->intrlock);
5125*5748Sduboff 
5126*5748Sduboff 		if (gem_mac_init(dp) != GEM_SUCCESS) {
5127*5748Sduboff 			mutex_exit(&dp->intrlock);
5128*5748Sduboff 			goto err_reset;
5129*5748Sduboff 		}
5130*5748Sduboff 		dp->nic_state = NIC_STATE_INITIALIZED;
5131*5748Sduboff 
5132*5748Sduboff 		/* set up media mode if the link has been up */
5133*5748Sduboff 		if (dp->mii_state == MII_STATE_LINKUP) {
5134*5748Sduboff 			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5135*5748Sduboff 				mutex_exit(&dp->intrlock);
5136*5748Sduboff 				goto err_reset;
5137*5748Sduboff 			}
5138*5748Sduboff 		}
5139*5748Sduboff 
5140*5748Sduboff 		/* enable mac address and rx filter */
5141*5748Sduboff 		dp->rxmode |= RXMODE_ENABLE;
5142*5748Sduboff 		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5143*5748Sduboff 			mutex_exit(&dp->intrlock);
5144*5748Sduboff 			goto err_reset;
5145*5748Sduboff 		}
5146*5748Sduboff 		dp->nic_state = NIC_STATE_ONLINE;
5147*5748Sduboff 
5148*5748Sduboff 		/* restart tx timeout watcher */
5149*5748Sduboff 		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5150*5748Sduboff 		    (void *)dp,
5151*5748Sduboff 		    dp->gc.gc_tx_timeout_interval);
5152*5748Sduboff 
5153*5748Sduboff 		/* now the nic is fully functional */
5154*5748Sduboff 		if (dp->mii_state == MII_STATE_LINKUP) {
5155*5748Sduboff 			if (gem_mac_start(dp) != GEM_SUCCESS) {
5156*5748Sduboff 				mutex_exit(&dp->intrlock);
5157*5748Sduboff 				goto err_reset;
5158*5748Sduboff 			}
5159*5748Sduboff 		}
5160*5748Sduboff 		mutex_exit(&dp->intrlock);
5161*5748Sduboff 	}
5162*5748Sduboff 
5163*5748Sduboff 	return (DDI_SUCCESS);
5164*5748Sduboff 
5165*5748Sduboff err_reset:
5166*5748Sduboff 	if (dp->intr_watcher_id) {
5167*5748Sduboff 		while (untimeout(dp->intr_watcher_id) == -1)
5168*5748Sduboff 			;
5169*5748Sduboff 		dp->intr_watcher_id = 0;
5170*5748Sduboff 	}
5171*5748Sduboff 	mutex_enter(&dp->intrlock);
5172*5748Sduboff 	(void) (*dp->gc.gc_reset_chip)(dp);
5173*5748Sduboff 	dp->nic_state = NIC_STATE_STOPPED;
5174*5748Sduboff 	mutex_exit(&dp->intrlock);
5175*5748Sduboff 
5176*5748Sduboff err:
5177*5748Sduboff 	return (DDI_FAILURE);
5178*5748Sduboff }
5179*5748Sduboff 
5180*5748Sduboff /*
5181*5748Sduboff  * misc routines for PCI
5182*5748Sduboff  */
5183*5748Sduboff uint8_t
5184*5748Sduboff gem_search_pci_cap(dev_info_t *dip,
5185*5748Sduboff 		ddi_acc_handle_t conf_handle, uint8_t target)
5186*5748Sduboff {
5187*5748Sduboff 	uint8_t		pci_cap_ptr;
5188*5748Sduboff 	uint32_t	pci_cap;
5189*5748Sduboff 
5190*5748Sduboff 	/* walk the capability list looking for the target capability */
5191*5748Sduboff 	pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5192*5748Sduboff 	while (pci_cap_ptr) {
5193*5748Sduboff 		/* read pci capability header */
5194*5748Sduboff 		pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5195*5748Sduboff 		if ((pci_cap & 0xff) == target) {
5196*5748Sduboff 			/* found */
5197*5748Sduboff 			break;
5198*5748Sduboff 		}
5199*5748Sduboff 		/* get next_ptr */
5200*5748Sduboff 		pci_cap_ptr = (pci_cap >> 8) & 0xff;
5201*5748Sduboff 	}
5202*5748Sduboff 	return (pci_cap_ptr);
5203*5748Sduboff }
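
/*
 * Each 32-bit capability header read above packs the capability id in
 * bits 7:0 and the config-space offset of the next capability in bits
 * 15:8; a next pointer of zero terminates the list. Typical use is
 * what gem_pci_set_power_state() does below:
 *
 *	uint8_t	pm = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
 */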
5204*5748Sduboff 
5205*5748Sduboff int
5206*5748Sduboff gem_pci_set_power_state(dev_info_t *dip,
5207*5748Sduboff 		ddi_acc_handle_t conf_handle, uint_t new_mode)
5208*5748Sduboff {
5209*5748Sduboff 	uint8_t		pci_cap_ptr;
5210*5748Sduboff 	uint32_t	pmcsr;
5211*5748Sduboff 	uint_t		unit;
5212*5748Sduboff 	const char	*drv_name;
5213*5748Sduboff 
5214*5748Sduboff 	ASSERT(new_mode < 4);
5215*5748Sduboff 
5216*5748Sduboff 	unit = ddi_get_instance(dip);
5217*5748Sduboff 	drv_name = ddi_driver_name(dip);
5218*5748Sduboff 
5219*5748Sduboff 	/* search power management capabilities */
5220*5748Sduboff 	pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5221*5748Sduboff 
5222*5748Sduboff 	if (pci_cap_ptr == 0) {
5223*5748Sduboff 		cmn_err(CE_CONT,
5224*5748Sduboff 		    "!%s%d: doesn't have pci power management capability",
5225*5748Sduboff 		    drv_name, unit);
5226*5748Sduboff 		return (DDI_FAILURE);
5227*5748Sduboff 	}
5228*5748Sduboff 
5229*5748Sduboff 	/* read the power management control/status register (PMCSR) */
5230*5748Sduboff 	pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5231*5748Sduboff 
5232*5748Sduboff 	DPRINTF(0, (CE_CONT,
5233*5748Sduboff 	    "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5234*5748Sduboff 	    drv_name, unit, pci_cap_ptr, pmcsr));
5235*5748Sduboff 
5236*5748Sduboff 	/*
5237*5748Sduboff 	 * Is the requested power mode supported?
5238*5748Sduboff 	 */
5239*5748Sduboff 	/* not yet */
5240*5748Sduboff 
5241*5748Sduboff 	/*
5242*5748Sduboff 	 * move to new mode
5243*5748Sduboff 	 */
5244*5748Sduboff 	pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5245*5748Sduboff 	pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5246*5748Sduboff 
5247*5748Sduboff 	return (DDI_SUCCESS);
5248*5748Sduboff }
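
/*
 * Example (illustrative): a chip driver would typically move the
 * device to full-power state D0 before touching its registers:
 *
 *	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
 *
 * PCI_PMCSR_D0 is assumed to come from <sys/pci.h>.
 */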
5249*5748Sduboff 
5250*5748Sduboff /*
5251*5748Sduboff  * select a suitable register set, specified by address space type or
5252*5748Sduboff  * register offset in PCI config space, and map it
5253*5748Sduboff  */
5254*5748Sduboff int
5255*5748Sduboff gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5256*5748Sduboff 	struct ddi_device_acc_attr *attrp,
5257*5748Sduboff 	caddr_t *basep, ddi_acc_handle_t *hp)
5258*5748Sduboff {
5259*5748Sduboff 	struct pci_phys_spec	*regs;
5260*5748Sduboff 	uint_t		len;
5261*5748Sduboff 	uint_t		unit;
5262*5748Sduboff 	uint_t		n;
5263*5748Sduboff 	uint_t		i;
5264*5748Sduboff 	int		ret;
5265*5748Sduboff 	const char	*drv_name;
5266*5748Sduboff 
5267*5748Sduboff 	unit = ddi_get_instance(dip);
5268*5748Sduboff 	drv_name = ddi_driver_name(dip);
5269*5748Sduboff 
5270*5748Sduboff 	/* Search IO-range or memory-range to be mapped */
5271*5748Sduboff 	regs = NULL;
5272*5748Sduboff 	len  = 0;
5273*5748Sduboff 
5274*5748Sduboff 	if ((ret = ddi_prop_lookup_int_array(
5275*5748Sduboff 	    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5276*5748Sduboff 	    "reg", (void *)&regs, &len)) != DDI_PROP_SUCCESS) {
5277*5748Sduboff 		cmn_err(CE_WARN,
5278*5748Sduboff 		    "!%s%d: failed to get reg property (ret:%d)",
5279*5748Sduboff 		    drv_name, unit, ret);
5280*5748Sduboff 		return (DDI_FAILURE);
5281*5748Sduboff 	}
5282*5748Sduboff 	n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5283*5748Sduboff 
5284*5748Sduboff 	ASSERT(regs != NULL && len > 0);
5285*5748Sduboff 
5286*5748Sduboff #if GEM_DEBUG_LEVEL > 0
5287*5748Sduboff 	for (i = 0; i < n; i++) {
5288*5748Sduboff 		cmn_err(CE_CONT,
5289*5748Sduboff 		    "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5290*5748Sduboff 		    drv_name, unit, i,
5291*5748Sduboff 		    regs[i].pci_phys_hi,
5292*5748Sduboff 		    regs[i].pci_phys_mid,
5293*5748Sduboff 		    regs[i].pci_phys_low,
5294*5748Sduboff 		    regs[i].pci_size_hi,
5295*5748Sduboff 		    regs[i].pci_size_low);
5296*5748Sduboff 	}
5297*5748Sduboff #endif
5298*5748Sduboff 	for (i = 0; i < n; i++) {
5299*5748Sduboff 		if ((regs[i].pci_phys_hi & mask) == which) {
5300*5748Sduboff 			/* it's the requested space */
5301*5748Sduboff 			ddi_prop_free(regs);
5302*5748Sduboff 			goto address_range_found;
5303*5748Sduboff 		}
5304*5748Sduboff 	}
5305*5748Sduboff 	ddi_prop_free(regs);
5306*5748Sduboff 	return (DDI_FAILURE);
5307*5748Sduboff 
5308*5748Sduboff address_range_found:
5309*5748Sduboff 	if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5310*5748Sduboff 	    != DDI_SUCCESS) {
5311*5748Sduboff 		cmn_err(CE_CONT,
5312*5748Sduboff 		    "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5313*5748Sduboff 		    drv_name, unit, ret);
5314*5748Sduboff 	}
5315*5748Sduboff 
5316*5748Sduboff 	return (ret);
5317*5748Sduboff }
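
/*
 * Example (illustrative sketch): mapping a chip's 32-bit memory BAR.
 * PCI_ADDR_MEM32 and PCI_REG_ADDR_M are the address-space type and
 * mask assumed to come from <sys/pci.h>; mychip_dev_attr is a
 * hypothetical ddi_device_acc_attr structure owned by the driver.
 *
 *	caddr_t			base;
 *	ddi_acc_handle_t	regs_handle;
 *
 *	if (gem_pci_regs_map_setup(dip,
 *	    PCI_ADDR_MEM32, PCI_REG_ADDR_M,
 *	    &mychip_dev_attr, &base, &regs_handle) != DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 */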
5318*5748Sduboff 
5319*5748Sduboff void
5320*5748Sduboff gem_mod_init(struct dev_ops *dop, char *name)
5321*5748Sduboff {
5322*5748Sduboff 	mac_init_ops(dop, name);
5323*5748Sduboff }
5324*5748Sduboff 
5325*5748Sduboff void
5326*5748Sduboff gem_mod_fini(struct dev_ops *dop)
5327*5748Sduboff {
5328*5748Sduboff 	mac_fini_ops(dop);
5329*5748Sduboff }
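
/*
 * Illustrative sketch (hypothetical chip driver): these wrappers are
 * meant to bracket the usual loadable-module entry points. The
 * mychip_* names and the modlinkage are assumptions for illustration.
 *
 *	int
 *	_init(void)
 *	{
 *		int	ret;
 *
 *		gem_mod_init(&mychip_dev_ops, "mychip");
 *		if ((ret = mod_install(&modlinkage)) != 0) {
 *			gem_mod_fini(&mychip_dev_ops);
 *		}
 *		return (ret);
 *	}
 *
 *	int
 *	_fini(void)
 *	{
 *		int	ret;
 *
 *		if ((ret = mod_remove(&modlinkage)) == 0) {
 *			gem_mod_fini(&mychip_dev_ops);
 *		}
 *		return (ret);
 *	}
 */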
5330