xref: /dflybsd-src/sys/dev/netif/msk/if_msk.c (revision 03517d4e1314f46a8bc390aa6bcb929361ee0569)
1  /******************************************************************************
2   *
3   * Name   : sky2.c
4   * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5   * Version: $Revision: 1.23 $
6   * Date   : $Date: 2005/12/22 09:04:11 $
7   * Purpose: Main driver source file
8   *
9   *****************************************************************************/
10  
11  /******************************************************************************
12   *
13   *	LICENSE:
14   *	Copyright (C) Marvell International Ltd. and/or its affiliates
15   *
16   *	The computer program files contained in this folder ("Files")
17   *	are provided to you under the BSD-type license terms provided
18   *	below, and any use of such Files and any derivative works
19   *	thereof created by you shall be governed by the following terms
20   *	and conditions:
21   *
22   *	- Redistributions of source code must retain the above copyright
23   *	  notice, this list of conditions and the following disclaimer.
24   *	- Redistributions in binary form must reproduce the above
25   *	  copyright notice, this list of conditions and the following
26   *	  disclaimer in the documentation and/or other materials provided
27   *	  with the distribution.
28   *	- Neither the name of Marvell nor the names of its contributors
29   *	  may be used to endorse or promote products derived from this
30   *	  software without specific prior written permission.
31   *
32   *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33   *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34   *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35   *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36   *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37   *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38   *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39   *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40   *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41   *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42   *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43   *	OF THE POSSIBILITY OF SUCH DAMAGE.
44   *	/LICENSE
45   *
46   *****************************************************************************/
47  
48  /*-
49   * Copyright (c) 1997, 1998, 1999, 2000
50   *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
51   *
52   * Redistribution and use in source and binary forms, with or without
53   * modification, are permitted provided that the following conditions
54   * are met:
55   * 1. Redistributions of source code must retain the above copyright
56   *    notice, this list of conditions and the following disclaimer.
57   * 2. Redistributions in binary form must reproduce the above copyright
58   *    notice, this list of conditions and the following disclaimer in the
59   *    documentation and/or other materials provided with the distribution.
60   * 3. All advertising materials mentioning features or use of this software
61   *    must display the following acknowledgement:
62   *	This product includes software developed by Bill Paul.
63   * 4. Neither the name of the author nor the names of any co-contributors
64   *    may be used to endorse or promote products derived from this software
65   *    without specific prior written permission.
66   *
67   * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68   * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69   * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70   * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71   * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72   * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73   * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74   * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75   * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77   * THE POSSIBILITY OF SUCH DAMAGE.
78   */
79  /*-
80   * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81   *
82   * Permission to use, copy, modify, and distribute this software for any
83   * purpose with or without fee is hereby granted, provided that the above
84   * copyright notice and this permission notice appear in all copies.
85   *
86   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87   * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88   * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89   * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90   * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91   * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92   * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93   */
94  
95  /* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
96  
97  /*
98   * Device driver for the Marvell Yukon II Ethernet controller.
99   * Due to lack of documentation, this driver is based on the code from
100   * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
101   */
102  
103  #include <sys/param.h>
104  #include <sys/endian.h>
105  #include <sys/kernel.h>
106  #include <sys/bus.h>
107  #include <sys/in_cksum.h>
108  #include <sys/interrupt.h>
109  #include <sys/malloc.h>
110  #include <sys/proc.h>
111  #include <sys/rman.h>
112  #include <sys/serialize.h>
113  #include <sys/socket.h>
114  #include <sys/sockio.h>
115  #include <sys/sysctl.h>
116  
117  #include <net/ethernet.h>
118  #include <net/if.h>
119  #include <net/bpf.h>
120  #include <net/if_arp.h>
121  #include <net/if_dl.h>
122  #include <net/if_media.h>
123  #include <net/ifq_var.h>
124  #include <net/vlan/if_vlan_var.h>
125  
126  #include <netinet/ip.h>
127  #include <netinet/ip_var.h>
128  
129  #include <dev/netif/mii_layer/miivar.h>
130  
131  #include <bus/pci/pcireg.h>
132  #include <bus/pci/pcivar.h>
133  
134  #include "if_mskreg.h"
135  
136  /* "device miibus" required.  See GENERIC if you get errors here. */
137  #include "miibus_if.h"
138  
139  #define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
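/*
 * Checksum offloads advertised for transmit.  This mask is what msk_ioctl()
 * sets in (or clears from) ifp->if_hwassist when IFCAP_TXCSUM is toggled.
 */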
140  
141  /*
142   * Devices supported by this driver.
143   */
144  static const struct msk_product {
145  	uint16_t	msk_vendorid;
146  	uint16_t	msk_deviceid;
147  	const char	*msk_name;
148  } msk_products[] = {
149  	{ VENDORID_SK, DEVICEID_SK_YUKON2,
150  	    "SK-9Sxx Gigabit Ethernet" },
151  	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
152  	    "SK-9Exx Gigabit Ethernet"},
153  	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
154  	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
155  	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
156  	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
157  	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
158  	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
159  	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
160  	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
161  	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
162  	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
163  	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
164  	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
165  	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
166  	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
167  	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
168  	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
169  	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
170  	    "Marvell Yukon 88E8035 Fast Ethernet" },
171  	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
172  	    "Marvell Yukon 88E8036 Fast Ethernet" },
173  	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
174  	    "Marvell Yukon 88E8038 Fast Ethernet" },
175  	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
176  	    "Marvell Yukon 88E8039 Fast Ethernet" },
177  	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
178  	    "Marvell Yukon 88E8040 Fast Ethernet" },
179  	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
180  	    "Marvell Yukon 88E8040T Fast Ethernet" },
181  	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
182  	    "Marvell Yukon 88E8042 Fast Ethernet" },
183  	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
184  	    "Marvell Yukon 88E8048 Fast Ethernet" },
185  	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
186  	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
187  	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
188  	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
189  	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
190  	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
191  	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
192  	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
193  	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
194  	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
195  	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
196  	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
197  	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
198  	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
199  	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
200  	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
201  	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
202  	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
203  	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
204  	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
205  	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
206  	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
207  	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
208  	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
209  	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
210  	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
211  	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
212  	    "D-Link 550SX Gigabit Ethernet" },
213  	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
214  	    "D-Link 560T Gigabit Ethernet" },
215  	{ 0, 0, NULL }
216  };
217  
218  static const char *model_name[] = {
219  	"Yukon XL",
220  	"Yukon EC Ultra",
221  	"Yukon EX",
222  	"Yukon EC",
223  	"Yukon FE",
224  	"Yukon FE+",
225  	"Yukon Supreme",
226  	"Yukon Ultra 2",
227  	"Yukon Unknown",
228  	"Yukon Optima"
229  };
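
/*
 * Note: this table is indexed by (msk_hw_id - CHIP_ID_YUKON_XL) in
 * msk_probe(), which assumes the CHIP_ID_YUKON_* values are contiguous
 * and start at CHIP_ID_YUKON_XL.
 */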
230  
231  static int	mskc_probe(device_t);
232  static int	mskc_attach(device_t);
233  static int	mskc_detach(device_t);
234  static int	mskc_shutdown(device_t);
235  static int	mskc_suspend(device_t);
236  static int	mskc_resume(device_t);
237  static void	mskc_intr(void *);
238  
239  static void	mskc_reset(struct msk_softc *);
240  static void	mskc_set_imtimer(struct msk_softc *);
241  static void	mskc_intr_hwerr(struct msk_softc *);
242  static int	mskc_handle_events(struct msk_softc *);
243  static void	mskc_phy_power(struct msk_softc *, int);
244  static int	mskc_setup_rambuffer(struct msk_softc *);
245  static int	mskc_status_dma_alloc(struct msk_softc *);
246  static void	mskc_status_dma_free(struct msk_softc *);
247  static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
248  static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
249  
250  static int	msk_probe(device_t);
251  static int	msk_attach(device_t);
252  static int	msk_detach(device_t);
253  static int	msk_miibus_readreg(device_t, int, int);
254  static int	msk_miibus_writereg(device_t, int, int, int);
255  static void	msk_miibus_statchg(device_t);
256  
257  static void	msk_init(void *);
258  static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
259  static void	msk_start(struct ifnet *, struct ifaltq_subque *);
260  static void	msk_watchdog(struct ifnet *);
261  static int	msk_mediachange(struct ifnet *);
262  static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);
263  
264  static void	msk_tick(void *);
265  static void	msk_intr_phy(struct msk_if_softc *);
266  static void	msk_intr_gmac(struct msk_if_softc *);
267  static __inline void
268  		msk_rxput(struct msk_if_softc *);
269  static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
270  static void	msk_rxeof(struct msk_if_softc *, uint32_t, int);
271  static void	msk_txeof(struct msk_if_softc *, int);
272  static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
273  static void	msk_set_rambuffer(struct msk_if_softc *);
274  static void	msk_stop(struct msk_if_softc *);
275  
276  static int	msk_txrx_dma_alloc(struct msk_if_softc *);
277  static void	msk_txrx_dma_free(struct msk_if_softc *);
278  static int	msk_init_rx_ring(struct msk_if_softc *);
279  static void	msk_init_tx_ring(struct msk_if_softc *);
280  static __inline void
281  		msk_discard_rxbuf(struct msk_if_softc *, int);
282  static int	msk_newbuf(struct msk_if_softc *, int, int);
283  static int	msk_encap(struct msk_if_softc *, struct mbuf **);
284  
285  #ifdef MSK_JUMBO
286  static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
287  static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
288  static int msk_jumbo_newbuf(struct msk_if_softc *, int);
289  static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
290  static void *msk_jalloc(struct msk_if_softc *);
291  static void msk_jfree(void *, void *);
292  #endif
293  
294  static int	msk_phy_readreg(struct msk_if_softc *, int, int);
295  static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);
296  
297  static void	msk_rxfilter(struct msk_if_softc *);
298  static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
299  static void	msk_set_tx_stfwd(struct msk_if_softc *);
300  
301  static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
302  				  void **, bus_addr_t *, bus_dmamap_t *);
303  static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
304  
305  static device_method_t mskc_methods[] = {
306  	/* Device interface */
307  	DEVMETHOD(device_probe,		mskc_probe),
308  	DEVMETHOD(device_attach,	mskc_attach),
309  	DEVMETHOD(device_detach,	mskc_detach),
310  	DEVMETHOD(device_suspend,	mskc_suspend),
311  	DEVMETHOD(device_resume,	mskc_resume),
312  	DEVMETHOD(device_shutdown,	mskc_shutdown),
313  
314  	/* bus interface */
315  	DEVMETHOD(bus_print_child,	bus_generic_print_child),
316  	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
317  
318  	{ NULL, NULL }
319  };
320  
321  static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
322  static devclass_t mskc_devclass;
323  
324  static device_method_t msk_methods[] = {
325  	/* Device interface */
326  	DEVMETHOD(device_probe,		msk_probe),
327  	DEVMETHOD(device_attach,	msk_attach),
328  	DEVMETHOD(device_detach,	msk_detach),
329  	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
330  
331  	/* bus interface */
332  	DEVMETHOD(bus_print_child,	bus_generic_print_child),
333  	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
334  
335  	/* MII interface */
336  	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
337  	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
338  	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),
339  
340  	{ NULL, NULL }
341  };
342  
343  static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
344  static devclass_t msk_devclass;
345  
346  DECLARE_DUMMY_MODULE(if_msk);
347  DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
348  DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
349  DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
350  
351  static int	mskc_msi_enable = 0;
352  static int	mskc_intr_rate = 0;
353  static int	mskc_process_limit = MSK_PROC_DEFAULT;
354  
355  TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
356  TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
357  TUNABLE_INT("hw.mskc.msi.enable", &mskc_msi_enable);
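/*
 * These tunables are read once at boot (e.g. set via /boot/loader.conf);
 * mskc_attach() copies process_limit and intr_rate into the per-controller
 * softc, where they remain adjustable through the sysctl handlers it
 * registers.  The MSI tunable is consumed only at attach time.
 */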
358  
359  static int
360  msk_miibus_readreg(device_t dev, int phy, int reg)
361  {
362  	struct msk_if_softc *sc_if;
363  
364  	if (phy != PHY_ADDR_MARV)
365  		return (0);
366  
367  	sc_if = device_get_softc(dev);
368  
369  	return (msk_phy_readreg(sc_if, phy, reg));
370  }
371  
372  static int
373  msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
374  {
375  	struct msk_softc *sc;
376  	int i, val;
377  
378  	sc = sc_if->msk_softc;
379  
380  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
381  	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
382  
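	/*
	 * Poll for read completion: the GMAC sets GM_SMI_CT_RD_VAL once the
	 * requested register value is available in GM_SMI_DATA.  With 1us
	 * delays this bounds the wait to roughly MSK_TIMEOUT microseconds
	 * before giving up and returning 0.
	 */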
383  	for (i = 0; i < MSK_TIMEOUT; i++) {
384  		DELAY(1);
385  		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
386  		if ((val & GM_SMI_CT_RD_VAL) != 0) {
387  			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
388  			break;
389  		}
390  	}
391  
392  	if (i == MSK_TIMEOUT) {
393  		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
394  		val = 0;
395  	}
396  
397  	return (val);
398  }
399  
400  static int
401  msk_miibus_writereg(device_t dev, int phy, int reg, int val)
402  {
403  	struct msk_if_softc *sc_if;
404  
405  	if (phy != PHY_ADDR_MARV)
406  		return (0);
407  
408  	sc_if = device_get_softc(dev);
409  
410  	return (msk_phy_writereg(sc_if, phy, reg, val));
411  }
412  
413  static int
414  msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
415  {
416  	struct msk_softc *sc;
417  	int i;
418  
419  	sc = sc_if->msk_softc;
420  
421  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
422  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
423  	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
424  	for (i = 0; i < MSK_TIMEOUT; i++) {
425  		DELAY(1);
426  		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
427  		    GM_SMI_CT_BUSY) == 0)
428  			break;
429  	}
430  	if (i == MSK_TIMEOUT)
431  		if_printf(sc_if->msk_ifp, "phy write timeout\n");
432  
433  	return (0);
434  }
435  
436  static void
437  msk_miibus_statchg(device_t dev)
438  {
439  	struct msk_if_softc *sc_if;
440  	struct msk_softc *sc;
441  	struct mii_data *mii;
442  	uint32_t gmac;
443  
444  	sc_if = device_get_softc(dev);
445  	sc = sc_if->msk_softc;
446  
447  	mii = device_get_softc(sc_if->msk_miibus);
448  
449  	sc_if->msk_link = 0;
450  	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
451  	    (IFM_AVALID | IFM_ACTIVE)) {
452  		switch (IFM_SUBTYPE(mii->mii_media_active)) {
453  		case IFM_10_T:
454  		case IFM_100_TX:
455  			sc_if->msk_link = 1;
456  			break;
457  		case IFM_1000_T:
458  		case IFM_1000_SX:
459  		case IFM_1000_LX:
460  		case IFM_1000_CX:
461  			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
462  				sc_if->msk_link = 1;
463  			break;
464  		}
465  	}
466  
467  	if (sc_if->msk_link != 0) {
468  		/* Enable Tx FIFO Underrun. */
469  		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
470  		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
471  		/*
472  		 * Because mii(4) notifies msk(4) when it detects a link
473  		 * status change, there is no need to enable automatic
474  		 * speed/flow-control/duplex updates.
475  		 */
476  		gmac = GM_GPCR_AU_ALL_DIS;
477  		switch (IFM_SUBTYPE(mii->mii_media_active)) {
478  		case IFM_1000_SX:
479  		case IFM_1000_T:
480  			gmac |= GM_GPCR_SPEED_1000;
481  			break;
482  		case IFM_100_TX:
483  			gmac |= GM_GPCR_SPEED_100;
484  			break;
485  		case IFM_10_T:
486  			break;
487  		}
488  
489  		if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)
490  			gmac |= GM_GPCR_DUP_FULL;
491  		else
492  			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
493  		/* Disable Rx flow control. */
494  		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
495  			gmac |= GM_GPCR_FC_RX_DIS;
496  		/* Disable Tx flow control. */
497  		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
498  			gmac |= GM_GPCR_FC_TX_DIS;
499  		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
500  		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
501  		/* Read back to ensure the write completed. */
502  		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
503  
504  		gmac = GMC_PAUSE_OFF;
505  		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) &&
506  		    ((mii->mii_media_active & IFM_GMASK) & IFM_FDX))
507  			gmac = GMC_PAUSE_ON;
508  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
509  
510  		/* Enable PHY interrupt for FIFO underrun/overflow. */
511  		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
512  		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
513  	} else {
514  		/*
515  		 * Link state changed to down.
516  		 * Disable PHY interrupts.
517  		 */
518  		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
519  		/* Disable Rx/Tx MAC. */
520  		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
521  		if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
522  			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
523  			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
524  			/* Read back to ensure the write completed. */
525  			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
526  		}
527  	}
528  }
529  
530  static void
531  msk_rxfilter(struct msk_if_softc *sc_if)
532  {
533  	struct msk_softc *sc;
534  	struct ifnet *ifp;
535  	struct ifmultiaddr *ifma;
536  	uint32_t mchash[2];
537  	uint32_t crc;
538  	uint16_t mode;
539  
540  	sc = sc_if->msk_softc;
541  	ifp = sc_if->msk_ifp;
542  
543  	bzero(mchash, sizeof(mchash));
544  	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
545  	if ((ifp->if_flags & IFF_PROMISC) != 0) {
546  		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
547  	} else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
548  		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
549  		mchash[0] = 0xffff;
550  		mchash[1] = 0xffff;
551  	} else {
552  		mode |= GM_RXCR_UCF_ENA;
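		/*
		 * Build the 64-bit multicast hash filter: hash each address
		 * with big-endian CRC-32 and let the low 6 bits of the CRC
		 * select one of the 64 filter bits.  For example, a CRC whose
		 * low bits are 0x2a (42) sets bit 10 of mchash[1].
		 */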
553  		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
554  			if (ifma->ifma_addr->sa_family != AF_LINK)
555  				continue;
556  			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
557  			    ifma->ifma_addr), ETHER_ADDR_LEN);
558  			/* Just want the 6 least significant bits. */
559  			crc &= 0x3f;
560  			/* Set the corresponding bit in the hash table. */
561  			mchash[crc >> 5] |= 1 << (crc & 0x1f);
562  		}
563  		if (mchash[0] != 0 || mchash[1] != 0)
564  			mode |= GM_RXCR_MCF_ENA;
565  	}
566  
567  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
568  	    mchash[0] & 0xffff);
569  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
570  	    (mchash[0] >> 16) & 0xffff);
571  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
572  	    mchash[1] & 0xffff);
573  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
574  	    (mchash[1] >> 16) & 0xffff);
575  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
576  }
577  
578  static void
579  msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
580  {
581  	struct msk_softc *sc;
582  
583  	sc = sc_if->msk_softc;
584  	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
585  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
586  		    RX_VLAN_STRIP_ON);
587  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
588  		    TX_VLAN_TAG_ON);
589  	} else {
590  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
591  		    RX_VLAN_STRIP_OFF);
592  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
593  		    TX_VLAN_TAG_OFF);
594  	}
595  }
596  
597  static int
598  msk_init_rx_ring(struct msk_if_softc *sc_if)
599  {
600  	struct msk_ring_data *rd;
601  	struct msk_rxdesc *rxd;
602  	int i, prod;
603  
604  	sc_if->msk_cdata.msk_rx_cons = 0;
605  	sc_if->msk_cdata.msk_rx_prod = 0;
606  	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
607  
608  	rd = &sc_if->msk_rdata;
609  	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
610  	prod = sc_if->msk_cdata.msk_rx_prod;
611  	for (i = 0; i < MSK_RX_RING_CNT; i++) {
612  		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
613  		rxd->rx_m = NULL;
614  		rxd->rx_le = &rd->msk_rx_ring[prod];
615  		if (msk_newbuf(sc_if, prod, 1) != 0)
616  			return (ENOBUFS);
617  		MSK_INC(prod, MSK_RX_RING_CNT);
618  	}
619  
620  	/* Update prefetch unit. */
621  	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
622  	CSR_WRITE_2(sc_if->msk_softc,
623  	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
624  	    sc_if->msk_cdata.msk_rx_prod);
625  
626  	return (0);
627  }
628  
629  #ifdef MSK_JUMBO
630  static int
631  msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
632  {
633  	struct msk_ring_data *rd;
634  	struct msk_rxdesc *rxd;
635  	int i, prod;
636  
637  	MSK_IF_LOCK_ASSERT(sc_if);
638  
639  	sc_if->msk_cdata.msk_rx_cons = 0;
640  	sc_if->msk_cdata.msk_rx_prod = 0;
641  	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
642  
643  	rd = &sc_if->msk_rdata;
644  	bzero(rd->msk_jumbo_rx_ring,
645  	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
646  	prod = sc_if->msk_cdata.msk_rx_prod;
647  	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
648  		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
649  		rxd->rx_m = NULL;
650  		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
651  		if (msk_jumbo_newbuf(sc_if, prod) != 0)
652  			return (ENOBUFS);
653  		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
654  	}
655  
656  	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
657  	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
658  	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
659  
660  	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
661  	CSR_WRITE_2(sc_if->msk_softc,
662  	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
663  	    sc_if->msk_cdata.msk_rx_prod);
664  
665  	return (0);
666  }
667  #endif
668  
669  static void
670  msk_init_tx_ring(struct msk_if_softc *sc_if)
671  {
672  	struct msk_ring_data *rd;
673  	struct msk_txdesc *txd;
674  	int i;
675  
676  	sc_if->msk_cdata.msk_tx_prod = 0;
677  	sc_if->msk_cdata.msk_tx_cons = 0;
678  	sc_if->msk_cdata.msk_tx_cnt = 0;
679  
680  	rd = &sc_if->msk_rdata;
681  	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
682  	for (i = 0; i < MSK_TX_RING_CNT; i++) {
683  		txd = &sc_if->msk_cdata.msk_txdesc[i];
684  		txd->tx_m = NULL;
685  		txd->tx_le = &rd->msk_tx_ring[i];
686  	}
687  }
688  
689  static __inline void
690  msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
691  {
692  	struct msk_rx_desc *rx_le;
693  	struct msk_rxdesc *rxd;
694  	struct mbuf *m;
695  
696  	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
697  	m = rxd->rx_m;
698  	rx_le = rxd->rx_le;
699  	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
700  }
701  
702  #ifdef MSK_JUMBO
703  static __inline void
704  msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int	idx)
705  {
706  	struct msk_rx_desc *rx_le;
707  	struct msk_rxdesc *rxd;
708  	struct mbuf *m;
709  
710  	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
711  	m = rxd->rx_m;
712  	rx_le = rxd->rx_le;
713  	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
714  }
715  #endif
716  
717  static int
718  msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
719  {
720  	struct msk_rx_desc *rx_le;
721  	struct msk_rxdesc *rxd;
722  	struct mbuf *m;
723  	bus_dma_segment_t seg;
724  	bus_dmamap_t map;
725  	int error, nseg;
726  
727  	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
728  	if (m == NULL)
729  		return (ENOBUFS);
730  
731  	m->m_len = m->m_pkthdr.len = MCLBYTES;
732  	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
733  		m_adj(m, ETHER_ALIGN);
734  
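	/*
	 * Load the new mbuf into the spare DMA map first so that, if loading
	 * fails, the descriptor keeps its current buffer and map untouched;
	 * on success the spare map and the descriptor's map are swapped below.
	 */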
735  	error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
736  			sc_if->msk_cdata.msk_rx_sparemap,
737  			m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
738  	if (error) {
739  		m_freem(m);
740  		if (init)
741  			if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
742  		return (error);
743  	}
744  
745  	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
746  	if (rxd->rx_m != NULL) {
747  		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
748  		    BUS_DMASYNC_POSTREAD);
749  		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
750  	}
751  
752  	map = rxd->rx_dmamap;
753  	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
754  	sc_if->msk_cdata.msk_rx_sparemap = map;
755  
756  	rxd->rx_m = m;
757  	rx_le = rxd->rx_le;
758  	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
759  	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);
760  
761  	return (0);
762  }
763  
764  #ifdef MSK_JUMBO
765  static int
766  msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
767  {
768  	struct msk_rx_desc *rx_le;
769  	struct msk_rxdesc *rxd;
770  	struct mbuf *m;
771  	bus_dma_segment_t segs[1];
772  	bus_dmamap_t map;
773  	int nsegs;
774  	void *buf;
775  
776  	MGETHDR(m, M_NOWAIT, MT_DATA);
777  	if (m == NULL)
778  		return (ENOBUFS);
779  	buf = msk_jalloc(sc_if);
780  	if (buf == NULL) {
781  		m_freem(m);
782  		return (ENOBUFS);
783  	}
784  	/* Attach the buffer to the mbuf. */
785  	MEXTADD(m, buf, MSK_JLEN, msk_jfree, sc_if, 0, EXT_NET_DRV);
786  	if ((m->m_flags & M_EXT) == 0) {
787  		m_freem(m);
788  		return (ENOBUFS);
789  	}
790  	m->m_pkthdr.len = m->m_len = MSK_JLEN;
791  	m_adj(m, ETHER_ALIGN);
792  
793  	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
794  	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
795  	    BUS_DMA_NOWAIT) != 0) {
796  		m_freem(m);
797  		return (ENOBUFS);
798  	}
799  	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
800  
801  	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
802  	if (rxd->rx_m != NULL) {
803  		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
804  		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
805  		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
806  		    rxd->rx_dmamap);
807  	}
808  	map = rxd->rx_dmamap;
809  	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
810  	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
811  	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
812  	    BUS_DMASYNC_PREREAD);
813  	rxd->rx_m = m;
814  	rx_le = rxd->rx_le;
815  	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
816  	rx_le->msk_control =
817  	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
818  
819  	return (0);
820  }
821  #endif
822  
823  /*
824   * Set media options.
825   */
826  static int
827  msk_mediachange(struct ifnet *ifp)
828  {
829  	struct msk_if_softc *sc_if = ifp->if_softc;
830  	struct mii_data	*mii;
831  	int error;
832  
833  	mii = device_get_softc(sc_if->msk_miibus);
834  	error = mii_mediachg(mii);
835  
836  	return (error);
837  }
838  
839  /*
840   * Report current media status.
841   */
842  static void
843  msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
844  {
845  	struct msk_if_softc *sc_if = ifp->if_softc;
846  	struct mii_data	*mii;
847  
848  	mii = device_get_softc(sc_if->msk_miibus);
849  	mii_pollstat(mii);
850  
851  	ifmr->ifm_active = mii->mii_media_active;
852  	ifmr->ifm_status = mii->mii_media_status;
853  }
854  
855  static int
856  msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
857  {
858  	struct msk_if_softc *sc_if;
859  	struct ifreq *ifr;
860  	struct mii_data	*mii;
861  	int error, mask;
862  
863  	sc_if = ifp->if_softc;
864  	ifr = (struct ifreq *)data;
865  	error = 0;
866  
867  	switch(command) {
868  	case SIOCSIFMTU:
869  #ifdef MSK_JUMBO
870  		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
871  			error = EINVAL;
872  			break;
873  		}
874  		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
875  		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
876  			error = EINVAL;
877  			break;
878  		}
879  		ifp->if_mtu = ifr->ifr_mtu;
880  		if ((ifp->if_flags & IFF_RUNNING) != 0)
881  			msk_init(sc_if);
882  #else
883  		error = EOPNOTSUPP;
884  #endif
885  		break;
886  
887  	case SIOCSIFFLAGS:
888  		if (ifp->if_flags & IFF_UP) {
889  			if (ifp->if_flags & IFF_RUNNING) {
890  				if (((ifp->if_flags ^ sc_if->msk_if_flags)
891  				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
892  					msk_rxfilter(sc_if);
893  			} else {
894  				if (sc_if->msk_detach == 0)
895  					msk_init(sc_if);
896  			}
897  		} else {
898  			if (ifp->if_flags & IFF_RUNNING)
899  				msk_stop(sc_if);
900  		}
901  		sc_if->msk_if_flags = ifp->if_flags;
902  		break;
903  
904  	case SIOCADDMULTI:
905  	case SIOCDELMULTI:
906  		if (ifp->if_flags & IFF_RUNNING)
907  			msk_rxfilter(sc_if);
908  		break;
909  
910  	case SIOCGIFMEDIA:
911  	case SIOCSIFMEDIA:
912  		mii = device_get_softc(sc_if->msk_miibus);
913  		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
914  		break;
915  
916  	case SIOCSIFCAP:
917  		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
918  		if ((mask & IFCAP_TXCSUM) != 0) {
919  			ifp->if_capenable ^= IFCAP_TXCSUM;
920  			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
921  			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
922  				ifp->if_hwassist |= MSK_CSUM_FEATURES;
923  			else
924  				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
925  		}
926  #ifdef notyet
927  		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
928  			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
929  			msk_setvlan(sc_if, ifp);
930  		}
931  #endif
932  
933  		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
934  		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
935  			/*
936  			 * In Yukon EC Ultra, TSO & checksum offload are not
937  			 * supported for jumbo frames.
938  			 */
939  			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
940  			ifp->if_capenable &= ~IFCAP_TXCSUM;
941  		}
942  		break;
943  
944  	default:
945  		error = ether_ioctl(ifp, command, data);
946  		break;
947  	}
948  
949  	return (error);
950  }
951  
952  static int
953  mskc_probe(device_t dev)
954  {
955  	const struct msk_product *mp;
956  	uint16_t vendor, devid;
957  
958  	vendor = pci_get_vendor(dev);
959  	devid = pci_get_device(dev);
960  	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
961  		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
962  			device_set_desc(dev, mp->msk_name);
963  			return (0);
964  		}
965  	}
966  	return (ENXIO);
967  }
968  
969  static int
970  mskc_setup_rambuffer(struct msk_softc *sc)
971  {
972  	int next;
973  	int i;
974  
975  	/* Get adapter SRAM size. */
976  	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
977  	if (bootverbose) {
978  		device_printf(sc->msk_dev,
979  		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
980  	}
981  	if (sc->msk_ramsize == 0)
982  		return (0);
983  	sc->msk_pflags |= MSK_FLAG_RAMBUF;
984  
985  	/*
986  	 * Give the receiver 2/3 of the memory and round down to a
987  	 * multiple of 1024.  The Tx/Rx RAM buffer size of Yukon II
988  	 * should be a multiple of 1024.
989  	 */
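	/*
	 * For example, with a hypothetical 48KB SRAM the receiver would get
	 * rounddown(48 * 1024 * 2 / 3, 1024) = 32768 bytes and the
	 * transmitter the remaining 16384 bytes.
	 */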
990  	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
991  	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
992  	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
993  		sc->msk_rxqstart[i] = next;
994  		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
995  		next = sc->msk_rxqend[i] + 1;
996  		sc->msk_txqstart[i] = next;
997  		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
998  		next = sc->msk_txqend[i] + 1;
999  		if (bootverbose) {
1000  			device_printf(sc->msk_dev,
1001  			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1002  			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1003  			    sc->msk_rxqend[i]);
1004  			device_printf(sc->msk_dev,
1005  			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1006  			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1007  			    sc->msk_txqend[i]);
1008  		}
1009  	}
1010  
1011  	return (0);
1012  }
1013  
1014  static void
1015  mskc_phy_power(struct msk_softc *sc, int mode)
1016  {
1017  	uint32_t our, val;
1018  	int i;
1019  
1020  	switch (mode) {
1021  	case MSK_PHY_POWERUP:
1022  		/* Switch power to VCC (WA for VAUX problem). */
1023  		CSR_WRITE_1(sc, B0_POWER_CTRL,
1024  		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1025  		/* Disable Core Clock Division, set Clock Select to 0. */
1026  		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1027  
1028  		val = 0;
1029  		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1030  		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1031  			/* Enable bits are inverted. */
1032  			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1033  			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1034  			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1035  		}
1036  		/*
1037  		 * Enable PCI & Core Clock, enable clock gating for both Links.
1038  		 */
1039  		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1040  
1041  		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1042  		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1043  		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
1044  			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1045  				/* Deassert Low Power for 1st PHY. */
1046  				our |= PCI_Y2_PHY1_COMA;
1047  				if (sc->msk_num_port > 1)
1048  					our |= PCI_Y2_PHY2_COMA;
1049  			}
1050  		}
1051  		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
1052  		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1053  		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
1054  			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
1055  			val &= (PCI_FORCE_ASPM_REQUEST |
1056  			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
1057  			    PCI_ASPM_CLKRUN_REQUEST);
1058  			/* Set all bits to 0 except bits 15..12. */
1059  			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
1060  			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
1061  			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
1062  			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
1063  			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
1064  			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1065  			/*
1066  			 * Disable status race, workaround for
1067  			 * Yukon EC Ultra & Yukon EX.
1068  			 */
1069  			val = CSR_READ_4(sc, B2_GP_IO);
1070  			val |= GLB_GPIO_STAT_RACE_DIS;
1071  			CSR_WRITE_4(sc, B2_GP_IO, val);
1072  			CSR_READ_4(sc, B2_GP_IO);
1073  		}
1074  		/* Release PHY from PowerDown/COMA mode. */
1075  		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
1076  
1077  		for (i = 0; i < sc->msk_num_port; i++) {
1078  			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1079  			    GMLC_RST_SET);
1080  			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1081  			    GMLC_RST_CLR);
1082  		}
1083  		break;
1084  	case MSK_PHY_POWERDOWN:
1085  		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1086  		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1087  		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1088  		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1089  			val &= ~PCI_Y2_PHY1_COMA;
1090  			if (sc->msk_num_port > 1)
1091  				val &= ~PCI_Y2_PHY2_COMA;
1092  		}
1093  		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1094  
1095  		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1096  		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1097  		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1098  		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1099  		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1100  			/* Enable bits are inverted. */
1101  			val = 0;
1102  		}
1103  		/*
1104  		 * Disable PCI & Core Clock, disable clock gating for
1105  		 * both Links.
1106  		 */
1107  		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1108  		CSR_WRITE_1(sc, B0_POWER_CTRL,
1109  		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1110  		break;
1111  	default:
1112  		break;
1113  	}
1114  }
1115  
1116  static void
1117  mskc_reset(struct msk_softc *sc)
1118  {
1119  	bus_addr_t addr;
1120  	uint16_t status;
1121  	uint32_t val;
1122  	int i;
1123  
1124  	/* Disable ASF. */
1125  	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
1126  	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
1127  		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1128  		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
1129  			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1130  			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
1131  			/* Clear AHB bridge & microcontroller reset. */
1132  			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
1133  			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
1134  			/* Clear ASF microcontroller state. */
1135  			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
1136  			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
1137  			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
1138  			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1139  		} else {
1140  			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1141  		}
1142  		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1143  		/*
1144  		 * Since we disabled ASF, S/W reset is required for
1145  		 * Power Management.
1146  		 */
1147  		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1148  		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1149  	}
1150  
1151  	/* Clear all error bits in the PCI status register. */
1152  	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1153  	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1154  
1155  	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1156  	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1157  	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1158  	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1159  
1160  	switch (sc->msk_bustype) {
1161  	case MSK_PEX_BUS:
1162  		/* Clear all PEX errors. */
1163  		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1164  		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1165  		if ((val & PEX_RX_OV) != 0) {
1166  			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1167  			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1168  		}
1169  		break;
1170  	case MSK_PCI_BUS:
1171  	case MSK_PCIX_BUS:
1172  		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
1173  		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1174  		if (val == 0)
1175  			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1176  		if (sc->msk_bustype == MSK_PCIX_BUS) {
1177  			/* Set Cache Line Size opt. */
1178  			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1179  			val |= PCI_CLS_OPT;
1180  			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1181  		}
1182  		break;
1183  	}
1184  	/* Set PHY power state. */
1185  	mskc_phy_power(sc, MSK_PHY_POWERUP);
1186  
1187  	/* Reset GPHY/GMAC Control */
1188  	for (i = 0; i < sc->msk_num_port; i++) {
1189  		/* GPHY Control reset. */
1190  		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1191  		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1192  		/* GMAC Control reset. */
1193  		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1194  		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1195  		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1196  		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1197  		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
1198  			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
1199  			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
1200  			    GMC_BYP_RETR_ON);
1201  		}
1202  	}
1203  
1204  	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
1205  	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
1206  		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
1207  	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
1208  		/* Disable PCIe PHY powerdown(reg 0x80, bit7). */
1209  		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
1210  	}
1211  	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1212  
1213  	/* LED On. */
1214  	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1215  
1216  	/* Clear TWSI IRQ. */
1217  	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1218  
1219  	/* Turn off hardware timer. */
1220  	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1221  	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1222  
1223  	/* Turn off descriptor polling. */
1224  	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1225  
1226  	/* Turn off time stamps. */
1227  	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1228  	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1229  
1230  	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
1231  	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
1232  	    sc->msk_hw_id == CHIP_ID_YUKON_FE) {
1233  		/* Configure timeout values. */
1234  		for (i = 0; i < sc->msk_num_port; i++) {
1235  			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
1236  			    RI_RST_SET);
1237  			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
1238  			    RI_RST_CLR);
1239  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1240  			    MSK_RI_TO_53);
1241  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1242  			    MSK_RI_TO_53);
1243  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1244  			    MSK_RI_TO_53);
1245  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1246  			    MSK_RI_TO_53);
1247  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1248  			    MSK_RI_TO_53);
1249  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1250  			    MSK_RI_TO_53);
1251  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1252  			    MSK_RI_TO_53);
1253  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1254  			    MSK_RI_TO_53);
1255  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1256  			    MSK_RI_TO_53);
1257  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1258  			    MSK_RI_TO_53);
1259  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1260  			    MSK_RI_TO_53);
1261  			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1262  			    MSK_RI_TO_53);
1263  		}
1264  	}
1265  
1266  	/* Disable all interrupts. */
1267  	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1268  	CSR_READ_4(sc, B0_HWE_IMSK);
1269  	CSR_WRITE_4(sc, B0_IMSK, 0);
1270  	CSR_READ_4(sc, B0_IMSK);
1271  
1272  	/*
1273  	 * On dual-port PCI-X cards, there is a problem where status updates
1274  	 * can be received out of order due to split transactions.
1275  	 */
1276  	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
1277  		uint16_t pcix_cmd;
1278  
1279  		pcix_cmd = pci_read_config(sc->msk_dev,
1280  		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
1281  		/* Clear Max Outstanding Split Transactions. */
1282  		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
1283  		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1284  		pci_write_config(sc->msk_dev,
1285  		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
1286  		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1287  	}
1288  	if (sc->msk_pciecap != 0) {
1289  		/* Change Max. Read Request Size to 2048 bytes. */
1290  		if (pcie_get_max_readrq(sc->msk_dev) ==
1291  		    PCIEM_DEVCTL_MAX_READRQ_512) {
1292  			pcie_set_max_readrq(sc->msk_dev,
1293  			    PCIEM_DEVCTL_MAX_READRQ_2048);
1294  		}
1295  	}
1296  
1297  	/* Clear status list. */
1298  	bzero(sc->msk_stat_ring,
1299  	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1300  	sc->msk_stat_cons = 0;
1301  	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1302  	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1303  	/* Set the status list base address. */
1304  	addr = sc->msk_stat_ring_paddr;
1305  	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1306  	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1307  	/* Set the status list last index. */
1308  	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1309  	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1310  	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1311  		/* WA for dev. #4.3 */
1312  		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1313  		/* WA for dev. #4.18 */
1314  		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1315  		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1316  	} else {
1317  		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1318  		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1319  		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1320  		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1321  			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1322  		else
1323  			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1324  		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1325  	}
1326  	/*
1327  	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1328  	 */
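	/*
	 * sc->msk_clock is the core clock in MHz, so MSK_USECS() is assumed
	 * to scale a microsecond count into core-clock ticks for the timer
	 * initialization registers.
	 */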
1329  	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1330  
1331  	/* Enable status unit. */
1332  	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1333  
1334  	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1335  	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1336  	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1337  }
1338  
1339  static int
1340  msk_probe(device_t dev)
1341  {
1342  	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
1343  	char desc[100];
1344  
1345  	/*
1346  	 * Not much to do here. We always know there will be
1347  	 * at least one GMAC present, and if there are two,
1348  	 * mskc_attach() will create a second device instance
1349  	 * for us.
1350  	 */
1351  	ksnprintf(desc, sizeof(desc),
1352  	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1353  	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1354  	    sc->msk_hw_rev);
1355  	device_set_desc_copy(dev, desc);
1356  
1357  	return (0);
1358  }
1359  
1360  static int
1361  msk_attach(device_t dev)
1362  {
1363  	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
1364  	struct msk_if_softc *sc_if = device_get_softc(dev);
1365  	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1366  	int i, port, error;
1367  	uint8_t eaddr[ETHER_ADDR_LEN];
1368  
1369  	port = *(int *)device_get_ivars(dev);
1370  	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);
1371  
1372  	kfree(device_get_ivars(dev), M_DEVBUF);
1373  	device_set_ivars(dev, NULL);
1374  
1375  	callout_init(&sc_if->msk_tick_ch);
1376  	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1377  
1378  	sc_if->msk_if_dev = dev;
1379  	sc_if->msk_port = port;
1380  	sc_if->msk_softc = sc;
1381  	sc_if->msk_ifp = ifp;
1382  	sc_if->msk_flags = sc->msk_pflags;
1383  	sc->msk_if[port] = sc_if;
1384  
1385  	/* Setup Tx/Rx queue register offsets. */
1386  	if (port == MSK_PORT_A) {
1387  		sc_if->msk_txq = Q_XA1;
1388  		sc_if->msk_txsq = Q_XS1;
1389  		sc_if->msk_rxq = Q_R1;
1390  	} else {
1391  		sc_if->msk_txq = Q_XA2;
1392  		sc_if->msk_txsq = Q_XS2;
1393  		sc_if->msk_rxq = Q_R2;
1394  	}
1395  
1396  	error = msk_txrx_dma_alloc(sc_if);
1397  	if (error)
1398  		goto fail;
1399  
1400  	ifp->if_softc = sc_if;
1401  	ifp->if_mtu = ETHERMTU;
1402  	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1403  	ifp->if_init = msk_init;
1404  	ifp->if_ioctl = msk_ioctl;
1405  	ifp->if_start = msk_start;
1406  	ifp->if_watchdog = msk_watchdog;
1407  	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1408  	ifq_set_ready(&ifp->if_snd);
1409  
1410  #ifdef notyet
1411  	/*
1412  	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
1413  	 * has a serious bug in Rx checksum offload for all Yukon II family
1414  	 * hardware.  It seems there is a workaround to make it work sometimes,
1415  	 * but the workaround also has to check OP code sequences to verify
1416  	 * whether the OP code is correct.  Sometimes it would have to compute
1417  	 * the IP/TCP/UDP checksum in the driver in order to verify the
1418  	 * checksum computed by the hardware.  If you have to compute the
1419  	 * checksum in software to verify the hardware's checksum, why have
1420  	 * the hardware compute the checksum at all?  So there is no reason to
1421  	 * spend time making Rx checksum offload work on Yukon II hardware.
1422  	 */
1423  	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
1424  			       IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1425  	ifp->if_hwassist = MSK_CSUM_FEATURES;
1426  	ifp->if_capenable = ifp->if_capabilities;
1427  #endif
1428  
1429  	/*
1430  	 * Get station address for this interface. Note that
1431  	 * dual port cards actually come with three station
1432  	 * addresses: one for each port, plus an extra. The
1433  	 * extra one is used by the SysKonnect driver software
1434  	 * as a 'virtual' station address for when both ports
1435  	 * are operating in failover mode. Currently we don't
1436  	 * use this extra address.
1437  	 */
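	/*
	 * Each port's station address is stored at B2_MAC_1 + (port * 8) in
	 * the control register space, so port B is read 8 bytes past port A.
	 */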
1438  	for (i = 0; i < ETHER_ADDR_LEN; i++)
1439  		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1440  
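	/* Worst-case frame size: MTU plus Ethernet header plus VLAN tag. */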
1441  	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
1442  
1443  	/*
1444  	 * Do miibus setup.
1445  	 */
1446  	error = mii_phy_probe(dev, &sc_if->msk_miibus,
1447  			      msk_mediachange, msk_mediastatus);
1448  	if (error) {
1449  		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1450  		goto fail;
1451  	}
1452  
1453  	/*
1454  	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1455  	 */
1456  	ether_ifattach(ifp, eaddr, &sc->msk_serializer);
1457  #if 0
1458  	/*
1459  	 * Tell the upper layer(s) we support long frames.
1460  	 * Must appear after the call to ether_ifattach() because
1461  	 * ether_ifattach() sets ifi_hdrlen to the default value.
1462  	 */
1463  	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1464  #endif
1465  
1466  	return 0;
1467  fail:
1468  	msk_detach(dev);
1469  	sc->msk_if[port] = NULL;
1470  	return (error);
1471  }
1472  
1473  /*
1474   * Attach the interface. Allocate softc structures, do ifmedia
1475   * setup and ethernet/BPF attach.
1476   */
1477  static int
1478  mskc_attach(device_t dev)
1479  {
1480  	struct msk_softc *sc;
1481  	struct sysctl_ctx_list *ctx;
1482  	struct sysctl_oid *tree;
1483  	int error, *port, cpuid;
1484  	u_int irq_flags;
1485  
1486  	sc = device_get_softc(dev);
1487  	sc->msk_dev = dev;
1488  	lwkt_serialize_init(&sc->msk_serializer);
1489  
1490  	/*
1491  	 * Initialize sysctl variables
1492  	 */
1493  	sc->msk_process_limit = mskc_process_limit;
1494  	sc->msk_intr_rate = mskc_intr_rate;
1495  
1496  #ifndef BURN_BRIDGES
1497  	/*
1498  	 * Handle power management nonsense.
1499  	 */
1500  	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1501  		uint32_t irq, bar0, bar1;
1502  
1503  		/* Save important PCI config data. */
1504  		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
1505  		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
1506  		irq = pci_read_config(dev, PCIR_INTLINE, 4);
1507  
1508  		/* Reset the power state. */
1509  		device_printf(dev, "chip is in D%d power mode "
1510  			      "-- setting to D0\n", pci_get_powerstate(dev));
1511  
1512  		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1513  
1514  		/* Restore PCI config data. */
1515  		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
1516  		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
1517  		pci_write_config(dev, PCIR_INTLINE, irq, 4);
1518  	}
1519  #endif	/* BURN_BRIDGES */
1520  
1521  	/*
1522  	 * Map control/status registers.
1523  	 */
1524  	pci_enable_busmaster(dev);
1525  
1526  	/*
1527  	 * Allocate I/O resource
1528  	 */
1529  #ifdef MSK_USEIOSPACE
1530  	sc->msk_res_type = SYS_RES_IOPORT;
1531  	sc->msk_res_rid = PCIR_BAR(1);
1532  #else
1533  	sc->msk_res_type = SYS_RES_MEMORY;
1534  	sc->msk_res_rid = PCIR_BAR(0);
1535  #endif
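	/*
	 * Try the preferred BAR selected above first and fall back to the
	 * other mapping (memory vs. I/O port) if that allocation fails.
	 */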
1536  	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
1537  					     &sc->msk_res_rid, RF_ACTIVE);
1538  	if (sc->msk_res == NULL) {
1539  		if (sc->msk_res_type == SYS_RES_MEMORY) {
1540  			sc->msk_res_type = SYS_RES_IOPORT;
1541  			sc->msk_res_rid = PCIR_BAR(1);
1542  		} else {
1543  			sc->msk_res_type = SYS_RES_MEMORY;
1544  			sc->msk_res_rid = PCIR_BAR(0);
1545  		}
1546  		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
1547  						     &sc->msk_res_rid,
1548  						     RF_ACTIVE);
1549  		if (sc->msk_res == NULL) {
1550  			device_printf(dev, "couldn't allocate %s resources\n",
1551  			sc->msk_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
1552  			return (ENXIO);
1553  		}
1554  	}
1555  	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
1556  	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);
1557  
1558  	/*
1559  	 * Allocate IRQ
1560  	 */
1561  	sc->msk_irq_type = pci_alloc_1intr(dev, mskc_msi_enable,
1562  	    &sc->msk_irq_rid, &irq_flags);
1563  
1564  	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->msk_irq_rid,
1565  	    irq_flags);
1566  	if (sc->msk_irq == NULL) {
1567  		device_printf(dev, "couldn't allocate IRQ resources\n");
1568  		error = ENXIO;
1569  		goto fail;
1570  	}
1571  
1572  	/* Enable all clocks before accessing any registers. */
1573  	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
1574  
1575  	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1576  	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1577  	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1578  	/* Bail out if chip is not recognized. */
1579  	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1580  	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
1581  	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
1582  		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1583  		    sc->msk_hw_id, sc->msk_hw_rev);
1584  		error = ENXIO;
1585  		goto fail;
1586  	}
1587  
1588  	/*
1589  	 * Create sysctl tree
1590  	 */
1591  	ctx = device_get_sysctl_ctx(dev);
1592  	tree = device_get_sysctl_tree(dev);
1593  	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
1594  			OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1595  			&sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
1596  			"I", "max number of Rx events to process");
1597  	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
1598  			OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
1599  			sc, 0, mskc_sysctl_intr_rate,
1600  			"I", "max number of interrupts per second");
1601  	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1602  		       "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
1603  		       0, "# of avoided m_defrag on TX path");
1604  	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1605  		       "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
1606  		       0, "# of leading copies on TX path");
1607  	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1608  		       "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
1609  		       0, "# of trailing copies on TX path");
1610  
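	/*
	 * B2_PMD_TYP identifies the transceiver type: 'L' (LX) and 'S' (SX)
	 * indicate fiber media, anything else is treated as copper.
	 */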
1611  	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1612  	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1613  		sc->msk_coppertype = 0;
1614  	else
1615  		sc->msk_coppertype = 1;
1616  	/* Check number of MACs. */
1617  	sc->msk_num_port = 1;
1618  	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1619  	    CFG_DUAL_MAC_MSK) {
1620  		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1621  			sc->msk_num_port++;
1622  	}
1623  
1624  	/* Check bus type. */
1625  	if (pci_is_pcie(sc->msk_dev) == 0) {
1626  		sc->msk_bustype = MSK_PEX_BUS;
1627  		sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
1628  	} else if (pci_is_pcix(sc->msk_dev) == 0) {
1629  		sc->msk_bustype = MSK_PCIX_BUS;
1630  		sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
1631  	} else {
1632  		sc->msk_bustype = MSK_PCI_BUS;
1633  	}
1634  
1635  	switch (sc->msk_hw_id) {
1636  	case CHIP_ID_YUKON_EC:
1637  	case CHIP_ID_YUKON_EC_U:
1638  		sc->msk_clock = 125;	/* 125 MHz */
1639  		break;
1640  	case CHIP_ID_YUKON_EX:
1641  		sc->msk_clock = 125;	/* 125 MHz */
1642  		break;
1643  	case CHIP_ID_YUKON_FE:
1644  		sc->msk_clock = 100;	/* 100 MHz */
1645  		sc->msk_pflags |= MSK_FLAG_FASTETHER;
1646  		break;
1647  	case CHIP_ID_YUKON_FE_P:
1648  		sc->msk_clock = 50;	/* 50 MHz */
1649  		/* DESCV2 */
1650  		sc->msk_pflags |= MSK_FLAG_FASTETHER;
1651  		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
1652  			/*
1653  			 * XXX
1654  			 * FE+ A0 has a status LE writeback bug, so msk(4)
1655  			 * does not rely on the status word of a received
1656  			 * frame in msk_rxeof().  This in turn disables all
1657  			 * hardware assistance bits reported by the status
1658  			 * word as well as the validity check of the received
1659  			 * frame.  Just pass received frames to the upper
1660  			 * stack with minimal tests and let it handle them.
1661  			 */
1662  			sc->msk_pflags |= MSK_FLAG_NORXCHK;
1663  		}
1664  		break;
1665  	case CHIP_ID_YUKON_XL:
1666  		sc->msk_clock = 156;	/* 156 MHz */
1667  		break;
1668  	case CHIP_ID_YUKON_SUPR:
1669  		sc->msk_clock = 125;	/* 125 MHz */
1670  		break;
1671  	case CHIP_ID_YUKON_UL_2:
1672  		sc->msk_clock = 125;	/* 125 MHz */
1673  		break;
1674  	case CHIP_ID_YUKON_OPT:
1675  		sc->msk_clock = 125;	/* 125 MHz */
1676  		break;
1677  	default:
1678  		sc->msk_clock = 156;	/* 156 MHz */
1679  		break;
1680  	}
1681  
1682  	error = mskc_status_dma_alloc(sc);
1683  	if (error)
1684  		goto fail;
1685  
1686  	/* Set base interrupt mask. */
1687  	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1688  	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1689  	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1690  
1691  	/* Reset the adapter. */
1692  	mskc_reset(sc);
1693  
1694  	error = mskc_setup_rambuffer(sc);
1695  	if (error)
1696  		goto fail;
1697  
1698  	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1699  	if (sc->msk_devs[MSK_PORT_A] == NULL) {
1700  		device_printf(dev, "failed to add child for PORT_A\n");
1701  		error = ENXIO;
1702  		goto fail;
1703  	}
1704  	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1705  	*port = MSK_PORT_A;
1706  	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1707  
1708  	if (sc->msk_num_port > 1) {
1709  		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1710  		if (sc->msk_devs[MSK_PORT_B] == NULL) {
1711  			device_printf(dev, "failed to add child for PORT_B\n");
1712  			error = ENXIO;
1713  			goto fail;
1714  		}
1715  		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1716  		*port = MSK_PORT_B;
1717  		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1718  	}
1719  
1720  	bus_generic_attach(dev);
1721  
1722  	cpuid = rman_get_cpuid(sc->msk_irq);
1723  	if (sc->msk_if[0] != NULL)
1724  		ifq_set_cpuid(&sc->msk_if[0]->msk_ifp->if_snd, cpuid);
1725  	if (sc->msk_if[1] != NULL)
1726  		ifq_set_cpuid(&sc->msk_if[1]->msk_ifp->if_snd, cpuid);
1727  
1728  	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
1729  			       mskc_intr, sc, &sc->msk_intrhand,
1730  			       &sc->msk_serializer);
1731  	if (error) {
1732  		device_printf(dev, "couldn't set up interrupt handler\n");
1733  		goto fail;
1734  	}
1735  	return 0;
1736  fail:
1737  	mskc_detach(dev);
1738  	return (error);
1739  }
1740  
1741  /*
1742   * Shut down the hardware and free up resources.  This can be called
1743   * any time after the mutex has been initialized.  It is called in
1744   * both the error case of attach and the normal detach case, so it
1745   * needs to be careful to free only resources that have actually
1746   * been allocated.
1747   */
1748  static int
1749  msk_detach(device_t dev)
1750  {
1751  	struct msk_if_softc *sc_if = device_get_softc(dev);
1752  
1753  	if (device_is_attached(dev)) {
1754  		struct msk_softc *sc = sc_if->msk_softc;
1755  		struct ifnet *ifp = &sc_if->arpcom.ac_if;
1756  
1757  		lwkt_serialize_enter(ifp->if_serializer);
1758  
1759  		if (sc->msk_intrhand != NULL) {
1760  			if (sc->msk_if[MSK_PORT_A] != NULL)
1761  				msk_stop(sc->msk_if[MSK_PORT_A]);
1762  			if (sc->msk_if[MSK_PORT_B] != NULL)
1763  				msk_stop(sc->msk_if[MSK_PORT_B]);
1764  
1765  			bus_teardown_intr(sc->msk_dev, sc->msk_irq,
1766  					  sc->msk_intrhand);
1767  			sc->msk_intrhand = NULL;
1768  		}
1769  
1770  		lwkt_serialize_exit(ifp->if_serializer);
1771  
1772  		ether_ifdetach(ifp);
1773  	}
1774  
1775  	if (sc_if->msk_miibus != NULL)
1776  		device_delete_child(dev, sc_if->msk_miibus);
1777  
1778  	msk_txrx_dma_free(sc_if);
1779  	return (0);
1780  }
1781  
1782  static int
1783  mskc_detach(device_t dev)
1784  {
1785  	struct msk_softc *sc = device_get_softc(dev);
1786  	int *port, i;
1787  
1788  #ifdef INVARIANTS
1789  	if (device_is_attached(dev)) {
1790  		KASSERT(sc->msk_intrhand == NULL,
1791  			("intr is not torn down yet"));
1792  	}
1793  #endif
1794  
1795  	for (i = 0; i < sc->msk_num_port; ++i) {
1796  		if (sc->msk_devs[i] != NULL) {
1797  			port = device_get_ivars(sc->msk_devs[i]);
1798  			if (port != NULL) {
1799  				kfree(port, M_DEVBUF);
1800  				device_set_ivars(sc->msk_devs[i], NULL);
1801  			}
1802  			device_delete_child(dev, sc->msk_devs[i]);
1803  		}
1804  	}
1805  
1806  	/* Disable all interrupts. */
1807  	CSR_WRITE_4(sc, B0_IMSK, 0);
1808  	CSR_READ_4(sc, B0_IMSK);
1809  	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1810  	CSR_READ_4(sc, B0_HWE_IMSK);
1811  
1812  	/* LED Off. */
1813  	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1814  
1815  	/* Put the hardware into reset. */
1816  	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1817  
1818  	mskc_status_dma_free(sc);
1819  
1820  	if (sc->msk_irq != NULL) {
1821  		bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
1822  				     sc->msk_irq);
1823  	}
1824  	if (sc->msk_irq_type == PCI_INTR_TYPE_MSI)
1825  		pci_release_msi(dev);
1826  
1827  	if (sc->msk_res != NULL) {
1828  		bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
1829  				     sc->msk_res);
1830  	}
1831  
1832  	return (0);
1833  }
1834  
1835  /* Create status DMA region. */
1836  static int
1837  mskc_status_dma_alloc(struct msk_softc *sc)
1838  {
1839  	bus_dmamem_t dmem;
1840  	int error;
1841  
1842  	error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0,
1843  			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1844  			MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1845  	if (error) {
1846  		device_printf(sc->msk_dev,
1847  		    "failed to create status coherent DMA memory\n");
1848  		return error;
1849  	}
1850  	sc->msk_stat_tag = dmem.dmem_tag;
1851  	sc->msk_stat_map = dmem.dmem_map;
1852  	sc->msk_stat_ring = dmem.dmem_addr;
1853  	sc->msk_stat_ring_paddr = dmem.dmem_busaddr;
1854  
1855  	return (0);
1856  }
1857  
1858  static void
1859  mskc_status_dma_free(struct msk_softc *sc)
1860  {
1861  	/* Destroy status block. */
1862  	if (sc->msk_stat_tag) {
1863  		bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1864  		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
1865  				sc->msk_stat_map);
1866  		bus_dma_tag_destroy(sc->msk_stat_tag);
1867  		sc->msk_stat_tag = NULL;
1868  	}
1869  }
1870  
1871  static int
1872  msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1873  {
1874  	int error, i, j;
1875  #ifdef MSK_JUMBO
1876  	struct msk_rxdesc *jrxd;
1877  	struct msk_jpool_entry *entry;
1878  	uint8_t *ptr;
1879  #endif
1880  	bus_size_t rxalign;
1881  
1882  	/* Create parent DMA tag. */
1883  	/*
1884  	 * XXX
1885  	 * It seems that the Yukon II supports full 64-bit DMA, but it
1886  	 * needs two descriptors (list elements) per 64-bit DMA operation.
1887  	 * Since we don't know in advance whether a 32-bit or 64-bit DMA
1888  	 * address mapping will be used for each mbuf, we limit its DMA
1889  	 * space to the 32-bit address range.  Otherwise, we would have to
1890  	 * check which DMA address is used and chain another descriptor
1891  	 * for the 64-bit operation, which also makes the descriptor ring
1892  	 * size variable.  Limiting DMA addresses to the 32-bit address
1893  	 * space greatly simplifies descriptor handling and possibly
1894  	 * improves performance a bit due to more efficient handling of
1895  	 * descriptors.  Apart from harassing the checksum offloading
1896  	 * mechanisms, using a separate descriptor for 64-bit DMA just to
1897  	 * save a little descriptor memory seems like a really bad idea.
1898  	 * Anyway, I've never seen this exotic scheme on Ethernet hardware.
1899  	 */
1900  	error = bus_dma_tag_create(
1901  		    NULL,			/* parent */
1902  		    1, 0,			/* alignment, boundary */
1903  		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1904  		    BUS_SPACE_MAXADDR,		/* highaddr */
1905  		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1906  		    0,				/* nsegments */
1907  		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1908  		    0,				/* flags */
1909  		    &sc_if->msk_cdata.msk_parent_tag);
1910  	if (error) {
1911  		device_printf(sc_if->msk_if_dev,
1912  			      "failed to create parent DMA tag\n");
1913  		return error;
1914  	}
1915  
1916  	/* Create DMA resources for the Tx ring. */
1917  	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
1918  				  &sc_if->msk_cdata.msk_tx_ring_tag,
1919  				  (void *)&sc_if->msk_rdata.msk_tx_ring,
1920  				  &sc_if->msk_rdata.msk_tx_ring_paddr,
1921  				  &sc_if->msk_cdata.msk_tx_ring_map);
1922  	if (error) {
1923  		device_printf(sc_if->msk_if_dev,
1924  			      "failed to create TX ring DMA resources\n");
1925  		return error;
1926  	}
1927  
1928  	/* Create DMA resources for the Rx ring. */
1929  	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
1930  				  &sc_if->msk_cdata.msk_rx_ring_tag,
1931  				  (void *)&sc_if->msk_rdata.msk_rx_ring,
1932  				  &sc_if->msk_rdata.msk_rx_ring_paddr,
1933  				  &sc_if->msk_cdata.msk_rx_ring_map);
1934  	if (error) {
1935  		device_printf(sc_if->msk_if_dev,
1936  			      "failed to create RX ring DMA resources\n");
1937  		return error;
1938  	}
1939  
1940  	/* Create tag for Tx buffers. */
1941  	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1942  		    1, 0,			/* alignment, boundary */
1943  		    BUS_SPACE_MAXADDR,		/* lowaddr */
1944  		    BUS_SPACE_MAXADDR,		/* highaddr */
1945  		    MSK_JUMBO_FRAMELEN,		/* maxsize */
1946  		    MSK_MAXTXSEGS,		/* nsegments */
1947  		    MSK_MAXSGSIZE,		/* maxsegsize */
1948  		    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
1949  		    BUS_DMA_ONEBPAGE,		/* flags */
1950  		    &sc_if->msk_cdata.msk_tx_tag);
1951  	if (error) {
1952  		device_printf(sc_if->msk_if_dev,
1953  			      "failed to create Tx DMA tag\n");
1954  		return error;
1955  	}
1956  
1957  	/* Create DMA maps for Tx buffers. */
1958  	for (i = 0; i < MSK_TX_RING_CNT; i++) {
1959  		struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];
1960  
1961  		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
1962  				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1963  				&txd->tx_dmamap);
1964  		if (error) {
1965  			device_printf(sc_if->msk_if_dev,
1966  				      "failed to create %dth Tx dmamap\n", i);
1967  
1968  			for (j = 0; j < i; ++j) {
1969  				txd = &sc_if->msk_cdata.msk_txdesc[j];
1970  				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
1971  						   txd->tx_dmamap);
1972  			}
1973  			bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
1974  			sc_if->msk_cdata.msk_tx_tag = NULL;
1975  
1976  			return error;
1977  		}
1978  	}
1979  
1980  	/*
1981  	 * Work around a hardware hang which seems to happen when the Rx
1982  	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
1983  	 */
1984  	if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
1985  		rxalign = MSK_RX_BUF_ALIGN;
1986  	else
1987  		rxalign = 1;
1988  
1989  	/* Create tag for Rx buffers. */
1990  	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1991  		    rxalign, 0,			/* alignment, boundary */
1992  		    BUS_SPACE_MAXADDR,		/* lowaddr */
1993  		    BUS_SPACE_MAXADDR,		/* highaddr */
1994  		    MCLBYTES,			/* maxsize */
1995  		    1,				/* nsegments */
1996  		    MCLBYTES,			/* maxsegsize */
1997  		    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
1998  		    BUS_DMA_WAITOK,		/* flags */
1999  		    &sc_if->msk_cdata.msk_rx_tag);
2000  	if (error) {
2001  		device_printf(sc_if->msk_if_dev,
2002  			      "failed to create Rx DMA tag\n");
2003  		return error;
2004  	}
2005  
2006  	/* Create DMA maps for Rx buffers. */
2007  	error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
2008  				  &sc_if->msk_cdata.msk_rx_sparemap);
2009  	if (error) {
2010  		device_printf(sc_if->msk_if_dev,
2011  			      "failed to create spare Rx dmamap\n");
2012  		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2013  		sc_if->msk_cdata.msk_rx_tag = NULL;
2014  		return error;
2015  	}
2016  	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2017  		struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2018  
2019  		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
2020  					  BUS_DMA_WAITOK, &rxd->rx_dmamap);
2021  		if (error) {
2022  			device_printf(sc_if->msk_if_dev,
2023  				      "failed to create %dth Rx dmamap\n", i);
2024  
2025  			for (j = 0; j < i; ++j) {
2026  				rxd = &sc_if->msk_cdata.msk_rxdesc[j];
2027  				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2028  						   rxd->rx_dmamap);
2029  			}
2030  			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2031  					   sc_if->msk_cdata.msk_rx_sparemap);
2032  			bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2033  			sc_if->msk_cdata.msk_rx_tag = NULL;
2034  
2035  			return error;
2036  		}
2037  	}
2038  
2039  #ifdef MSK_JUMBO
2040  	SLIST_INIT(&sc_if->msk_jfree_listhead);
2041  	SLIST_INIT(&sc_if->msk_jinuse_listhead);
2042  
2043  	/* Create tag for jumbo Rx ring. */
2044  	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2045  		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2046  		    BUS_SPACE_MAXADDR,		/* lowaddr */
2047  		    BUS_SPACE_MAXADDR,		/* highaddr */
2048  		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2049  		    1,				/* nsegments */
2050  		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2051  		    0,				/* flags */
2052  		    NULL, NULL,			/* lockfunc, lockarg */
2053  		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2054  	if (error != 0) {
2055  		device_printf(sc_if->msk_if_dev,
2056  		    "failed to create jumbo Rx ring DMA tag\n");
2057  		goto fail;
2058  	}
2059  
2060  	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2061  	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2062  	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2063  	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2064  	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2065  	if (error != 0) {
2066  		device_printf(sc_if->msk_if_dev,
2067  		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2068  		goto fail;
2069  	}
2070  
2071  	ctx.msk_busaddr = 0;
2072  	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2073  	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2074  	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2075  	    msk_dmamap_cb, &ctx, 0);
2076  	if (error != 0) {
2077  		device_printf(sc_if->msk_if_dev,
2078  		    "failed to load DMA'able memory for jumbo Rx ring\n");
2079  		goto fail;
2080  	}
2081  	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2082  
2083  	/* Create tag for jumbo buffer blocks. */
2084  	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2085  		    PAGE_SIZE, 0,		/* alignment, boundary */
2086  		    BUS_SPACE_MAXADDR,		/* lowaddr */
2087  		    BUS_SPACE_MAXADDR,		/* highaddr */
2088  		    MSK_JMEM,			/* maxsize */
2089  		    1,				/* nsegments */
2090  		    MSK_JMEM,			/* maxsegsize */
2091  		    0,				/* flags */
2092  		    NULL, NULL,			/* lockfunc, lockarg */
2093  		    &sc_if->msk_cdata.msk_jumbo_tag);
2094  	if (error != 0) {
2095  		device_printf(sc_if->msk_if_dev,
2096  		    "failed to create jumbo Rx buffer block DMA tag\n");
2097  		goto fail;
2098  	}
2099  
2100  	/* Create tag for jumbo Rx buffers. */
2101  	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2102  		    PAGE_SIZE, 0,		/* alignment, boundary */
2103  		    BUS_SPACE_MAXADDR,		/* lowaddr */
2104  		    BUS_SPACE_MAXADDR,		/* highaddr */
2105  		    MCLBYTES * MSK_MAXRXSEGS,	/* maxsize */
2106  		    MSK_MAXRXSEGS,		/* nsegments */
2107  		    MSK_JLEN,			/* maxsegsize */
2108  		    0,				/* flags */
2109  		    NULL, NULL,			/* lockfunc, lockarg */
2110  		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
2111  	if (error != 0) {
2112  		device_printf(sc_if->msk_if_dev,
2113  		    "failed to create jumbo Rx DMA tag\n");
2114  		goto fail;
2115  	}
2116  
2117  	/* Create DMA maps for jumbo Rx buffers. */
2118  	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2119  	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2120  		device_printf(sc_if->msk_if_dev,
2121  		    "failed to create spare jumbo Rx dmamap\n");
2122  		goto fail;
2123  	}
2124  	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2125  		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2126  		jrxd->rx_m = NULL;
2127  		jrxd->rx_dmamap = NULL;
2128  		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2129  		    &jrxd->rx_dmamap);
2130  		if (error != 0) {
2131  			device_printf(sc_if->msk_if_dev,
2132  			    "failed to create jumbo Rx dmamap\n");
2133  			goto fail;
2134  		}
2135  	}
2136  
2137  	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2138  	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2139  	    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2140  	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2141  	    &sc_if->msk_cdata.msk_jumbo_map);
2142  	if (error != 0) {
2143  		device_printf(sc_if->msk_if_dev,
2144  		    "failed to allocate DMA'able memory for jumbo buf\n");
2145  		goto fail;
2146  	}
2147  
2148  	ctx.msk_busaddr = 0;
2149  	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2150  	    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2151  	    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2152  	if (error != 0) {
2153  		device_printf(sc_if->msk_if_dev,
2154  		    "failed to load DMA'able memory for jumbobuf\n");
2155  		goto fail;
2156  	}
2157  	sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2158  
2159  	/*
2160  	 * Now divide it up into 9K pieces and save the addresses
2161  	 * in an array.
2162  	 */
2163  	ptr = sc_if->msk_rdata.msk_jumbo_buf;
2164  	for (i = 0; i < MSK_JSLOTS; i++) {
2165  		sc_if->msk_cdata.msk_jslots[i] = ptr;
2166  		ptr += MSK_JLEN;
2167  		entry = malloc(sizeof(struct msk_jpool_entry),
2168  		    M_DEVBUF, M_WAITOK);
2169  		if (entry == NULL) {
2170  			device_printf(sc_if->msk_if_dev,
2171  			    "no memory for jumbo buffers!\n");
2172  			error = ENOMEM;
2173  			goto fail;
2174  		}
2175  		entry->slot = i;
2176  		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2177  		    jpool_entries);
2178  	}
2179  #endif
2180  	return 0;
2181  }
2182  
2183  static void
2184  msk_txrx_dma_free(struct msk_if_softc *sc_if)
2185  {
2186  	struct msk_txdesc *txd;
2187  	struct msk_rxdesc *rxd;
2188  #ifdef MSK_JUMBO
2189  	struct msk_rxdesc *jrxd;
2190  	struct msk_jpool_entry *entry;
2191  #endif
2192  	int i;
2193  
2194  #ifdef MSK_JUMBO
2195  	MSK_JLIST_LOCK(sc_if);
2196  	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2197  		device_printf(sc_if->msk_if_dev,
2198  		    "asked to free buffer that is in use!\n");
2199  		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2200  		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2201  		    jpool_entries);
2202  	}
2203  
2204  	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2205  		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2206  		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2207  		free(entry, M_DEVBUF);
2208  	}
2209  	MSK_JLIST_UNLOCK(sc_if);
2210  
2211  	/* Destroy jumbo buffer block. */
2212  	if (sc_if->msk_cdata.msk_jumbo_map)
2213  		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2214  		    sc_if->msk_cdata.msk_jumbo_map);
2215  
2216  	if (sc_if->msk_rdata.msk_jumbo_buf) {
2217  		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2218  		    sc_if->msk_rdata.msk_jumbo_buf,
2219  		    sc_if->msk_cdata.msk_jumbo_map);
2220  		sc_if->msk_rdata.msk_jumbo_buf = NULL;
2221  		sc_if->msk_cdata.msk_jumbo_map = NULL;
2222  	}
2223  
2224  	/* Jumbo Rx ring. */
2225  	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2226  		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2227  			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2228  			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2229  		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2230  		    sc_if->msk_rdata.msk_jumbo_rx_ring)
2231  			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2232  			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2233  			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2234  		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2235  		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2236  		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2237  		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2238  	}
2239  
2240  	/* Jumbo Rx buffers. */
2241  	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2242  		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2243  			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2244  			if (jrxd->rx_dmamap) {
2245  				bus_dmamap_destroy(
2246  				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2247  				    jrxd->rx_dmamap);
2248  				jrxd->rx_dmamap = NULL;
2249  			}
2250  		}
2251  		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2252  			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2253  			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2254  			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2255  		}
2256  		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2257  		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2258  	}
2259  #endif
2260  
2261  	/* Tx ring. */
2262  	msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
2263  			   sc_if->msk_rdata.msk_tx_ring,
2264  			   sc_if->msk_cdata.msk_tx_ring_map);
2265  
2266  	/* Rx ring. */
2267  	msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
2268  			   sc_if->msk_rdata.msk_rx_ring,
2269  			   sc_if->msk_cdata.msk_rx_ring_map);
2270  
2271  	/* Tx buffers. */
2272  	if (sc_if->msk_cdata.msk_tx_tag) {
2273  		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2274  			txd = &sc_if->msk_cdata.msk_txdesc[i];
2275  			bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2276  					   txd->tx_dmamap);
2277  		}
2278  		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2279  		sc_if->msk_cdata.msk_tx_tag = NULL;
2280  	}
2281  
2282  	/* Rx buffers. */
2283  	if (sc_if->msk_cdata.msk_rx_tag) {
2284  		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2285  			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2286  			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2287  					   rxd->rx_dmamap);
2288  		}
2289  		bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2290  				   sc_if->msk_cdata.msk_rx_sparemap);
2291  		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2292  		sc_if->msk_cdata.msk_rx_tag = NULL;
2293  	}
2294  
2295  	if (sc_if->msk_cdata.msk_parent_tag) {
2296  		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2297  		sc_if->msk_cdata.msk_parent_tag = NULL;
2298  	}
2299  }
2300  
2301  #ifdef MSK_JUMBO
2302  /*
2303   * Allocate a jumbo buffer.
2304   */
2305  static void *
2306  msk_jalloc(struct msk_if_softc *sc_if)
2307  {
2308  	struct msk_jpool_entry *entry;
2309  
2310  	MSK_JLIST_LOCK(sc_if);
2311  
2312  	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2313  
2314  	if (entry == NULL) {
2315  		MSK_JLIST_UNLOCK(sc_if);
2316  		return (NULL);
2317  	}
2318  
2319  	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2320  	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2321  
2322  	MSK_JLIST_UNLOCK(sc_if);
2323  
2324  	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2325  }
2326  
2327  /*
2328   * Release a jumbo buffer.
2329   */
2330  static void
2331  msk_jfree(void *buf, void *args)
2332  {
2333  	struct msk_if_softc *sc_if;
2334  	struct msk_jpool_entry *entry;
2335  	int i;
2336  
2337  	/* Extract the softc struct pointer. */
2338  	sc_if = (struct msk_if_softc *)args;
2339  	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2340  
2341  	MSK_JLIST_LOCK(sc_if);
2342  	/* Calculate the slot this buffer belongs to. */
2343  	i = ((vm_offset_t)buf
2344  	     - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2345  	KASSERT(i >= 0 && i < MSK_JSLOTS,
2346  	    ("%s: asked to free buffer that we don't manage!", __func__));
2347  
2348  	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2349  	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2350  	entry->slot = i;
2351  	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2352  	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2353  	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2354  		wakeup(sc_if);
2355  
2356  	MSK_JLIST_UNLOCK(sc_if);
2357  }
2358  #endif
2359  
2360  static int
2361  msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2362  {
2363  	struct msk_txdesc *txd, *txd_last;
2364  	struct msk_tx_desc *tx_le;
2365  	struct mbuf *m;
2366  	bus_dmamap_t map;
2367  	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2368  	uint32_t control, prod, si;
2369  	uint16_t offset, tcp_offset;
2370  	int error, i, nsegs, maxsegs, defrag;
2371  
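	/*
	 * Figure out how many Tx descriptors are still available; a few
	 * (MSK_RESERVED_TX_DESC_CNT) are kept in reserve so the ring never
	 * fills up completely.
	 */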
2372  	maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
2373  		  MSK_RESERVED_TX_DESC_CNT;
2374  	KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
2375  		("not enough spare TX desc"));
2376  	if (maxsegs > MSK_MAXTXSEGS)
2377  		maxsegs = MSK_MAXTXSEGS;
2378  
2379  	/*
2380  	 * Align the TX buffer to a 64-byte boundary.  This greatly improves
2381  	 * bulk data TX performance on my 88E8053 (+100Mbps) at least.
2382  	 * Try to avoid m_defrag() if the mbufs are not chained together
2383  	 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
2384  	 */
2385  
2386  #define MSK_TXBUF_ALIGN	64
2387  #define MSK_TXBUF_MASK	(MSK_TXBUF_ALIGN - 1)
2388  
2389  	defrag = 1;
2390  	m = *m_head;
2391  	if (m->m_len == m->m_pkthdr.len) {
2392  		int space;
2393  
2394  		space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
2395  		if (space) {
2396  			if (M_WRITABLE(m)) {
2397  				if (M_TRAILINGSPACE(m) >= space) {
2398  					/* e.g. TCP ACKs */
2399  					bcopy(m->m_data, m->m_data + space,
2400  					      m->m_len);
2401  					m->m_data += space;
2402  					defrag = 0;
2403  					sc_if->msk_softc->msk_trailing_copied++;
2404  				} else {
2405  					space = MSK_TXBUF_ALIGN - space;
2406  					if (M_LEADINGSPACE(m) >= space) {
2407  						/* e.g. Small UDP datagrams */
2408  						bcopy(m->m_data,
2409  						      m->m_data - space,
2410  						      m->m_len);
2411  						m->m_data -= space;
2412  						defrag = 0;
2413  						sc_if->msk_softc->
2414  						msk_leading_copied++;
2415  					}
2416  				}
2417  			}
2418  		} else {
2419  			/* e.g. on forwarding path */
2420  			defrag = 0;
2421  		}
2422  	}
2423  	if (defrag) {
2424  		m = m_defrag(*m_head, M_NOWAIT);
2425  		if (m == NULL) {
2426  			m_freem(*m_head);
2427  			*m_head = NULL;
2428  			return ENOBUFS;
2429  		}
2430  		*m_head = m;
2431  	} else {
2432  		sc_if->msk_softc->msk_defrag_avoided++;
2433  	}
2434  
2435  #undef MSK_TXBUF_MASK
2436  #undef MSK_TXBUF_ALIGN
2437  
2438  	tcp_offset = offset = 0;
2439  	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2440  		/*
2441  		 * Since the mbuf carries no protocol-specific structure
2442  		 * information, we have to inspect the protocol headers here
2443  		 * to set up TSO and checksum offload.  I don't know why
2444  		 * Marvell made such a decision in the chip design, because
2445  		 * other GigE hardware normally takes care of all these
2446  		 * chores in hardware.  However, the TSO performance of the
2447  		 * Yukon II is good enough that it's worth implementing.
2448  		 */
2449  		struct ether_header *eh;
2450  		struct ip *ip;
2451  
2452  		/* TODO check for M_WRITABLE(m) */
2453  
2454  		offset = sizeof(struct ether_header);
2455  		m = m_pullup(m, offset);
2456  		if (m == NULL) {
2457  			*m_head = NULL;
2458  			return (ENOBUFS);
2459  		}
2460  		eh = mtod(m, struct ether_header *);
2461  		/* Check if hardware VLAN insertion is off. */
2462  		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2463  			offset = sizeof(struct ether_vlan_header);
2464  			m = m_pullup(m, offset);
2465  			if (m == NULL) {
2466  				*m_head = NULL;
2467  				return (ENOBUFS);
2468  			}
2469  		}
2470  		m = m_pullup(m, offset + sizeof(struct ip));
2471  		if (m == NULL) {
2472  			*m_head = NULL;
2473  			return (ENOBUFS);
2474  		}
2475  		ip = (struct ip *)(mtod(m, char *) + offset);
2476  		offset += (ip->ip_hl << 2);
2477  		tcp_offset = offset;
2478  		/*
2479  		 * It seems that the Yukon II has a Tx checksum offload bug
2480  		 * for small TCP packets of less than 60 bytes in size
2481  		 * (e.g. TCP window probe packets, pure ACK packets).
2482  		 * The common workaround of padding the frame with zeros to
2483  		 * the minimum Ethernet frame size didn't work at all.
2484  		 * Instead of disabling checksum offload completely, we
2485  		 * resort to a software checksum routine when we encounter
2486  		 * short TCP frames.
2487  		 * Short UDP packets appear to be handled correctly by the
2488  		 * Yukon II.
2489  		 */
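		/*
		 * The software fallback below checksums the TCP segment in
		 * place: in_cksum_skip() skips the Ethernet and IP headers
		 * and sums from the start of the TCP header to the end of
		 * the packet, and the result is written directly into the
		 * TCP checksum field (csum_data is that field's offset
		 * within the TCP header).
		 */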
2490  		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2491  		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2492  			uint16_t csum;
2493  
2494  			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2495  			    (ip->ip_hl << 2), offset);
2496  			*(uint16_t *)(m->m_data + offset +
2497  			    m->m_pkthdr.csum_data) = csum;
2498  			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2499  		}
2500  		*m_head = m;
2501  	}
2502  
2503  	prod = sc_if->msk_cdata.msk_tx_prod;
2504  	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2505  	txd_last = txd;
2506  	map = txd->tx_dmamap;
2507  
2508  	error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
2509  			m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2510  	if (error) {
2511  		m_freem(*m_head);
2512  		*m_head = NULL;
2513  		return error;
2514  	}
2515  	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2516  
2517  	m = *m_head;
2518  	control = 0;
2519  	tx_le = NULL;
2520  
2521  #ifdef notyet
2522  	/* Check if we have a VLAN tag to insert. */
2523  	if ((m->m_flags & M_VLANTAG) != 0) {
2524  		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2525  		tx_le->msk_addr = htole32(0);
2526  		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2527  		    htons(m->m_pkthdr.ether_vtag));
2528  		sc_if->msk_cdata.msk_tx_cnt++;
2529  		MSK_INC(prod, MSK_TX_RING_CNT);
2530  		control |= INS_VLAN;
2531  	}
2532  #endif
2533  	/* Check if we have to handle checksum offload. */
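	/*
	 * The checksum LE encodes two offsets for the chip: the low 16 bits
	 * of the address field hold the position at which the computed
	 * checksum is to be stored (tcp_offset + csum_data) and the high
	 * 16 bits hold the position at which checksumming starts
	 * (tcp_offset, the beginning of the L4 header).
	 */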
2534  	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2535  		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2536  		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2537  		    & 0xffff) | ((uint32_t)tcp_offset << 16));
2538  		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2539  		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2540  		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2541  			control |= UDPTCP;
2542  		sc_if->msk_cdata.msk_tx_cnt++;
2543  		MSK_INC(prod, MSK_TX_RING_CNT);
2544  	}
2545  
2546  	si = prod;
2547  	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2548  	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2549  	tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2550  	    OP_PACKET);
2551  	sc_if->msk_cdata.msk_tx_cnt++;
2552  	MSK_INC(prod, MSK_TX_RING_CNT);
2553  
2554  	for (i = 1; i < nsegs; i++) {
2555  		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2556  		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2557  		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2558  		    OP_BUFFER | HW_OWNER);
2559  		sc_if->msk_cdata.msk_tx_cnt++;
2560  		MSK_INC(prod, MSK_TX_RING_CNT);
2561  	}
2562  	/* Update producer index. */
2563  	sc_if->msk_cdata.msk_tx_prod = prod;
2564  
2565  	/* Set EOP on the last descriptor. */
2566  	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2567  	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2568  	tx_le->msk_control |= htole32(EOP);
2569  
2570  	/* Turn the first descriptor ownership to hardware. */
2571  	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2572  	tx_le->msk_control |= htole32(HW_OWNER);
2573  
2574  	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2575  	map = txd_last->tx_dmamap;
2576  	txd_last->tx_dmamap = txd->tx_dmamap;
2577  	txd->tx_dmamap = map;
2578  	txd->tx_m = m;
2579  
2580  	return (0);
2581  }
2582  
2583  static void
2584  msk_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2585  {
2586  	struct msk_if_softc *sc_if;
2587  	struct mbuf *m_head;
2588  	int enq;
2589  
2590  	sc_if = ifp->if_softc;
2591  
2592  	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2593  	ASSERT_SERIALIZED(ifp->if_serializer);
2594  
2595  	if (!sc_if->msk_link) {
2596  		ifq_purge(&ifp->if_snd);
2597  		return;
2598  	}
2599  
2600  	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
2601  		return;
2602  
2603  	enq = 0;
2604  	while (!ifq_is_empty(&ifp->if_snd)) {
2605  		if (MSK_IS_OACTIVE(sc_if)) {
2606  			ifq_set_oactive(&ifp->if_snd);
2607  			break;
2608  		}
2609  
2610  		m_head = ifq_dequeue(&ifp->if_snd);
2611  		if (m_head == NULL)
2612  			break;
2613  
2614  		/*
2615  		 * Pack the data into the transmit ring. If we
2616  		 * don't have room, set the OACTIVE flag and wait
2617  		 * for the NIC to drain the ring.
2618  		 */
2619  		if (msk_encap(sc_if, &m_head) != 0) {
2620  			IFNET_STAT_INC(ifp, oerrors, 1);
2621  			if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2622  				continue;
2623  			} else {
2624  				ifq_set_oactive(&ifp->if_snd);
2625  				break;
2626  			}
2627  		}
2628  		enq = 1;
2629  
2630  		/*
2631  		 * If there's a BPF listener, bounce a copy of this frame
2632  		 * to him.
2633  		 */
2634  		BPF_MTAP(ifp, m_head);
2635  	}
2636  
2637  	if (enq) {
2638  		/* Transmit */
2639  		CSR_WRITE_2(sc_if->msk_softc,
2640  		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2641  		    sc_if->msk_cdata.msk_tx_prod);
2642  
2643  		/* Set a timeout in case the chip goes out to lunch. */
2644  		ifp->if_timer = MSK_TX_TIMEOUT;
2645  	}
2646  }
2647  
2648  static void
2649  msk_watchdog(struct ifnet *ifp)
2650  {
2651  	struct msk_if_softc *sc_if = ifp->if_softc;
2652  	uint32_t ridx;
2653  	int idx;
2654  
2655  	ASSERT_SERIALIZED(ifp->if_serializer);
2656  
2657  	if (sc_if->msk_link == 0) {
2658  		if (bootverbose)
2659  			if_printf(sc_if->msk_ifp, "watchdog timeout "
2660  			   "(missed link)\n");
2661  		IFNET_STAT_INC(ifp, oerrors, 1);
2662  		msk_init(sc_if);
2663  		return;
2664  	}
2665  
2666  	/*
2667  	 * Reclaim first as there is a possibility of losing Tx completion
2668  	 * interrupts.
2669  	 */
2670  	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2671  	idx = CSR_READ_2(sc_if->msk_softc, ridx);
2672  	if (sc_if->msk_cdata.msk_tx_cons != idx) {
2673  		msk_txeof(sc_if, idx);
2674  		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2675  			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2676  			    "-- recovering\n");
2677  			if (!ifq_is_empty(&ifp->if_snd))
2678  				if_devstart(ifp);
2679  			return;
2680  		}
2681  	}
2682  
2683  	if_printf(ifp, "watchdog timeout\n");
2684  	IFNET_STAT_INC(ifp, oerrors, 1);
2685  	msk_init(sc_if);
2686  	if (!ifq_is_empty(&ifp->if_snd))
2687  		if_devstart(ifp);
2688  }
2689  
2690  static int
2691  mskc_shutdown(device_t dev)
2692  {
2693  	struct msk_softc *sc = device_get_softc(dev);
2694  	int i;
2695  
2696  	lwkt_serialize_enter(&sc->msk_serializer);
2697  
2698  	for (i = 0; i < sc->msk_num_port; i++) {
2699  		if (sc->msk_if[i] != NULL)
2700  			msk_stop(sc->msk_if[i]);
2701  	}
2702  
2703  	/* Put the hardware into reset. */
2704  	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2705  
2706  	lwkt_serialize_exit(&sc->msk_serializer);
2707  	return (0);
2708  }
2709  
2710  static int
2711  mskc_suspend(device_t dev)
2712  {
2713  	struct msk_softc *sc = device_get_softc(dev);
2714  	int i;
2715  
2716  	lwkt_serialize_enter(&sc->msk_serializer);
2717  
2718  	for (i = 0; i < sc->msk_num_port; i++) {
2719  		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2720  		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
2721  			msk_stop(sc->msk_if[i]);
2722  	}
2723  
2724  	/* Disable all interrupts. */
2725  	CSR_WRITE_4(sc, B0_IMSK, 0);
2726  	CSR_READ_4(sc, B0_IMSK);
2727  	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2728  	CSR_READ_4(sc, B0_HWE_IMSK);
2729  
2730  	mskc_phy_power(sc, MSK_PHY_POWERDOWN);
2731  
2732  	/* Put the hardware into reset. */
2733  	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2734  	sc->msk_suspended = 1;
2735  
2736  	lwkt_serialize_exit(&sc->msk_serializer);
2737  
2738  	return (0);
2739  }
2740  
2741  static int
2742  mskc_resume(device_t dev)
2743  {
2744  	struct msk_softc *sc = device_get_softc(dev);
2745  	int i;
2746  
2747  	lwkt_serialize_enter(&sc->msk_serializer);
2748  
2749  	/* Enable all clocks before accessing any registers. */
2750  	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
2751  	mskc_reset(sc);
2752  	for (i = 0; i < sc->msk_num_port; i++) {
2753  		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2754  		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2755  			msk_init(sc->msk_if[i]);
2756  	}
2757  	sc->msk_suspended = 0;
2758  
2759  	lwkt_serialize_exit(&sc->msk_serializer);
2760  
2761  	return (0);
2762  }
2763  
2764  static void
2765  msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2766  {
2767  	struct mbuf *m;
2768  	struct ifnet *ifp;
2769  	struct msk_rxdesc *rxd;
2770  	int cons, rxlen;
2771  
2772  	ifp = sc_if->msk_ifp;
2773  
2774  	cons = sc_if->msk_cdata.msk_rx_cons;
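	/*
	 * The do { } while (0) construct lets the error paths below bail
	 * out with break while still falling through to the ring index
	 * updates at the end of this function.
	 */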
2775  	do {
2776  		rxlen = status >> 16;
2777  		if ((status & GMR_FS_VLAN) != 0 &&
2778  		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2779  			rxlen -= EVL_ENCAPLEN;
2780  		if (sc_if->msk_flags & MSK_FLAG_NORXCHK) {
2781  			/*
2782  			 * For controllers that return a bogus status code,
2783  			 * just do a minimal check and let the upper stack
2784  			 * handle this frame.
2785  			 */
2786  			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
2787  				IFNET_STAT_INC(ifp, ierrors, 1);
2788  				msk_discard_rxbuf(sc_if, cons);
2789  				break;
2790  			}
2791  		} else if (len > sc_if->msk_framesize ||
2792  		    ((status & GMR_FS_ANY_ERR) != 0) ||
2793  		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2794  			/* Don't count flow-control packets as errors. */
2795  			if ((status & GMR_FS_GOOD_FC) == 0)
2796  				IFNET_STAT_INC(ifp, ierrors, 1);
2797  			msk_discard_rxbuf(sc_if, cons);
2798  			break;
2799  		}
2800  		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2801  		m = rxd->rx_m;
2802  		if (msk_newbuf(sc_if, cons, 0) != 0) {
2803  			IFNET_STAT_INC(ifp, iqdrops, 1);
2804  			/* Reuse old buffer. */
2805  			msk_discard_rxbuf(sc_if, cons);
2806  			break;
2807  		}
2808  		m->m_pkthdr.rcvif = ifp;
2809  		m->m_pkthdr.len = m->m_len = len;
2810  		IFNET_STAT_INC(ifp, ipackets, 1);
2811  #ifdef notyet
2812  		/* Check for VLAN tagged packets. */
2813  		if ((status & GMR_FS_VLAN) != 0 &&
2814  		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2815  			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2816  			m->m_flags |= M_VLANTAG;
2817  		}
2818  #endif
2819  
2820  		ifp->if_input(ifp, m, NULL, -1);
2821  	} while (0);
2822  
2823  	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
2824  	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
2825  }
2826  
2827  #ifdef MSK_JUMBO
2828  static void
2829  msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2830  {
2831  	struct mbuf *m;
2832  	struct ifnet *ifp;
2833  	struct msk_rxdesc *jrxd;
2834  	int cons, rxlen;
2835  
2836  	ifp = sc_if->msk_ifp;
2837  
2838  	MSK_IF_LOCK_ASSERT(sc_if);
2839  
2840  	cons = sc_if->msk_cdata.msk_rx_cons;
2841  	do {
2842  		rxlen = status >> 16;
2843  		if ((status & GMR_FS_VLAN) != 0 &&
2844  		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2845  			rxlen -= ETHER_VLAN_ENCAP_LEN;
2846  		if (len > sc_if->msk_framesize ||
2847  		    ((status & GMR_FS_ANY_ERR) != 0) ||
2848  		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2849  			/* Don't count flow-control packets as errors. */
2850  			if ((status & GMR_FS_GOOD_FC) == 0)
2851  				ifp->if_ierrors++;
2852  			msk_discard_jumbo_rxbuf(sc_if, cons);
2853  			break;
2854  		}
2855  		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
2856  		m = jrxd->rx_m;
2857  		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
2858  			ifp->if_iqdrops++;
2859  			/* Reuse old buffer. */
2860  			msk_discard_jumbo_rxbuf(sc_if, cons);
2861  			break;
2862  		}
2863  		m->m_pkthdr.rcvif = ifp;
2864  		m->m_pkthdr.len = m->m_len = len;
2865  		ifp->if_ipackets++;
2866  		/* Check for VLAN tagged packets. */
2867  		if ((status & GMR_FS_VLAN) != 0 &&
2868  		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2869  			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2870  			m->m_flags |= M_VLANTAG;
2871  		}
2872  		MSK_IF_UNLOCK(sc_if);
2873  		ifp->if_input(ifp, m, NULL, -1);
2874  		MSK_IF_LOCK(sc_if);
2875  	} while (0);
2876  
2877  	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
2878  	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
2879  }
2880  #endif
2881  
2882  static void
2883  msk_txeof(struct msk_if_softc *sc_if, int idx)
2884  {
2885  	struct msk_txdesc *txd;
2886  	struct msk_tx_desc *cur_tx;
2887  	struct ifnet *ifp;
2888  	uint32_t control;
2889  	int cons, prog;
2890  
2891  	ifp = sc_if->msk_ifp;
2892  
2893  	/*
2894  	 * Go through our tx ring and free mbufs for those
2895  	 * frames that have been sent.
2896  	 */
2897  	cons = sc_if->msk_cdata.msk_tx_cons;
2898  	prog = 0;
2899  	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
2900  		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
2901  			break;
2902  		prog++;
2903  		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
2904  		control = le32toh(cur_tx->msk_control);
2905  		sc_if->msk_cdata.msk_tx_cnt--;
2906  		if ((control & EOP) == 0)
2907  			continue;
2908  		txd = &sc_if->msk_cdata.msk_txdesc[cons];
2909  		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
2910  
2911  		IFNET_STAT_INC(ifp, opackets, 1);
2912  		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
2913  		    __func__));
2914  		m_freem(txd->tx_m);
2915  		txd->tx_m = NULL;
2916  	}
2917  
2918  	if (prog > 0) {
2919  		sc_if->msk_cdata.msk_tx_cons = cons;
2920  		if (!MSK_IS_OACTIVE(sc_if))
2921  			ifq_clr_oactive(&ifp->if_snd);
2922  		if (sc_if->msk_cdata.msk_tx_cnt == 0)
2923  			ifp->if_timer = 0;
2924  		/* No need to sync LEs as we didn't update LEs. */
2925  	}
2926  }
2927  
2928  static void
2929  msk_tick(void *xsc_if)
2930  {
2931  	struct msk_if_softc *sc_if = xsc_if;
2932  	struct ifnet *ifp = &sc_if->arpcom.ac_if;
2933  	struct mii_data *mii;
2934  
2935  	lwkt_serialize_enter(ifp->if_serializer);
2936  
2937  	mii = device_get_softc(sc_if->msk_miibus);
2938  
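	/*
	 * Poll the PHY via mii_tick() once a second.  If the link is not
	 * up yet, re-run the status-change handler so a newly established
	 * link is noticed.
	 */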
2939  	mii_tick(mii);
2940  	if (!sc_if->msk_link)
2941  		msk_miibus_statchg(sc_if->msk_if_dev);
2942  	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
2943  
2944  	lwkt_serialize_exit(ifp->if_serializer);
2945  }
2946  
2947  static void
2948  msk_intr_phy(struct msk_if_softc *sc_if)
2949  {
2950  	uint16_t status;
2951  
2952  	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
2953  	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
2954  	/* Handle FIFO Underrun/Overflow? */
2955  	if (status & PHY_M_IS_FIFO_ERROR) {
2956  		device_printf(sc_if->msk_if_dev,
2957  		    "PHY FIFO underrun/overflow.\n");
2958  	}
2959  }
2960  
2961  static void
2962  msk_intr_gmac(struct msk_if_softc *sc_if)
2963  {
2964  	struct msk_softc *sc;
2965  	uint8_t status;
2966  
2967  	sc = sc_if->msk_softc;
2968  	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
2969  
2970  	/* GMAC Rx FIFO overrun. */
2971  	if ((status & GM_IS_RX_FF_OR) != 0) {
2972  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
2973  		    GMF_CLI_RX_FO);
2974  	}
2975  	/* GMAC Tx FIFO underrun. */
2976  	if ((status & GM_IS_TX_FF_UR) != 0) {
2977  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
2978  		    GMF_CLI_TX_FU);
2979  		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
2980  		/*
2981  		 * XXX
2982  		 * In case of a Tx underrun, we may need to flush/reset
2983  		 * the Tx MAC, but that would also require resynchronization
2984  		 * with the status LEs.  Reinitializing the status LEs would
2985  		 * affect the other port in a dual MAC configuration, so it
2986  		 * should be avoided as much as possible.
2987  		 * Due to the lack of documentation this is all a vague guess,
2988  		 * but it needs more investigation.
2989  		 */
2990  	}
2991  }
2992  
2993  static void
2994  msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
2995  {
2996  	struct msk_softc *sc;
2997  
2998  	sc = sc_if->msk_softc;
2999  	if ((status & Y2_IS_PAR_RD1) != 0) {
3000  		device_printf(sc_if->msk_if_dev,
3001  		    "RAM buffer read parity error\n");
3002  		/* Clear IRQ. */
3003  		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3004  		    RI_CLR_RD_PERR);
3005  	}
3006  	if ((status & Y2_IS_PAR_WR1) != 0) {
3007  		device_printf(sc_if->msk_if_dev,
3008  		    "RAM buffer write parity error\n");
3009  		/* Clear IRQ. */
3010  		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3011  		    RI_CLR_WR_PERR);
3012  	}
3013  	if ((status & Y2_IS_PAR_MAC1) != 0) {
3014  		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3015  		/* Clear IRQ. */
3016  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3017  		    GMF_CLI_TX_PE);
3018  	}
3019  	if ((status & Y2_IS_PAR_RX1) != 0) {
3020  		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3021  		/* Clear IRQ. */
3022  		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3023  	}
3024  	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3025  		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3026  		/* Clear IRQ. */
3027  		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3028  	}
3029  }
3030  
3031  static void
3032  mskc_intr_hwerr(struct msk_softc *sc)
3033  {
3034  	uint32_t status;
3035  	uint32_t tlphead[4];
3036  
3037  	status = CSR_READ_4(sc, B0_HWE_ISRC);
3038  	/* Time Stamp timer overflow. */
3039  	if ((status & Y2_IS_TIST_OV) != 0)
3040  		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3041  	if ((status & Y2_IS_PCI_NEXP) != 0) {
3042  		/*
3043  		 * A PCI Express error occurred which is not described in
3044  		 * the PEX spec.
3045  		 * This error is also mapped to either the Master Abort
3046  		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3047  		 * can only be cleared there.
3048  		 */
3049  		device_printf(sc->msk_dev,
3050  		    "PCI Express protocol violation error\n");
3051  	}
3052  
3053  	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3054  		uint16_t v16;
3055  
3056  		if ((status & Y2_IS_MST_ERR) != 0)
3057  			device_printf(sc->msk_dev,
3058  			    "unexpected IRQ Master error\n");
3059  		else
3060  			device_printf(sc->msk_dev,
3061  			    "unexpected IRQ Status error\n");
3062  		/* Reset all bits in the PCI status register. */
3063  		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3064  		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3065  		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3066  		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3067  		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3068  		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3069  	}
3070  
3071  	/* Check for PCI Express Uncorrectable Error. */
3072  	if ((status & Y2_IS_PCI_EXP) != 0) {
3073  		uint32_t v32;
3074  
3075  		/*
3076  		 * On a PCI Express bus, bridges are called root complexes
3077  		 * (RC).  PCI Express errors are recognized by the root
3078  		 * complex too, which requests the system to handle the
3079  		 * problem.  After an error occurs, it may no longer be
3080  		 * possible to access the adapter at all.
3081  		 */
3082  
3083  		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3084  		if ((v32 & PEX_UNSUP_REQ) != 0) {
3085  			/* Ignore unsupported request error. */
3086  			if (bootverbose) {
3087  				device_printf(sc->msk_dev,
3088  				    "Uncorrectable PCI Express error\n");
3089  			}
3090  		}
3091  		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3092  			int i;
3093  
3094  			/* Get the TLP header from the Log Registers. */
3095  			for (i = 0; i < 4; i++)
3096  				tlphead[i] = CSR_PCI_READ_4(sc,
3097  				    PEX_HEADER_LOG + i * 4);
3098  			/* Check for vendor defined broadcast message. */
3099  			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3100  				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3101  				CSR_WRITE_4(sc, B0_HWE_IMSK,
3102  				    sc->msk_intrhwemask);
3103  				CSR_READ_4(sc, B0_HWE_IMSK);
3104  			}
3105  		}
3106  		/* Clear the interrupt. */
3107  		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3108  		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3109  		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3110  	}
3111  
3112  	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3113  		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3114  	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3115  		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3116  }
3117  
3118  static __inline void
3119  msk_rxput(struct msk_if_softc *sc_if)
3120  {
3121  	struct msk_softc *sc;
3122  
3123  	sc = sc_if->msk_softc;
3124  #ifdef MSK_JUMBO
3125  	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3126  		bus_dmamap_sync(
3127  		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3128  		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3129  		    BUS_DMASYNC_PREWRITE);
3130  	}
3131  #endif
3132  	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3133  	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3134  }
3135  
3136  static int
3137  mskc_handle_events(struct msk_softc *sc)
3138  {
3139  	struct msk_if_softc *sc_if;
3140  	int rxput[2];
3141  	struct msk_stat_desc *sd;
3142  	uint32_t control, status;
3143  	int cons, idx, len, port, rxprog;
3144  
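	/*
	 * STAT_PUT_IDX is the chip's producer index into the status LE
	 * ring; consume status LEs until we catch up with it.
	 */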
3145  	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3146  	if (idx == sc->msk_stat_cons)
3147  		return (0);
3148  
3149  	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3150  
3151  	rxprog = 0;
3152  	for (cons = sc->msk_stat_cons; cons != idx;) {
3153  		sd = &sc->msk_stat_ring[cons];
3154  		control = le32toh(sd->msk_control);
3155  		if ((control & HW_OWNER) == 0)
3156  			break;
3157  		/*
3158  		 * Marvell's FreeBSD driver updates the status LE after
3159  		 * clearing HW_OWNER.  However, the bus_dma(9) API has no way
3160  		 * to sync a single LE; it only provides a way to sync an
3161  		 * entire DMA map.  So don't sync the LE until we have a
3162  		 * better way to sync LEs.
3163  		 */
3164  		control &= ~HW_OWNER;
3165  		sd->msk_control = htole32(control);
3166  		status = le32toh(sd->msk_status);
3167  		len = control & STLE_LEN_MASK;
3168  		port = (control >> 16) & 0x01;
3169  		sc_if = sc->msk_if[port];
3170  		if (sc_if == NULL) {
3171  			device_printf(sc->msk_dev, "invalid port opcode "
3172  			    "0x%08x\n", control & STLE_OP_MASK);
3173  			continue;
3174  		}
3175  
3176  		switch (control & STLE_OP_MASK) {
3177  		case OP_RXVLAN:
3178  			sc_if->msk_vtag = ntohs(len);
3179  			break;
3180  		case OP_RXCHKSVLAN:
3181  			sc_if->msk_vtag = ntohs(len);
3182  			break;
3183  		case OP_RXSTAT:
3184  			if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0)
3185  				break;
3186  #ifdef MSK_JUMBO
3187  			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3188  				msk_jumbo_rxeof(sc_if, status, len);
3189  			else
3190  #endif
3191  				msk_rxeof(sc_if, status, len);
3192  			rxprog++;
3193  			/*
3194  			 * Because there is no way to sync a single Rx LE,
3195  			 * put the DMA sync operation off until the end of
3196  			 * event processing.
3197  			 */
3198  			rxput[port]++;
3199  			/* Update the prefetch unit if we've passed the watermark. */
3200  			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3201  				msk_rxput(sc_if);
3202  				rxput[port] = 0;
3203  			}
3204  			break;
3205  		case OP_TXINDEXLE:
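			/*
			 * A single OP_TXINDEXLE status LE reports the Tx
			 * consumer index for both ports: port A's index sits
			 * in the low bits of the status word, while port B's
			 * index is split across the status and length fields
			 * and has to be reassembled from both halves.
			 */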
3206  			if (sc->msk_if[MSK_PORT_A] != NULL) {
3207  				msk_txeof(sc->msk_if[MSK_PORT_A],
3208  				    status & STLE_TXA1_MSKL);
3209  			}
3210  			if (sc->msk_if[MSK_PORT_B] != NULL) {
3211  				msk_txeof(sc->msk_if[MSK_PORT_B],
3212  				    ((status & STLE_TXA2_MSKL) >>
3213  				    STLE_TXA2_SHIFTL) |
3214  				    ((len & STLE_TXA2_MSKH) <<
3215  				    STLE_TXA2_SHIFTH));
3216  			}
3217  			break;
3218  		default:
3219  			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3220  			    control & STLE_OP_MASK);
3221  			break;
3222  		}
3223  		MSK_INC(cons, MSK_STAT_RING_CNT);
3224  		if (rxprog > sc->msk_process_limit)
3225  			break;
3226  	}
3227  
3228  	sc->msk_stat_cons = cons;
3229  	/* XXX We should sync status LEs here. See above notes. */
3230  
3231  	if (rxput[MSK_PORT_A] > 0)
3232  		msk_rxput(sc->msk_if[MSK_PORT_A]);
3233  	if (rxput[MSK_PORT_B] > 0)
3234  		msk_rxput(sc->msk_if[MSK_PORT_B]);
3235  
3236  	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3237  }
3238  
3239  /* Legacy interrupt handler for shared interrupt. */
3240  static void
3241  mskc_intr(void *xsc)
3242  {
3243  	struct msk_softc *sc;
3244  	struct msk_if_softc *sc_if0, *sc_if1;
3245  	struct ifnet *ifp0, *ifp1;
3246  	uint32_t status;
3247  
3248  	sc = xsc;
3249  	ASSERT_SERIALIZED(&sc->msk_serializer);
3250  
3251  	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3252  	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3253  	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3254  	    (status & sc->msk_intrmask) == 0) {
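		/* Not our interrupt; re-enable interrupts and bail out. */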
3255  		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3256  		return;
3257  	}
3258  
3259  	sc_if0 = sc->msk_if[MSK_PORT_A];
3260  	sc_if1 = sc->msk_if[MSK_PORT_B];
3261  	ifp0 = ifp1 = NULL;
3262  	if (sc_if0 != NULL)
3263  		ifp0 = sc_if0->msk_ifp;
3264  	if (sc_if1 != NULL)
3265  		ifp1 = sc_if1->msk_ifp;
3266  
3267  	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3268  		msk_intr_phy(sc_if0);
3269  	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3270  		msk_intr_phy(sc_if1);
3271  	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3272  		msk_intr_gmac(sc_if0);
3273  	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3274  		msk_intr_gmac(sc_if1);
3275  	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3276  		device_printf(sc->msk_dev, "Rx descriptor error\n");
3277  		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3278  		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3279  		CSR_READ_4(sc, B0_IMSK);
3280  	}
3281  	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3282  		device_printf(sc->msk_dev, "Tx descriptor error\n");
3283  		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3284  		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3285  		CSR_READ_4(sc, B0_IMSK);
3286  	}
3287  	if ((status & Y2_IS_HW_ERR) != 0)
3288  		mskc_intr_hwerr(sc);
3289  
3290  	while (mskc_handle_events(sc) != 0)
3291  		;
3292  	if ((status & Y2_IS_STAT_BMU) != 0)
3293  		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3294  
3295  	/* Reenable interrupts. */
3296  	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3297  
3298  	if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
3299  	    !ifq_is_empty(&ifp0->if_snd))
3300  		if_devstart(ifp0);
3301  	if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
3302  	    !ifq_is_empty(&ifp1->if_snd))
3303  		if_devstart(ifp1);
3304  }
3305  
3306  static void
3307  msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3308  {
3309  	struct msk_softc *sc = sc_if->msk_softc;
3310  	struct ifnet *ifp = sc_if->msk_ifp;
3311  
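	/*
	 * Yukon Extreme (except rev A0) and later MACs keep Tx
	 * store-and-forward enabled even for jumbo frames; older MACs must
	 * disable it and lower the Tx FIFO almost-empty threshold when the
	 * MTU exceeds the standard Ethernet MTU.
	 */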
3312  	if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
3313  	    sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
3314  	    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
3315  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3316  		    TX_STFW_ENA);
3317  	} else {
3318  		if (ifp->if_mtu > ETHERMTU) {
3319  			/* Set Tx GMAC FIFO Almost Empty Threshold. */
3320  			CSR_WRITE_4(sc,
3321  			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3322  			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3323  			/* Disable Store & Forward mode for Tx. */
3324  			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3325  			    TX_STFW_DIS);
3326  		} else {
3327  			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3328  			    TX_STFW_ENA);
3329  		}
3330  	}
3331  }
3332  
3333  static void
3334  msk_init(void *xsc)
3335  {
3336  	struct msk_if_softc *sc_if = xsc;
3337  	struct msk_softc *sc = sc_if->msk_softc;
3338  	struct ifnet *ifp = sc_if->msk_ifp;
3339  	struct mii_data	 *mii;
3340  	uint16_t eaddr[ETHER_ADDR_LEN / 2];
3341  	uint16_t gmac;
3342  	uint32_t reg;
3343  	int error, i;
3344  
3345  	ASSERT_SERIALIZED(ifp->if_serializer);
3346  
3347  	mii = device_get_softc(sc_if->msk_miibus);
3348  
3349  	error = 0;
3350  	/* Cancel pending I/O and free all Rx/Tx buffers. */
3351  	msk_stop(sc_if);
3352  
3353  	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
3354  	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
3355  	    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3356  		/*
3357  		 * In Yukon EC Ultra, TSO and checksum offload are not
3358  		 * supported for jumbo frames.
3359  		 */
3360  		ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
3361  		ifp->if_capenable &= ~IFCAP_TXCSUM;
3362  	}
3363  
3364  	/* GMAC Control reset. */
3365  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3366  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3367  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
3368  	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
3369  	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
3370  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3371  		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
3372  		    GMC_BYP_RETR_ON);
3373  	}
3374  
3375  	/*
3376  	 * Initialize GMAC first such that speed/duplex/flow-control
3377  	 * parameters are renegotiated when interface is brought up.
3378  	 */
3379  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3380  
3381  	/* Dummy read of the Interrupt Source Register. */
3382  	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3383  
3384  	/* Set MIB Clear Counter Mode. */
3385  	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3386  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3387  	/* Read all MIB Counters with Clear Mode set. */
3388  	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3389  		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3390  	/* Clear MIB Clear Counter Mode. */
3391  	gmac &= ~GM_PAR_MIB_CLR;
3392  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3393  
3394  	/* Disable FCS. */
3395  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3396  
3397  	/* Setup Transmit Control Register. */
3398  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3399  
3400  	/* Setup Transmit Flow Control Register. */
3401  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3402  
3403  	/* Setup Transmit Parameter Register. */
3404  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3405  	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3406  	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3407  
3408  	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3409  	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3410  
3411  	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3412  		gmac |= GM_SMOD_JUMBO_ENA;
3413  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3414  
3415  	/* Set station address. */
3416  	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3417  	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3418  		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3419  		    eaddr[i]);
3420  	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3421  		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3422  		    eaddr[i]);
3423  
3424  	/* Disable interrupts for counter overflows. */
3425  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3426  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3427  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3428  
3429  	/* Configure Rx MAC FIFO. */
3430  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3431  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3432  	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3433  	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3434  	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
3435  		reg |= GMF_RX_OVER_ON;
3436  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3437  
3438  	/* Set receive filter. */
3439  	msk_rxfilter(sc_if);
3440  
3441  	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3442  		/* Clear flush mask - HW bug. */
3443  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3444  	} else {
3445  		/* Flush Rx MAC FIFO on any flow control or error. */
3446  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3447  		    GMR_FS_ANY_ERR);
3448  	}
3449  
3450  	/*
3451  	 * Set Rx FIFO flush threshold to 64 bytes 1 FIFO word
3452  	 * due to hardware hang on receipt of pause frames.
3453  	 */
3454  	reg = RX_GMF_FL_THR_DEF + 1;
3455  	/* Another magic value for Yukon FE+ - from Linux. */
3456  	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3457  	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3458  		reg = 0x178;
3459  	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3460  
3461  
3462  	/* Configure Tx MAC FIFO. */
3463  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3464  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3465  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3466  
3467  	/* Configure hardware VLAN tag insertion/stripping. */
3468  	msk_setvlan(sc_if, ifp);
3469  
3470  	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3471  		/* Set Rx Pause threshold. */
3472  		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3473  		    MSK_ECU_LLPP);
3474  		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3475  		    MSK_ECU_ULPP);
3476  		/* Configure store-and-forward for Tx. */
3477  		msk_set_tx_stfwd(sc_if);
3478  	}
3479  
3480  	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3481  	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3482  		/* Disable dynamic watermark - from Linux. */
3483  		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3484  		reg &= ~0x03;
3485  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3486  	}
3487  
3488  	/*
3489  	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3490  	 * arbiter as we don't use Sync Tx queue.
3491  	 */
3492  	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3493  	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3494  	/* Enable the RAM Interface Arbiter. */
3495  	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3496  
3497  	/* Setup RAM buffer. */
3498  	msk_set_rambuffer(sc_if);
3499  
3500  	/* Disable Tx sync Queue. */
3501  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3502  
3503  	/* Setup Tx Queue Bus Memory Interface. */
3504  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3505  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3506  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3507  	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3508  	switch (sc->msk_hw_id) {
3509  	case CHIP_ID_YUKON_EC_U:
3510  		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3511  			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3512  			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3513  			    MSK_ECU_TXFF_LEV);
3514  		}
3515  		break;
3516  	case CHIP_ID_YUKON_EX:
3517  		/*
3518  		 * Yukon Extreme seems to have a silicon bug in its
3519  		 * automatic Tx checksum calculation capability.
3520  		 */
3521  		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
3522  			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3523  			    F_TX_CHK_AUTO_OFF);
3524  		}
3525  		break;
3526  	}
3527  
3528  	/* Setup Rx Queue Bus Memory Interface. */
3529  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3530  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3531  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3532  	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3533  	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3534  	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3535  		/* MAC Rx RAM Read is controlled by hardware. */
3535  		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3537  	}
3538  
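	/*
	 * Point the Tx prefetch unit at the Tx LE ring and reset the
	 * software Tx ring state.
	 */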
3539  	msk_set_prefetch(sc, sc_if->msk_txq,
3540  	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3541  	msk_init_tx_ring(sc_if);
3542  
3543  	/* Disable Rx checksum offload and RSS hash. */
3544  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3545  	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3546  #ifdef MSK_JUMBO
3547  	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3548  		msk_set_prefetch(sc, sc_if->msk_rxq,
3549  		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3550  		    MSK_JUMBO_RX_RING_CNT - 1);
3551  		error = msk_init_jumbo_rx_ring(sc_if);
3552  	} else
3553  #endif
3554  	{
3555  		msk_set_prefetch(sc, sc_if->msk_rxq,
3556  		    sc_if->msk_rdata.msk_rx_ring_paddr,
3557  		    MSK_RX_RING_CNT - 1);
3558  		error = msk_init_rx_ring(sc_if);
3559  	}
3560  	if (error != 0) {
3561  		device_printf(sc_if->msk_if_dev,
3562  		    "initialization failed: no memory for Rx buffers\n");
3563  		msk_stop(sc_if);
3564  		return;
3565  	}
3566  	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
3567  	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
3568  		/* Disable flushing of non-ASF packets. */
3569  		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3570  		    GMF_RX_MACSEC_FLUSH_OFF);
3571  	}
3572  
3573  	/* Configure interrupt handling. */
3574  	if (sc_if->msk_port == MSK_PORT_A) {
3575  		sc->msk_intrmask |= Y2_IS_PORT_A;
3576  		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3577  	} else {
3578  		sc->msk_intrmask |= Y2_IS_PORT_B;
3579  		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3580  	}
3581  	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3582  	CSR_READ_4(sc, B0_HWE_IMSK);
3583  	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3584  	CSR_READ_4(sc, B0_IMSK);
3585  
3586  	sc_if->msk_link = 0;
3587  	mii_mediachg(mii);
3588  
3589  	mskc_set_imtimer(sc);
3590  
3591  	ifp->if_flags |= IFF_RUNNING;
3592  	ifq_clr_oactive(&ifp->if_snd);
3593  
3594  	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3595  }
3596  
3597  static void
3598  msk_set_rambuffer(struct msk_if_softc *sc_if)
3599  {
3600  	struct msk_softc *sc;
3601  	int ltpp, utpp;
3602  
3603  	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3604  		return;
3605  
3606  	sc = sc_if->msk_softc;
3607  
3608  	/* Setup Rx Queue. */
3609  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3610  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3611  	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3612  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3613  	    sc->msk_rxqend[sc_if->msk_port] / 8);
3614  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3615  	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3616  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3617  	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3618  
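	/*
	 * Set the upper/lower Rx pause thresholds, in 8-byte units, a
	 * fixed distance below a full RAM buffer so flow control kicks in
	 * before the queue overflows.
	 */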
3619  	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3620  	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3621  	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3622  	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3623  	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3624  		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3625  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3626  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3627  	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3628  
3629  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3630  	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3631  
3632  	/* Setup Tx Queue. */
3633  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3634  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3635  	    sc->msk_txqstart[sc_if->msk_port] / 8);
3636  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3637  	    sc->msk_txqend[sc_if->msk_port] / 8);
3638  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3639  	    sc->msk_txqstart[sc_if->msk_port] / 8);
3640  	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3641  	    sc->msk_txqstart[sc_if->msk_port] / 8);
3642  	/* Enable Store & Forward for Tx side. */
3643  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3644  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3645  	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3646  }
3647  
3648  static void
3649  msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3650      uint32_t count)
3651  {
3652  
3653  	/* Reset the prefetch unit. */
3654  	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3655  	    PREF_UNIT_RST_SET);
3656  	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3657  	    PREF_UNIT_RST_CLR);
3658  	/* Set LE base address. */
3659  	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3660  	    MSK_ADDR_LO(addr));
3661  	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3662  	    MSK_ADDR_HI(addr));
3663  	/* Set the list last index. */
3664  	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3665  	    count);
3666  	/* Turn on prefetch unit. */
3667  	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3668  	    PREF_UNIT_OP_ON);
3669  	/* Dummy read to ensure the write completes. */
3670  	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3671  }
3672  
3673  static void
3674  msk_stop(struct msk_if_softc *sc_if)
3675  {
3676  	struct msk_softc *sc = sc_if->msk_softc;
3677  	struct ifnet *ifp = sc_if->msk_ifp;
3678  	struct msk_txdesc *txd;
3679  	struct msk_rxdesc *rxd;
3680  #ifdef MSK_JUMBO
3681  	struct msk_rxdesc *jrxd;
3682  #endif
3683  	uint32_t val;
3684  	int i;
3685  
3686  	ASSERT_SERIALIZED(ifp->if_serializer);
3687  
3688  	callout_stop(&sc_if->msk_tick_ch);
3689  	ifp->if_timer = 0;
3690  
3691  	/* Disable interrupts. */
3692  	if (sc_if->msk_port == MSK_PORT_A) {
3693  		sc->msk_intrmask &= ~Y2_IS_PORT_A;
3694  		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3695  	} else {
3696  		sc->msk_intrmask &= ~Y2_IS_PORT_B;
3697  		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3698  	}
3699  	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3700  	CSR_READ_4(sc, B0_HWE_IMSK);
3701  	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3702  	CSR_READ_4(sc, B0_IMSK);
3703  
3704  	/* Disable Tx/Rx MAC. */
3705  	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3706  	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3707  	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3708  	/* Read back again to ensure the write completed. */
3709  	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3710  
3711  	/* Stop Tx BMU. */
3712  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3713  	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
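	/*
	 * Poll until the BMU reports stopped or idle, re-issuing the stop
	 * command while neither bit is set.
	 */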
3714  	for (i = 0; i < MSK_TIMEOUT; i++) {
3715  		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3716  			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3717  			    BMU_STOP);
3718  			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3719  		} else
3720  			break;
3721  		DELAY(1);
3722  	}
3723  	if (i == MSK_TIMEOUT)
3724  		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3725  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3726  	    RB_RST_SET | RB_DIS_OP_MD);
3727  
3728  	/* Disable all GMAC interrupts. */
3729  	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3730  	/* Disable PHY interrupt. */
3731  	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3732  
3733  	/* Disable the RAM Interface Arbiter. */
3734  	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3735  
3736  	/* Reset the PCI FIFO of the async Tx queue. */
3737  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3738  	    BMU_RST_SET | BMU_FIFO_RST);
3739  
3740  	/* Reset the Tx prefetch units. */
3741  	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3742  	    PREF_UNIT_RST_SET);
3743  
3744  	/* Reset the RAM Buffer async Tx queue. */
3745  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3746  
3747  	/* Reset Tx MAC FIFO. */
3748  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3749  	/* Set Pause Off. */
3750  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3751  
3752  	/*
3753  	 * The Rx Stop command does not work on Yukon-2 if the BMU is in
3754  	 * the middle of a packet.  Since we cannot guarantee that no data
3755  	 * is incoming, the BMU must be reset while no DMA transfer is in
3756  	 * progress.  Because the Rx path may still be active, the Rx RAM
3757  	 * buffer is stopped first so that any possible incoming data will
3758  	 * not trigger a DMA.  After the RAM buffer is stopped, the BMU is
3759  	 * polled until any DMA in progress has ended, and only then is it
3760  	 * reset.
3761  	 */
3762  
3763  	/* Disable the RAM Buffer receive queue. */
3764  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3765  	for (i = 0; i < MSK_TIMEOUT; i++) {
3766  		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3767  		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3768  			break;
3769  		DELAY(1);
3770  	}
3771  	if (i == MSK_TIMEOUT)
3772  		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3773  	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3774  	    BMU_RST_SET | BMU_FIFO_RST);
3775  	/* Reset the Rx prefetch unit. */
3776  	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3777  	    PREF_UNIT_RST_SET);
3778  	/* Reset the RAM Buffer receive queue. */
3779  	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3780  	/* Reset Rx MAC FIFO. */
3781  	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3782  
3783  	/* Free Rx and Tx mbufs still in the queues. */
3784  	for (i = 0; i < MSK_RX_RING_CNT; i++) {
3785  		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3786  		if (rxd->rx_m != NULL) {
3787  			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3788  			    rxd->rx_dmamap);
3789  			m_freem(rxd->rx_m);
3790  			rxd->rx_m = NULL;
3791  		}
3792  	}
3793  #ifdef MSK_JUMBO
3794  	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3795  		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3796  		if (jrxd->rx_m != NULL) {
3797  			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3798  			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3799  			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
3800  			    jrxd->rx_dmamap);
3801  			m_freem(jrxd->rx_m);
3802  			jrxd->rx_m = NULL;
3803  		}
3804  	}
3805  #endif
3806  	for (i = 0; i < MSK_TX_RING_CNT; i++) {
3807  		txd = &sc_if->msk_cdata.msk_txdesc[i];
3808  		if (txd->tx_m != NULL) {
3809  			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
3810  			    txd->tx_dmamap);
3811  			m_freem(txd->tx_m);
3812  			txd->tx_m = NULL;
3813  		}
3814  	}
3815  
3816  	/*
3817  	 * Mark the interface down.
3818  	 */
3819  	ifp->if_flags &= ~IFF_RUNNING;
3820  	ifq_clr_oactive(&ifp->if_snd);
3821  	sc_if->msk_link = 0;
3822  }
3823  
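/* Clamp the event processing limit to [MSK_PROC_MIN, MSK_PROC_MAX]. */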
3824  static int
3825  mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
3826  {
3827  	return sysctl_int_range(oidp, arg1, arg2, req,
3828  				MSK_PROC_MIN, MSK_PROC_MAX);
3829  }
3830  
3831  static int
3832  mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3833  {
3834  	struct msk_softc *sc = arg1;
3835  	struct lwkt_serialize *serializer = &sc->msk_serializer;
3836  	int error = 0, v;
3837  
3838  	lwkt_serialize_enter(serializer);
3839  
3840  	v = sc->msk_intr_rate;
3841  	error = sysctl_handle_int(oidp, &v, 0, req);
3842  	if (error || req->newptr == NULL)
3843  		goto back;
3844  	if (v < 0) {
3845  		error = EINVAL;
3846  		goto back;
3847  	}
3848  
3849  	if (sc->msk_intr_rate != v) {
3850  		int flag = 0, i;
3851  
3852  		sc->msk_intr_rate = v;
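		/*
		 * Reprogram the moderation timer only if at least one port
		 * is currently running.
		 */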
3853  		for (i = 0; i < 2; ++i) {
3854  			if (sc->msk_if[i] != NULL) {
3855  				flag |= sc->msk_if[i]->
3856  					arpcom.ac_if.if_flags & IFF_RUNNING;
3857  			}
3858  		}
3859  		if (flag)
3860  			mskc_set_imtimer(sc);
3861  	}
3862  back:
3863  	lwkt_serialize_exit(serializer);
3864  	return error;
3865  }
3866  
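/*
 * Allocate ring-aligned, zeroed, coherent DMA memory and return its tag,
 * map, kernel virtual address and bus address to the caller.
 */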
3867  static int
3868  msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
3869  		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
3870  {
3871  	struct msk_if_softc *sc_if = device_get_softc(dev);
3872  	bus_dmamem_t dmem;
3873  	int error;
3874  
3875  	error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
3876  			MSK_RING_ALIGN, 0,
3877  			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3878  			size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3879  	if (error) {
3880  		device_printf(dev, "can't create coherent DMA memory\n");
3881  		return error;
3882  	}
3883  
3884  	*dtag = dmem.dmem_tag;
3885  	*dmap = dmem.dmem_map;
3886  	*addr = dmem.dmem_addr;
3887  	*paddr = dmem.dmem_busaddr;
3888  
3889  	return 0;
3890  }
3891  
3892  static void
3893  msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
3894  {
3895  	if (dtag != NULL) {
3896  		bus_dmamap_unload(dtag, dmap);
3897  		bus_dmamem_free(dtag, addr, dmap);
3898  		bus_dma_tag_destroy(dtag);
3899  	}
3900  }
3901  
3902  static void
3903  mskc_set_imtimer(struct msk_softc *sc)
3904  {
3905  	if (sc->msk_intr_rate > 0) {
3906  		/*
3907  		 * XXX myk(4) seems to use 125MHz for EC/FE/XL
3908  		 *     and 78.125MHz for the rest of the chip types
3909  		 */
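		/*
		 * Program the timer with 1000000 / intr_rate microseconds,
		 * converted to timer ticks by MSK_USECS().
		 */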
3910  		CSR_WRITE_4(sc, B2_IRQM_INI,
3911  			    MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
3912  		CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
3913  		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
3914  	} else {
3915  		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
3916  	}
3917  }
3918