1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29 
30 #include "opt_polling.h"
31 #include "opt_rss.h"
32 #include "opt_jme.h"
33 
34 #include <sys/param.h>
35 #include <sys/endian.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/interrupt.h>
39 #include <sys/malloc.h>
40 #include <sys/proc.h>
41 #include <sys/rman.h>
42 #include <sys/serialize.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/toeplitz.h>
55 #include <net/toeplitz2.h>
56 #include <net/vlan/if_vlan_var.h>
57 #include <net/vlan/if_vlan_ether.h>
58 
59 #include <netinet/in.h>
60 
61 #include <dev/netif/mii_layer/miivar.h>
62 #include <dev/netif/mii_layer/jmphyreg.h>
63 
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
67 
68 #include <dev/netif/jme/if_jmereg.h>
69 #include <dev/netif/jme/if_jmevar.h>
70 
71 #include "miibus_if.h"
72 
73 /* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
74 #undef	JME_SHOW_ERRORS
75 
76 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
77 
78 #ifdef JME_RSS_DEBUG
79 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
80 do { \
81 	if ((sc)->jme_rss_debug >= (lvl)) \
82 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
83 } while (0)
84 #else	/* !JME_RSS_DEBUG */
85 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
86 #endif	/* JME_RSS_DEBUG */
87 
88 static int	jme_probe(device_t);
89 static int	jme_attach(device_t);
90 static int	jme_detach(device_t);
91 static int	jme_shutdown(device_t);
92 static int	jme_suspend(device_t);
93 static int	jme_resume(device_t);
94 
95 static int	jme_miibus_readreg(device_t, int, int);
96 static int	jme_miibus_writereg(device_t, int, int, int);
97 static void	jme_miibus_statchg(device_t);
98 
99 static void	jme_init(void *);
100 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
101 static void	jme_start(struct ifnet *);
102 static void	jme_watchdog(struct ifnet *);
103 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
104 static int	jme_mediachange(struct ifnet *);
105 #ifdef DEVICE_POLLING
106 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
107 #endif
108 
109 static void	jme_intr(void *);
110 static void	jme_txeof(struct jme_softc *);
111 static void	jme_rxeof(struct jme_softc *, int);
112 static int	jme_rxeof_chain(struct jme_softc *, int,
113 				struct mbuf_chain *, int);
114 static void	jme_rx_intr(struct jme_softc *, uint32_t);
115 
116 static int	jme_dma_alloc(struct jme_softc *);
117 static void	jme_dma_free(struct jme_softc *);
118 static int	jme_init_rx_ring(struct jme_softc *, int);
119 static void	jme_init_tx_ring(struct jme_softc *);
120 static void	jme_init_ssb(struct jme_softc *);
121 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
122 static int	jme_encap(struct jme_softc *, struct mbuf **);
123 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
124 static int	jme_rxring_dma_alloc(struct jme_softc *, int);
125 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
126 
127 static void	jme_tick(void *);
128 static void	jme_stop(struct jme_softc *);
129 static void	jme_reset(struct jme_softc *);
130 static void	jme_set_vlan(struct jme_softc *);
131 static void	jme_set_filter(struct jme_softc *);
132 static void	jme_stop_tx(struct jme_softc *);
133 static void	jme_stop_rx(struct jme_softc *);
134 static void	jme_mac_config(struct jme_softc *);
135 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
136 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
137 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
138 #ifdef notyet
139 static void	jme_setwol(struct jme_softc *);
140 static void	jme_setlinkspeed(struct jme_softc *);
141 #endif
142 static void	jme_set_tx_coal(struct jme_softc *);
143 static void	jme_set_rx_coal(struct jme_softc *);
144 static void	jme_enable_rss(struct jme_softc *);
145 static void	jme_disable_rss(struct jme_softc *);
146 
147 static void	jme_sysctl_node(struct jme_softc *);
148 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
149 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
150 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
151 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
152 
153 /*
154  * Devices supported by this driver.
155  */
156 static const struct jme_dev {
157 	uint16_t	jme_vendorid;
158 	uint16_t	jme_deviceid;
159 	uint32_t	jme_caps;
160 	const char	*jme_name;
161 } jme_devs[] = {
162 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
163 	    JME_CAP_JUMBO,
164 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
165 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
166 	    JME_CAP_FASTETH,
167 	    "JMicron Inc, JMC260 Fast Ethernet" },
168 	{ 0, 0, 0, NULL }
169 };
170 
171 static device_method_t jme_methods[] = {
172 	/* Device interface. */
173 	DEVMETHOD(device_probe,		jme_probe),
174 	DEVMETHOD(device_attach,	jme_attach),
175 	DEVMETHOD(device_detach,	jme_detach),
176 	DEVMETHOD(device_shutdown,	jme_shutdown),
177 	DEVMETHOD(device_suspend,	jme_suspend),
178 	DEVMETHOD(device_resume,	jme_resume),
179 
180 	/* Bus interface. */
181 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
182 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
183 
184 	/* MII interface. */
185 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
186 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
187 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
188 
189 	{ NULL, NULL }
190 };
191 
192 static driver_t jme_driver = {
193 	"jme",
194 	jme_methods,
195 	sizeof(struct jme_softc)
196 };
197 
198 static devclass_t jme_devclass;
199 
200 DECLARE_DUMMY_MODULE(if_jme);
201 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
202 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
203 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
204 
205 static const struct {
206 	uint32_t	jme_coal;
207 	uint32_t	jme_comp;
208 } jme_rx_status[JME_NRXRING_MAX] = {
209 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
210 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
211 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
212 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
213 };
214 
215 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
216 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
217 static int	jme_rx_ring_count = JME_NRXRING_DEF;
218 static int	jme_msi_enable = 1;
219 
220 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
221 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
222 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
223 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
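/*
 * These tunables are read from the kernel environment once at module
 * load time; e.g. (illustrative values) in /boot/loader.conf:
 *	hw.jme.rx_ring_count="2"
 *	hw.jme.msi.enable="0"
 */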
224 
225 /*
226  *	Read a PHY register on the MII of the JMC250.
227  */
228 static int
229 jme_miibus_readreg(device_t dev, int phy, int reg)
230 {
231 	struct jme_softc *sc = device_get_softc(dev);
232 	uint32_t val;
233 	int i;
234 
235 	/* For FPGA version, PHY address 0 should be ignored. */
236 	if (sc->jme_caps & JME_CAP_FPGA) {
237 		if (phy == 0)
238 			return (0);
239 	} else {
240 		if (sc->jme_phyaddr != phy)
241 			return (0);
242 	}
243 
244 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
245 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
246 
247 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
248 		DELAY(1);
249 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
250 			break;
251 	}
252 	if (i == 0) {
253 		device_printf(sc->jme_dev, "phy read timeout: "
254 			      "phy %d, reg %d\n", phy, reg);
255 		return (0);
256 	}
257 
258 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
259 }
260 
261 /*
262  *	Write a PHY register on the MII of the JMC250.
263  */
264 static int
265 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
266 {
267 	struct jme_softc *sc = device_get_softc(dev);
268 	int i;
269 
270 	/* For FPGA version, PHY address 0 should be ignored. */
271 	if (sc->jme_caps & JME_CAP_FPGA) {
272 		if (phy == 0)
273 			return (0);
274 	} else {
275 		if (sc->jme_phyaddr != phy)
276 			return (0);
277 	}
278 
279 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
280 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
281 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
282 
283 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
284 		DELAY(1);
285 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
286 			break;
287 	}
288 	if (i == 0) {
289 		device_printf(sc->jme_dev, "phy write timeout: "
290 			      "phy %d, reg %d\n", phy, reg);
291 	}
292 
293 	return (0);
294 }
295 
296 /*
297  *	Callback from MII layer when media changes.
298  */
299 static void
300 jme_miibus_statchg(device_t dev)
301 {
302 	struct jme_softc *sc = device_get_softc(dev);
303 	struct ifnet *ifp = &sc->arpcom.ac_if;
304 	struct mii_data *mii;
305 	struct jme_txdesc *txd;
306 	bus_addr_t paddr;
307 	int i, r;
308 
309 	ASSERT_SERIALIZED(ifp->if_serializer);
310 
311 	if ((ifp->if_flags & IFF_RUNNING) == 0)
312 		return;
313 
314 	mii = device_get_softc(sc->jme_miibus);
315 
316 	sc->jme_flags &= ~JME_FLAG_LINK;
317 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
318 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
319 		case IFM_10_T:
320 		case IFM_100_TX:
321 			sc->jme_flags |= JME_FLAG_LINK;
322 			break;
323 		case IFM_1000_T:
324 			if (sc->jme_caps & JME_CAP_FASTETH)
325 				break;
326 			sc->jme_flags |= JME_FLAG_LINK;
327 			break;
328 		default:
329 			break;
330 		}
331 	}
332 
333 	/*
334 	 * Disabling the Rx/Tx MACs has the side-effect of resetting
335 	 * the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
336 	 * descriptor address, so the driver must reset its internal
337 	 * producer/consumer pointers and reclaim any allocated
338 	 * resources.  Note that just saving the values of the
339 	 * JME_TXNDA and JME_RXNDA registers before stopping the
340 	 * MACs and restoring them afterwards is not sufficient to
341 	 * ensure a correct MAC state, because stopping MAC
342 	 * operation can take a while and the hardware might have
343 	 * updated the JME_TXNDA/JME_RXNDA registers during the
344 	 * stop operation.
345 	 */
346 
347 	/* Disable interrupts */
348 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
349 
350 	/* Stop driver */
351 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
352 	ifp->if_timer = 0;
353 	callout_stop(&sc->jme_tick_ch);
354 
355 	/* Stop receiver/transmitter. */
356 	jme_stop_rx(sc);
357 	jme_stop_tx(sc);
358 
359 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
360 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
361 
362 		jme_rxeof(sc, r);
363 		if (rdata->jme_rxhead != NULL)
364 			m_freem(rdata->jme_rxhead);
365 		JME_RXCHAIN_RESET(sc, r);
366 
367 		/*
368 		 * Reuse configured Rx descriptors and reset
369 		 * producer/consumer index.
370 		 */
371 		rdata->jme_rx_cons = 0;
372 	}
373 
374 	jme_txeof(sc);
375 	if (sc->jme_cdata.jme_tx_cnt != 0) {
376 		/* Remove queued packets for transmit. */
377 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
378 			txd = &sc->jme_cdata.jme_txdesc[i];
379 			if (txd->tx_m != NULL) {
380 				bus_dmamap_unload(
381 				    sc->jme_cdata.jme_tx_tag,
382 				    txd->tx_dmamap);
383 				m_freem(txd->tx_m);
384 				txd->tx_m = NULL;
385 				txd->tx_ndesc = 0;
386 				ifp->if_oerrors++;
387 			}
388 		}
389 	}
390 	jme_init_tx_ring(sc);
391 
392 	/* Initialize shadow status block. */
393 	jme_init_ssb(sc);
394 
395 	/* Program MAC with resolved speed/duplex/flow-control. */
396 	if (sc->jme_flags & JME_FLAG_LINK) {
397 		jme_mac_config(sc);
398 
399 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
400 
401 		/* Program the Tx ring address into the hardware. */
402 		paddr = sc->jme_cdata.jme_tx_ring_paddr;
403 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
404 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
405 
406 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
407 			CSR_WRITE_4(sc, JME_RXCSR,
408 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
409 
410 			/* Program the Rx ring address into the hardware. */
411 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
412 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
413 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
414 		}
415 
416 		/* Restart receiver/transmitter. */
417 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
418 		    RXCSR_RXQ_START);
419 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
420 	}
421 
422 	ifp->if_flags |= IFF_RUNNING;
423 	ifp->if_flags &= ~IFF_OACTIVE;
424 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
425 
426 #ifdef DEVICE_POLLING
427 	if (!(ifp->if_flags & IFF_POLLING))
428 #endif
429 	/* Reenable interrupts. */
430 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
431 }
432 
433 /*
434  *	Get the current interface media status.
435  */
436 static void
437 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
438 {
439 	struct jme_softc *sc = ifp->if_softc;
440 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
441 
442 	ASSERT_SERIALIZED(ifp->if_serializer);
443 
444 	mii_pollstat(mii);
445 	ifmr->ifm_status = mii->mii_media_status;
446 	ifmr->ifm_active = mii->mii_media_active;
447 }
448 
449 /*
450  *	Set hardware to newly-selected media.
451  */
452 static int
453 jme_mediachange(struct ifnet *ifp)
454 {
455 	struct jme_softc *sc = ifp->if_softc;
456 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
457 	int error;
458 
459 	ASSERT_SERIALIZED(ifp->if_serializer);
460 
461 	if (mii->mii_instance != 0) {
462 		struct mii_softc *miisc;
463 
464 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
465 			mii_phy_reset(miisc);
466 	}
467 	error = mii_mediachg(mii);
468 
469 	return (error);
470 }
471 
472 static int
473 jme_probe(device_t dev)
474 {
475 	const struct jme_dev *sp;
476 	uint16_t vid, did;
477 
478 	vid = pci_get_vendor(dev);
479 	did = pci_get_device(dev);
480 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
481 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
482 			struct jme_softc *sc = device_get_softc(dev);
483 
484 			sc->jme_caps = sp->jme_caps;
485 			device_set_desc(dev, sp->jme_name);
486 			return (0);
487 		}
488 	}
489 	return (ENXIO);
490 }
491 
492 static int
493 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
494 {
495 	uint32_t reg;
496 	int i;
497 
498 	*val = 0;
499 	for (i = JME_TIMEOUT; i > 0; i--) {
500 		reg = CSR_READ_4(sc, JME_SMBCSR);
501 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
502 			break;
503 		DELAY(1);
504 	}
505 
506 	if (i == 0) {
507 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
508 		return (ETIMEDOUT);
509 	}
510 
511 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
512 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
513 	for (i = JME_TIMEOUT; i > 0; i--) {
514 		DELAY(1);
515 		reg = CSR_READ_4(sc, JME_SMBINTF);
516 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
517 			break;
518 	}
519 
520 	if (i == 0) {
521 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
522 		return (ETIMEDOUT);
523 	}
524 
525 	reg = CSR_READ_4(sc, JME_SMBINTF);
526 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
527 
528 	return (0);
529 }
530 
531 static int
532 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
533 {
534 	uint8_t fup, reg, val;
535 	uint32_t offset;
536 	int match;
537 
538 	offset = 0;
539 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
540 	    fup != JME_EEPROM_SIG0)
541 		return (ENOENT);
542 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
543 	    fup != JME_EEPROM_SIG1)
544 		return (ENOENT);
545 	match = 0;
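	/*
	 * Walk the EEPROM descriptors looking for writes to the
	 * station address registers (JME_PAR0 through JME_PAR0 +
	 * ETHER_ADDR_LEN - 1); each matching descriptor supplies
	 * one byte of the MAC address.
	 */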
546 	do {
547 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
548 			break;
549 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
550 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
551 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
552 				break;
553 			if (reg >= JME_PAR0 &&
554 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
555 				if (jme_eeprom_read_byte(sc, offset + 2,
556 				    &val) != 0)
557 					break;
558 				eaddr[reg - JME_PAR0] = val;
559 				match++;
560 			}
561 		}
562 		/* Check for the end of the EEPROM descriptors. */
563 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
564 			break;
565 		/* Try the next EEPROM descriptor. */
566 		offset += JME_EEPROM_DESC_BYTES;
567 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
568 
569 	if (match == ETHER_ADDR_LEN)
570 		return (0);
571 
572 	return (ENOENT);
573 }
574 
575 static void
576 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
577 {
578 	uint32_t par0, par1;
579 
580 	/* Read station address. */
581 	par0 = CSR_READ_4(sc, JME_PAR0);
582 	par1 = CSR_READ_4(sc, JME_PAR1);
583 	par1 &= 0xFFFF;
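	/*
	 * Reject an all-zero address, or one whose first octet has
	 * bit 0 (the multicast/I-G bit) set; neither is a valid
	 * station address.
	 */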
584 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
585 		device_printf(sc->jme_dev,
586 		    "generating fake ethernet address.\n");
587 		par0 = karc4random();
588 		/* Set OUI to JMicron. */
589 		eaddr[0] = 0x00;
590 		eaddr[1] = 0x1B;
591 		eaddr[2] = 0x8C;
592 		eaddr[3] = (par0 >> 16) & 0xff;
593 		eaddr[4] = (par0 >> 8) & 0xff;
594 		eaddr[5] = par0 & 0xff;
595 	} else {
596 		eaddr[0] = (par0 >> 0) & 0xFF;
597 		eaddr[1] = (par0 >> 8) & 0xFF;
598 		eaddr[2] = (par0 >> 16) & 0xFF;
599 		eaddr[3] = (par0 >> 24) & 0xFF;
600 		eaddr[4] = (par1 >> 0) & 0xFF;
601 		eaddr[5] = (par1 >> 8) & 0xFF;
602 	}
603 }
604 
605 static int
606 jme_attach(device_t dev)
607 {
608 	struct jme_softc *sc = device_get_softc(dev);
609 	struct ifnet *ifp = &sc->arpcom.ac_if;
610 	uint32_t reg;
611 	uint16_t did;
612 	uint8_t pcie_ptr, rev;
613 	int error = 0;
614 	uint8_t eaddr[ETHER_ADDR_LEN];
615 	u_int irq_flags;
616 
617 	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
618 	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
619 		sc->jme_rx_desc_cnt = JME_NDESC_MAX;
620 
621 	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
622 	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
623 		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
624 
625 	/*
626 	 * Calculate the number of Rx rings based on ncpus2.
627 	 */
628 	sc->jme_rx_ring_cnt = jme_rx_ring_count;
629 	if (sc->jme_rx_ring_cnt <= 0)
630 		sc->jme_rx_ring_cnt = JME_NRXRING_1;
631 	if (sc->jme_rx_ring_cnt > ncpus2)
632 		sc->jme_rx_ring_cnt = ncpus2;
633 
634 	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
635 		sc->jme_rx_ring_cnt = JME_NRXRING_4;
636 	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
637 		sc->jme_rx_ring_cnt = JME_NRXRING_2;
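	/*
	 * At this point the ring count has been clamped to the CPU
	 * count and rounded down to a supported configuration, e.g.
	 * a tunable of 4 on a system with ncpus2 == 2 ends up using
	 * two rings (assuming JME_NRXRING_2 is 2).
	 */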
638 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
639 
640 	sc->jme_dev = dev;
641 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
642 
643 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
644 
645 	callout_init(&sc->jme_tick_ch);
646 
647 #ifndef BURN_BRIDGES
648 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
649 		uint32_t irq, mem;
650 
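		/*
		 * Bringing the chip out of D3 can reset these config
		 * registers, so save them across the power-state
		 * change and restore them afterwards.
		 */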
651 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
652 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
653 
654 		device_printf(dev, "chip is in D%d power mode "
655 		    "-- setting to D0\n", pci_get_powerstate(dev));
656 
657 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
658 
659 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
660 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
661 	}
662 #endif	/* !BURN_BRIDGES */
663 
664 	/* Enable bus mastering */
665 	pci_enable_busmaster(dev);
666 
667 	/*
668 	 * Allocate IO memory
669 	 *
670 	 * JMC250 supports both memory mapped and I/O register space
671 	 * access.  Because I/O register access would have to use
672 	 * different BARs to reach the registers, it's a waste of time
673 	 * to use I/O register space access.  JMC250 uses 16K to map
674 	 * the entire memory space.
675 	 */
676 	sc->jme_mem_rid = JME_PCIR_BAR;
677 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
678 						 &sc->jme_mem_rid, RF_ACTIVE);
679 	if (sc->jme_mem_res == NULL) {
680 		device_printf(dev, "can't allocate IO memory\n");
681 		return ENXIO;
682 	}
683 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
684 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
685 
686 	/*
687 	 * Allocate IRQ
688 	 */
689 	sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
690 	    &sc->jme_irq_rid, &irq_flags);
691 
692 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
693 	    &sc->jme_irq_rid, irq_flags);
694 	if (sc->jme_irq_res == NULL) {
695 		device_printf(dev, "can't allocate irq\n");
696 		error = ENXIO;
697 		goto fail;
698 	}
699 
700 	/*
701 	 * Extract revisions
702 	 */
703 	reg = CSR_READ_4(sc, JME_CHIPMODE);
704 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
705 	    CHIPMODE_NOT_FPGA) {
706 		sc->jme_caps |= JME_CAP_FPGA;
707 		if (bootverbose) {
708 			device_printf(dev, "FPGA revision: 0x%04x\n",
709 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
710 				      CHIPMODE_FPGA_REV_SHIFT);
711 		}
712 	}
713 
714 	/* NOTE: FM revision is put in the upper 4 bits */
715 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
716 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
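	/* E.g. FM revision 2 with ECO revision 3 yields rev == 0x23. */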
717 	if (bootverbose)
718 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
719 
720 	did = pci_get_device(dev);
721 	switch (did) {
722 	case PCI_PRODUCT_JMICRON_JMC250:
723 		if (rev == JME_REV1_A2)
724 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
725 		break;
726 
727 	case PCI_PRODUCT_JMICRON_JMC260:
728 		if (rev == JME_REV2)
729 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
730 		break;
731 
732 	default:
733 		panic("unknown device id 0x%04x\n", did);
734 	}
735 	if (rev >= JME_REV2) {
736 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
737 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
738 				      GHC_TXMAC_CLKSRC_1000;
739 	}
740 
741 	/* Reset the ethernet controller. */
742 	jme_reset(sc);
743 
744 	/* Get station address. */
745 	reg = CSR_READ_4(sc, JME_SMBCSR);
746 	if (reg & SMBCSR_EEPROM_PRESENT)
747 		error = jme_eeprom_macaddr(sc, eaddr);
748 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
749 		if (error != 0 && (bootverbose)) {
750 			device_printf(dev, "ethernet hardware address "
751 				      "not found in EEPROM.\n");
752 		}
753 		jme_reg_macaddr(sc, eaddr);
754 	}
755 
756 	/*
757 	 * Save PHY address.
758 	 * The integrated JR0211 has a fixed PHY address, whereas the
759 	 * FPGA version requires PHY probing to get the correct one.
760 	 */
761 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
762 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
763 		    GPREG0_PHY_ADDR_MASK;
764 		if (bootverbose) {
765 			device_printf(dev, "PHY is at address %d.\n",
766 			    sc->jme_phyaddr);
767 		}
768 	} else {
769 		sc->jme_phyaddr = 0;
770 	}
771 
772 	/* Set max allowable DMA size. */
773 	pcie_ptr = pci_get_pciecap_ptr(dev);
774 	if (pcie_ptr != 0) {
775 		uint16_t ctrl;
776 
777 		sc->jme_caps |= JME_CAP_PCIE;
778 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
779 		if (bootverbose) {
780 			device_printf(dev, "Read request size : %d bytes.\n",
781 			    128 << ((ctrl >> 12) & 0x07));
782 			device_printf(dev, "TLP payload size : %d bytes.\n",
783 			    128 << ((ctrl >> 5) & 0x07));
784 		}
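		/*
		 * Match the Tx DMA burst size to the PCIe maximum read
		 * request size; requests above 256 bytes are capped at
		 * 512-byte bursts.
		 */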
785 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
786 		case PCIEM_DEVCTL_MAX_READRQ_128:
787 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
788 			break;
789 		case PCIEM_DEVCTL_MAX_READRQ_256:
790 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
791 			break;
792 		default:
793 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
794 			break;
795 		}
796 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
797 	} else {
798 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
799 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
800 	}
801 
802 #ifdef notyet
803 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
804 		sc->jme_caps |= JME_CAP_PMCAP;
805 #endif
806 
807 	/*
808 	 * Create sysctl tree
809 	 */
810 	jme_sysctl_node(sc);
811 
812 	/* Allocate DMA resources. */
813 	error = jme_dma_alloc(sc);
814 	if (error)
815 		goto fail;
816 
817 	ifp->if_softc = sc;
818 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
819 	ifp->if_init = jme_init;
820 	ifp->if_ioctl = jme_ioctl;
821 	ifp->if_start = jme_start;
822 #ifdef DEVICE_POLLING
823 	ifp->if_poll = jme_poll;
824 #endif
825 	ifp->if_watchdog = jme_watchdog;
826 	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
827 	ifq_set_ready(&ifp->if_snd);
828 
829 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
830 	ifp->if_capabilities = IFCAP_HWCSUM |
831 			       IFCAP_VLAN_MTU |
832 			       IFCAP_VLAN_HWTAGGING;
833 	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
834 		ifp->if_capabilities |= IFCAP_RSS;
835 	ifp->if_capenable = ifp->if_capabilities;
836 
837 	/*
838 	 * Disable TXCSUM by default to improve bulk data
839 	 * transmit performance (+20Mbps improvement).
840 	 */
841 	ifp->if_capenable &= ~IFCAP_TXCSUM;
842 
843 	if (ifp->if_capenable & IFCAP_TXCSUM)
844 		ifp->if_hwassist = JME_CSUM_FEATURES;
845 
846 	/* Set up MII bus. */
847 	error = mii_phy_probe(dev, &sc->jme_miibus,
848 			      jme_mediachange, jme_mediastatus);
849 	if (error) {
850 		device_printf(dev, "no PHY found!\n");
851 		goto fail;
852 	}
853 
854 	/*
855 	 * Save PHYADDR for FPGA mode PHY.
856 	 */
857 	if (sc->jme_caps & JME_CAP_FPGA) {
858 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
859 
860 		if (mii->mii_instance != 0) {
861 			struct mii_softc *miisc;
862 
863 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
864 				if (miisc->mii_phy != 0) {
865 					sc->jme_phyaddr = miisc->mii_phy;
866 					break;
867 				}
868 			}
869 			if (sc->jme_phyaddr != 0) {
870 				device_printf(sc->jme_dev,
871 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
872 				/* vendor magic. */
873 				jme_miibus_writereg(dev, sc->jme_phyaddr,
874 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
875 
876 				/* XXX should we clear JME_WA_EXTFIFO */
877 			}
878 		}
879 	}
880 
881 	ether_ifattach(ifp, eaddr, NULL);
882 
883 	/* Tell the upper layer(s) we support long frames. */
884 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
885 
886 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
887 			       &sc->jme_irq_handle, ifp->if_serializer);
888 	if (error) {
889 		device_printf(dev, "could not set up interrupt handler.\n");
890 		ether_ifdetach(ifp);
891 		goto fail;
892 	}
893 
894 	ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
895 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
896 	return 0;
897 fail:
898 	jme_detach(dev);
899 	return (error);
900 }
901 
902 static int
903 jme_detach(device_t dev)
904 {
905 	struct jme_softc *sc = device_get_softc(dev);
906 
907 	if (device_is_attached(dev)) {
908 		struct ifnet *ifp = &sc->arpcom.ac_if;
909 
910 		lwkt_serialize_enter(ifp->if_serializer);
911 		jme_stop(sc);
912 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
913 		lwkt_serialize_exit(ifp->if_serializer);
914 
915 		ether_ifdetach(ifp);
916 	}
917 
918 	if (sc->jme_sysctl_tree != NULL)
919 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
920 
921 	if (sc->jme_miibus != NULL)
922 		device_delete_child(dev, sc->jme_miibus);
923 	bus_generic_detach(dev);
924 
925 	if (sc->jme_irq_res != NULL) {
926 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
927 				     sc->jme_irq_res);
928 	}
929 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
930 		pci_release_msi(dev);
931 
932 	if (sc->jme_mem_res != NULL) {
933 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
934 				     sc->jme_mem_res);
935 	}
936 
937 	jme_dma_free(sc);
938 
939 	return (0);
940 }
941 
942 static void
943 jme_sysctl_node(struct jme_softc *sc)
944 {
945 	int coal_max;
946 #ifdef JME_RSS_DEBUG
947 	char rx_ring_pkt[32];
948 	int r;
949 #endif
950 
951 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
952 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
953 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
954 				device_get_nameunit(sc->jme_dev),
955 				CTLFLAG_RD, 0, "");
956 	if (sc->jme_sysctl_tree == NULL) {
957 		device_printf(sc->jme_dev, "can't add sysctl node\n");
958 		return;
959 	}
960 
961 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
962 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
963 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
964 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
965 
966 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
967 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
968 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
969 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
970 
971 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
972 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
973 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
974 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
975 
976 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
977 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
978 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
979 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
980 
981 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
982 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
983 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
984 		       0, "RX desc count");
985 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
986 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
987 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
988 		       0, "TX desc count");
989 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
990 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
991 		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
992 		       0, "RX ring count");
993 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
994 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
995 		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
996 		       0, "RX ring in use");
997 #ifdef JME_RSS_DEBUG
998 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
999 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1000 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1001 		       0, "RSS debug level");
1002 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1003 		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
1004 		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
1005 				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1006 				rx_ring_pkt, CTLFLAG_RW,
1007 				&sc->jme_rx_ring_pkt[r],
1008 				0, "RXed packets");
1009 	}
1010 #endif
1011 
1012 	/*
1013 	 * Set default coalesce values.
1014 	 */
1015 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1016 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1017 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1018 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1019 
1020 	/*
1021 	 * Adjust coalesce values, in case the number of TX/RX
1022 	 * descriptors is set to a small value by the user.
1023 	 *
1024 	 * NOTE: coal_max will not be zero, since the number of descs
1025 	 * must be aligned to JME_NDESC_ALIGN (currently 16).
1026 	 */
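	/* E.g. with 512 Tx descriptors, jme_tx_coal_pkt is capped at 85. */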
1027 	coal_max = sc->jme_tx_desc_cnt / 6;
1028 	if (coal_max < sc->jme_tx_coal_pkt)
1029 		sc->jme_tx_coal_pkt = coal_max;
1030 
1031 	coal_max = sc->jme_rx_desc_cnt / 4;
1032 	if (coal_max < sc->jme_rx_coal_pkt)
1033 		sc->jme_rx_coal_pkt = coal_max;
1034 }
1035 
1036 static int
1037 jme_dma_alloc(struct jme_softc *sc)
1038 {
1039 	struct jme_txdesc *txd;
1040 	bus_dmamem_t dmem;
1041 	int error, i;
1042 
1043 	sc->jme_cdata.jme_txdesc =
1044 	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1045 		M_DEVBUF, M_WAITOK | M_ZERO);
1046 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1047 		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1048 		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1049 			M_DEVBUF, M_WAITOK | M_ZERO);
1050 	}
1051 
1052 	/* Create parent ring tag. */
1053 	error = bus_dma_tag_create(NULL,/* parent */
1054 	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
1055 	    sc->jme_lowaddr,		/* lowaddr */
1056 	    BUS_SPACE_MAXADDR,		/* highaddr */
1057 	    NULL, NULL,			/* filter, filterarg */
1058 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1059 	    0,				/* nsegments */
1060 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1061 	    0,				/* flags */
1062 	    &sc->jme_cdata.jme_ring_tag);
1063 	if (error) {
1064 		device_printf(sc->jme_dev,
1065 		    "could not create parent ring DMA tag.\n");
1066 		return error;
1067 	}
1068 
1069 	/*
1070 	 * Create DMA resources for the TX ring
1071 	 */
1072 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1073 			JME_TX_RING_ALIGN, 0,
1074 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1075 			JME_TX_RING_SIZE(sc),
1076 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1077 	if (error) {
1078 		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1079 		return error;
1080 	}
1081 	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1082 	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1083 	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1084 	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
1085 
1086 	/*
1087 	 * Create DMA resources for the RX rings
1088 	 */
1089 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1090 		error = jme_rxring_dma_alloc(sc, i);
1091 		if (error)
1092 			return error;
1093 	}
1094 
1095 	/* Create parent buffer tag. */
1096 	error = bus_dma_tag_create(NULL,/* parent */
1097 	    1, 0,			/* algnmnt, boundary */
1098 	    sc->jme_lowaddr,		/* lowaddr */
1099 	    BUS_SPACE_MAXADDR,		/* highaddr */
1100 	    NULL, NULL,			/* filter, filterarg */
1101 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1102 	    0,				/* nsegments */
1103 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1104 	    0,				/* flags */
1105 	    &sc->jme_cdata.jme_buffer_tag);
1106 	if (error) {
1107 		device_printf(sc->jme_dev,
1108 		    "could not create parent buffer DMA tag.\n");
1109 		return error;
1110 	}
1111 
1112 	/*
1113 	 * Create DMA resources for the shadow status block
1114 	 */
1115 	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1116 			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1117 			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1118 	if (error) {
1119 		device_printf(sc->jme_dev,
1120 		    "could not create shadow status block.\n");
1121 		return error;
1122 	}
1123 	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1124 	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1125 	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1126 	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1127 
1128 	/*
1129 	 * Create DMA resources for the TX buffers
1130 	 */
1131 
1132 	/* Create tag for Tx buffers. */
1133 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1134 	    1, 0,			/* algnmnt, boundary */
1135 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1136 	    BUS_SPACE_MAXADDR,		/* highaddr */
1137 	    NULL, NULL,			/* filter, filterarg */
1138 	    JME_JUMBO_FRAMELEN,		/* maxsize */
1139 	    JME_MAXTXSEGS,		/* nsegments */
1140 	    JME_MAXSEGSIZE,		/* maxsegsize */
1141 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1142 	    &sc->jme_cdata.jme_tx_tag);
1143 	if (error != 0) {
1144 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1145 		return error;
1146 	}
1147 
1148 	/* Create DMA maps for Tx buffers. */
1149 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1150 		txd = &sc->jme_cdata.jme_txdesc[i];
1151 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1152 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1153 				&txd->tx_dmamap);
1154 		if (error) {
1155 			int j;
1156 
1157 			device_printf(sc->jme_dev,
1158 			    "could not create %dth Tx dmamap.\n", i);
1159 
1160 			for (j = 0; j < i; ++j) {
1161 				txd = &sc->jme_cdata.jme_txdesc[j];
1162 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1163 						   txd->tx_dmamap);
1164 			}
1165 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1166 			sc->jme_cdata.jme_tx_tag = NULL;
1167 			return error;
1168 		}
1169 	}
1170 
1171 	/*
1172 	 * Create DMA resources for the RX buffers
1173 	 */
1174 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1175 		error = jme_rxbuf_dma_alloc(sc, i);
1176 		if (error)
1177 			return error;
1178 	}
1179 	return 0;
1180 }
1181 
1182 static void
1183 jme_dma_free(struct jme_softc *sc)
1184 {
1185 	struct jme_txdesc *txd;
1186 	struct jme_rxdesc *rxd;
1187 	struct jme_rxdata *rdata;
1188 	int i, r;
1189 
1190 	/* Tx ring */
1191 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1192 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1193 		    sc->jme_cdata.jme_tx_ring_map);
1194 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1195 		    sc->jme_cdata.jme_tx_ring,
1196 		    sc->jme_cdata.jme_tx_ring_map);
1197 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1198 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1199 	}
1200 
1201 	/* Rx ring */
1202 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1203 		rdata = &sc->jme_cdata.jme_rx_data[r];
1204 		if (rdata->jme_rx_ring_tag != NULL) {
1205 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1206 					  rdata->jme_rx_ring_map);
1207 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1208 					rdata->jme_rx_ring,
1209 					rdata->jme_rx_ring_map);
1210 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1211 			rdata->jme_rx_ring_tag = NULL;
1212 		}
1213 	}
1214 
1215 	/* Tx buffers */
1216 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1217 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1218 			txd = &sc->jme_cdata.jme_txdesc[i];
1219 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1220 			    txd->tx_dmamap);
1221 		}
1222 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1223 		sc->jme_cdata.jme_tx_tag = NULL;
1224 	}
1225 
1226 	/* Rx buffers */
1227 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1228 		rdata = &sc->jme_cdata.jme_rx_data[r];
1229 		if (rdata->jme_rx_tag != NULL) {
1230 			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1231 				rxd = &rdata->jme_rxdesc[i];
1232 				bus_dmamap_destroy(rdata->jme_rx_tag,
1233 						   rxd->rx_dmamap);
1234 			}
1235 			bus_dmamap_destroy(rdata->jme_rx_tag,
1236 					   rdata->jme_rx_sparemap);
1237 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1238 			rdata->jme_rx_tag = NULL;
1239 		}
1240 	}
1241 
1242 	/* Shadow status block. */
1243 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1244 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1245 		    sc->jme_cdata.jme_ssb_map);
1246 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1247 		    sc->jme_cdata.jme_ssb_block,
1248 		    sc->jme_cdata.jme_ssb_map);
1249 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1250 		sc->jme_cdata.jme_ssb_tag = NULL;
1251 	}
1252 
1253 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1254 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1255 		sc->jme_cdata.jme_buffer_tag = NULL;
1256 	}
1257 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1258 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1259 		sc->jme_cdata.jme_ring_tag = NULL;
1260 	}
1261 
1262 	if (sc->jme_cdata.jme_txdesc != NULL) {
1263 		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1264 		sc->jme_cdata.jme_txdesc = NULL;
1265 	}
1266 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1267 		rdata = &sc->jme_cdata.jme_rx_data[r];
1268 		if (rdata->jme_rxdesc != NULL) {
1269 			kfree(rdata->jme_rxdesc, M_DEVBUF);
1270 			rdata->jme_rxdesc = NULL;
1271 		}
1272 	}
1273 }
1274 
1275 /*
1276  *	Make sure the interface is stopped at reboot time.
1277  */
1278 static int
1279 jme_shutdown(device_t dev)
1280 {
1281 	return jme_suspend(dev);
1282 }
1283 
1284 #ifdef notyet
1285 /*
1286  * Unlike other ethernet controllers, the JMC250 requires the
1287  * link speed to be reset explicitly to 10/100Mbps, as a gigabit
1288  * link will consume more than 375mA.
1289  * Note that we reset the link speed to 10/100Mbps with
1290  * auto-negotiation, but we don't know whether that operation
1291  * will succeed, as we have no control after powering off.
1292  * If the renegotiation fails, WOL may not work. Running at
1293  * 1Gbps draws more than the 375mA at 3.3V specified in the
1294  * PCI specification, and that would result in power to the
1295  * ethernet controller being shut down completely.
1296  *
1297  * TODO
1298  *  Save the currently negotiated media speed/duplex/flow-control
1299  *  in the softc and restore the same link after resuming.
1300  *  PHY handling, such as powering down or resetting to 100Mbps,
1301  *  may be better handled in the PHY driver's suspend method.
1302  */
1303 static void
1304 jme_setlinkspeed(struct jme_softc *sc)
1305 {
1306 	struct mii_data *mii;
1307 	int aneg, i;
1308 
1309 	JME_LOCK_ASSERT(sc);
1310 
1311 	mii = device_get_softc(sc->jme_miibus);
1312 	mii_pollstat(mii);
1313 	aneg = 0;
1314 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1315 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1316 		case IFM_10_T:
1317 		case IFM_100_TX:
1318 			return;
1319 		case IFM_1000_T:
1320 			aneg++;
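			/* FALLTHROUGH */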
1321 		default:
1322 			break;
1323 		}
1324 	}
1325 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1326 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1327 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1328 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1329 	    BMCR_AUTOEN | BMCR_STARTNEG);
1330 	DELAY(1000);
1331 	if (aneg != 0) {
1332 		/* Poll link state until jme(4) gets a 10/100 link. */
1333 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1334 			mii_pollstat(mii);
1335 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1336 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1337 				case IFM_10_T:
1338 				case IFM_100_TX:
1339 					jme_mac_config(sc);
1340 					return;
1341 				default:
1342 					break;
1343 				}
1344 			}
1345 			JME_UNLOCK(sc);
1346 			pause("jmelnk", hz);
1347 			JME_LOCK(sc);
1348 		}
1349 		if (i == MII_ANEGTICKS_GIGE)
1350 			device_printf(sc->jme_dev, "establishing link failed, "
1351 			    "WOL may not work!");
1352 	}
1353 	/*
1354 	 * No link; force the MAC to a 100Mbps, full-duplex link.
1355 	 * This is the last resort and may or may not work.
1356 	 */
1357 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1358 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1359 	jme_mac_config(sc);
1360 }
1361 
1362 static void
1363 jme_setwol(struct jme_softc *sc)
1364 {
1365 	struct ifnet *ifp = &sc->arpcom.ac_if;
1366 	uint32_t gpr, pmcs;
1367 	uint16_t pmstat;
1368 	int pmc;
1369 
1370 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1371 		/* No PME capability, PHY power down. */
1372 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1373 		    MII_BMCR, BMCR_PDOWN);
1374 		return;
1375 	}
1376 
1377 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1378 	pmcs = CSR_READ_4(sc, JME_PMCS);
1379 	pmcs &= ~PMCS_WOL_ENB_MASK;
1380 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1381 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1382 		/* Enable PME message. */
1383 		gpr |= GPREG0_PME_ENB;
1384 		/* For gigabit controllers, reset link speed to 10/100. */
1385 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1386 			jme_setlinkspeed(sc);
1387 	}
1388 
1389 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1390 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1391 
1392 	/* Request PME. */
1393 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1394 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1395 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1396 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1397 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1398 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1399 		/* No WOL, PHY power down. */
1400 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1401 		    MII_BMCR, BMCR_PDOWN);
1402 	}
1403 }
1404 #endif
1405 
1406 static int
1407 jme_suspend(device_t dev)
1408 {
1409 	struct jme_softc *sc = device_get_softc(dev);
1410 	struct ifnet *ifp = &sc->arpcom.ac_if;
1411 
1412 	lwkt_serialize_enter(ifp->if_serializer);
1413 	jme_stop(sc);
1414 #ifdef notyet
1415 	jme_setwol(sc);
1416 #endif
1417 	lwkt_serialize_exit(ifp->if_serializer);
1418 
1419 	return (0);
1420 }
1421 
1422 static int
1423 jme_resume(device_t dev)
1424 {
1425 	struct jme_softc *sc = device_get_softc(dev);
1426 	struct ifnet *ifp = &sc->arpcom.ac_if;
1427 #ifdef notyet
1428 	int pmc;
1429 #endif
1430 
1431 	lwkt_serialize_enter(ifp->if_serializer);
1432 
1433 #ifdef notyet
1434 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1435 		uint16_t pmstat;
1436 
1437 		pmstat = pci_read_config(sc->jme_dev,
1438 		    pmc + PCIR_POWER_STATUS, 2);
1439 		/* Disable PME and clear PME status. */
1440 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1441 		pci_write_config(sc->jme_dev,
1442 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1443 	}
1444 #endif
1445 
1446 	if (ifp->if_flags & IFF_UP)
1447 		jme_init(sc);
1448 
1449 	lwkt_serialize_exit(ifp->if_serializer);
1450 
1451 	return (0);
1452 }
1453 
1454 static int
1455 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1456 {
1457 	struct jme_txdesc *txd;
1458 	struct jme_desc *desc;
1459 	struct mbuf *m;
1460 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1461 	int maxsegs, nsegs;
1462 	int error, i, prod, symbol_desc;
1463 	uint32_t cflags, flag64;
1464 
1465 	M_ASSERTPKTHDR((*m_head));
1466 
1467 	prod = sc->jme_cdata.jme_tx_prod;
1468 	txd = &sc->jme_cdata.jme_txdesc[prod];
1469 
1470 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1471 		symbol_desc = 1;
1472 	else
1473 		symbol_desc = 0;
1474 
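	/*
	 * Leave room for the reserved descriptors and, when the
	 * 64-bit descriptor format is in use, for the extra symbol
	 * descriptor set up below.
	 */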
1475 	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1476 		  (JME_TXD_RSVD + symbol_desc);
1477 	if (maxsegs > JME_MAXTXSEGS)
1478 		maxsegs = JME_MAXTXSEGS;
1479 	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1480 		("not enough segments %d\n", maxsegs));
1481 
1482 	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1483 			txd->tx_dmamap, m_head,
1484 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1485 	if (error)
1486 		goto fail;
1487 
1488 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1489 			BUS_DMASYNC_PREWRITE);
1490 
1491 	m = *m_head;
1492 	cflags = 0;
1493 
1494 	/* Configure checksum offload. */
1495 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1496 		cflags |= JME_TD_IPCSUM;
1497 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1498 		cflags |= JME_TD_TCPCSUM;
1499 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1500 		cflags |= JME_TD_UDPCSUM;
1501 
1502 	/* Configure VLAN. */
1503 	if (m->m_flags & M_VLANTAG) {
1504 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1505 		cflags |= JME_TD_VLAN_TAG;
1506 	}
1507 
1508 	desc = &sc->jme_cdata.jme_tx_ring[prod];
1509 	desc->flags = htole32(cflags);
1510 	desc->addr_hi = htole32(m->m_pkthdr.len);
1511 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1512 		/*
1513 		 * Use the 64-bit TX descriptor chain format.
1514 		 *
1515 		 * The first TX desc of the chain, which is set up here,
1516 		 * is just a symbol TX desc carrying no payload.
1517 		 */
1518 		flag64 = JME_TD_64BIT;
1519 		desc->buflen = 0;
1520 		desc->addr_lo = 0;
1521 
1522 		/* No effective TX desc is consumed */
1523 		i = 0;
1524 	} else {
1525 		/*
1526 		 * Use the 32-bit TX descriptor chain format.
1527 		 *
1528 		 * The first TX desc of the chain, which is set up here,
1529 		 * is an effective TX desc carrying the first segment of
1530 		 * the mbuf chain.
1531 		 */
1532 		flag64 = 0;
1533 		desc->buflen = htole32(txsegs[0].ds_len);
1534 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1535 
1536 		/* One effective TX desc is consumed */
1537 		i = 1;
1538 	}
1539 	sc->jme_cdata.jme_tx_cnt++;
1540 	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1541 		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1542 	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1543 
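	/*
	 * After the segment loop below, tx_ndesc is nsegs in the
	 * 32-bit case and nsegs + 1 in the 64-bit case (the extra
	 * descriptor being the symbol descriptor above).
	 */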
1544 	txd->tx_ndesc = 1 - i;
1545 	for (; i < nsegs; i++) {
1546 		desc = &sc->jme_cdata.jme_tx_ring[prod];
1547 		desc->flags = htole32(JME_TD_OWN | flag64);
1548 		desc->buflen = htole32(txsegs[i].ds_len);
1549 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1550 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1551 
1552 		sc->jme_cdata.jme_tx_cnt++;
1553 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1554 			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1555 		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1556 	}
1557 
1558 	/* Update producer index. */
1559 	sc->jme_cdata.jme_tx_prod = prod;
1560 	/*
1561 	 * Finally, request an interrupt and give ownership of the
1562 	 * first descriptor to the hardware.
1563 	 */
1564 	desc = txd->tx_desc;
1565 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1566 
1567 	txd->tx_m = m;
1568 	txd->tx_ndesc += nsegs;
1569 
1570 	return 0;
1571 fail:
1572 	m_freem(*m_head);
1573 	*m_head = NULL;
1574 	return error;
1575 }
1576 
1577 static void
1578 jme_start(struct ifnet *ifp)
1579 {
1580 	struct jme_softc *sc = ifp->if_softc;
1581 	struct mbuf *m_head;
1582 	int enq = 0;
1583 
1584 	ASSERT_SERIALIZED(ifp->if_serializer);
1585 
1586 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1587 		ifq_purge(&ifp->if_snd);
1588 		return;
1589 	}
1590 
1591 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1592 		return;
1593 
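	/*
	 * If the ring is above its high-water mark, try to reclaim
	 * completed Tx descriptors before queueing more packets.
	 */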
1594 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1595 		jme_txeof(sc);
1596 
1597 	while (!ifq_is_empty(&ifp->if_snd)) {
1598 		/*
1599 		 * Check the number of available TX descs; always
1600 		 * leave JME_TXD_RSVD TX descs free.
1601 		 */
1602 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1603 		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1604 			ifp->if_flags |= IFF_OACTIVE;
1605 			break;
1606 		}
1607 
1608 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1609 		if (m_head == NULL)
1610 			break;
1611 
1612 		/*
1613 		 * Pack the data into the transmit ring. If we
1614 		 * don't have room, set the OACTIVE flag and wait
1615 		 * for the NIC to drain the ring.
1616 		 */
1617 		if (jme_encap(sc, &m_head)) {
1618 			KKASSERT(m_head == NULL);
1619 			ifp->if_oerrors++;
1620 			ifp->if_flags |= IFF_OACTIVE;
1621 			break;
1622 		}
1623 		enq++;
1624 
1625 		/*
1626 		 * If there's a BPF listener, bounce a copy of this frame
1627 		 * to him.
1628 		 */
1629 		ETHER_BPF_MTAP(ifp, m_head);
1630 	}
1631 
1632 	if (enq > 0) {
1633 		/*
1634 		 * Reading TXCSR takes a very long time under heavy load,
1635 		 * so cache the TXCSR value and write the ORed value, with
1636 		 * the kick command, to the TXCSR. This saves one register
1637 		 * access cycle.
1638 		 */
1639 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1640 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1641 		/* Set a timeout in case the chip goes out to lunch. */
1642 		ifp->if_timer = JME_TX_TIMEOUT;
1643 	}
1644 }
1645 
1646 static void
1647 jme_watchdog(struct ifnet *ifp)
1648 {
1649 	struct jme_softc *sc = ifp->if_softc;
1650 
1651 	ASSERT_SERIALIZED(ifp->if_serializer);
1652 
1653 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1654 		if_printf(ifp, "watchdog timeout (missed link)\n");
1655 		ifp->if_oerrors++;
1656 		jme_init(sc);
1657 		return;
1658 	}
1659 
1660 	jme_txeof(sc);
1661 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1662 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1663 			  "-- recovering\n");
1664 		if (!ifq_is_empty(&ifp->if_snd))
1665 			if_devstart(ifp);
1666 		return;
1667 	}
1668 
1669 	if_printf(ifp, "watchdog timeout\n");
1670 	ifp->if_oerrors++;
1671 	jme_init(sc);
1672 	if (!ifq_is_empty(&ifp->if_snd))
1673 		if_devstart(ifp);
1674 }
1675 
1676 static int
1677 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1678 {
1679 	struct jme_softc *sc = ifp->if_softc;
1680 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1681 	struct ifreq *ifr = (struct ifreq *)data;
1682 	int error = 0, mask;
1683 
1684 	ASSERT_SERIALIZED(ifp->if_serializer);
1685 
1686 	switch (cmd) {
1687 	case SIOCSIFMTU:
1688 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1689 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1690 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1691 			error = EINVAL;
1692 			break;
1693 		}
1694 
1695 		if (ifp->if_mtu != ifr->ifr_mtu) {
1696 			/*
1697 			 * No special configuration is required when the
1698 			 * interface MTU is changed, but the availability of
1699 			 * Tx checksum offload should be checked against the
1700 			 * new MTU size, as the FIFO size is just 2K.
1701 			 */
1702 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1703 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1704 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1705 			}
1706 			ifp->if_mtu = ifr->ifr_mtu;
1707 			if (ifp->if_flags & IFF_RUNNING)
1708 				jme_init(sc);
1709 		}
1710 		break;
1711 
1712 	case SIOCSIFFLAGS:
1713 		if (ifp->if_flags & IFF_UP) {
1714 			if (ifp->if_flags & IFF_RUNNING) {
1715 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1716 				    (IFF_PROMISC | IFF_ALLMULTI))
1717 					jme_set_filter(sc);
1718 			} else {
1719 				jme_init(sc);
1720 			}
1721 		} else {
1722 			if (ifp->if_flags & IFF_RUNNING)
1723 				jme_stop(sc);
1724 		}
1725 		sc->jme_if_flags = ifp->if_flags;
1726 		break;
1727 
1728 	case SIOCADDMULTI:
1729 	case SIOCDELMULTI:
1730 		if (ifp->if_flags & IFF_RUNNING)
1731 			jme_set_filter(sc);
1732 		break;
1733 
1734 	case SIOCSIFMEDIA:
1735 	case SIOCGIFMEDIA:
1736 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1737 		break;
1738 
1739 	case SIOCSIFCAP:
1740 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1741 
1742 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1743 			ifp->if_capenable ^= IFCAP_TXCSUM;
1744 			if (IFCAP_TXCSUM & ifp->if_capenable)
1745 				ifp->if_hwassist |= JME_CSUM_FEATURES;
1746 			else
1747 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1748 		}
1749 		if (mask & IFCAP_RXCSUM) {
1750 			uint32_t reg;
1751 
1752 			ifp->if_capenable ^= IFCAP_RXCSUM;
1753 			reg = CSR_READ_4(sc, JME_RXMAC);
1754 			reg &= ~RXMAC_CSUM_ENB;
1755 			if (ifp->if_capenable & IFCAP_RXCSUM)
1756 				reg |= RXMAC_CSUM_ENB;
1757 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1758 		}
1759 
1760 		if (mask & IFCAP_VLAN_HWTAGGING) {
1761 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1762 			jme_set_vlan(sc);
1763 		}
1764 
1765 		if (mask & IFCAP_RSS) {
1766 			ifp->if_capenable ^= IFCAP_RSS;
1767 			if (ifp->if_flags & IFF_RUNNING)
1768 				jme_init(sc);
1769 		}
1770 		break;
1771 
1772 	default:
1773 		error = ether_ioctl(ifp, cmd, data);
1774 		break;
1775 	}
1776 	return (error);
1777 }
1778 
1779 static void
1780 jme_mac_config(struct jme_softc *sc)
1781 {
1782 	struct mii_data *mii;
1783 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1784 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1785 
1786 	mii = device_get_softc(sc->jme_miibus);
1787 
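	/* Briefly assert a MAC reset, then clear it before reprogramming. */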
1788 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1789 	DELAY(10);
1790 	CSR_WRITE_4(sc, JME_GHC, 0);
1791 	ghc = 0;
1792 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1793 	rxmac &= ~RXMAC_FC_ENB;
1794 	txmac = CSR_READ_4(sc, JME_TXMAC);
1795 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1796 	txpause = CSR_READ_4(sc, JME_TXPFC);
1797 	txpause &= ~TXPFC_PAUSE_ENB;
1798 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1799 		ghc |= GHC_FULL_DUPLEX;
1800 		rxmac &= ~RXMAC_COLL_DET_ENB;
1801 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1802 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1803 		    TXMAC_FRAME_BURST);
1804 #ifdef notyet
1805 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1806 			txpause |= TXPFC_PAUSE_ENB;
1807 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1808 			rxmac |= RXMAC_FC_ENB;
1809 #endif
1810 		/* Disable retry transmit timer/retry limit. */
1811 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1812 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1813 	} else {
1814 		rxmac |= RXMAC_COLL_DET_ENB;
1815 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1816 		/* Enable retry transmit timer/retry limit. */
1817 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1818 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1819 	}
1820 
1821 	/*
1822 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1823 	 */
1824 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1825 	gp1 &= ~GPREG1_WA_HDX;
1826 
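	/*
	 * GPREG1_WA_HDX selects the chip's half-duplex workaround; it is
	 * set below only for 10/100 half-duplex links.
	 */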
1827 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1828 		hdx = 1;
1829 
1830 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1831 	case IFM_10_T:
1832 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1833 		if (hdx)
1834 			gp1 |= GPREG1_WA_HDX;
1835 		break;
1836 
1837 	case IFM_100_TX:
1838 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1839 		if (hdx)
1840 			gp1 |= GPREG1_WA_HDX;
1841 
1842 		/*
1843 		 * Use extended FIFO depth to work around CRC errors
1844 		 * emitted by chips earlier than the JMC250B.
1845 		 */
1846 		phyconf = JMPHY_CONF_EXTFIFO;
1847 		break;
1848 
1849 	case IFM_1000_T:
1850 		if (sc->jme_caps & JME_CAP_FASTETH)
1851 			break;
1852 
1853 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1854 		if (hdx)
1855 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1856 		break;
1857 
1858 	default:
1859 		break;
1860 	}
1861 	CSR_WRITE_4(sc, JME_GHC, ghc);
1862 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1863 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1864 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1865 
1866 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1867 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1868 				    JMPHY_CONF, phyconf);
1869 	}
1870 	if (sc->jme_workaround & JME_WA_HDX)
1871 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1872 }
1873 
1874 static void
1875 jme_intr(void *xsc)
1876 {
1877 	struct jme_softc *sc = xsc;
1878 	struct ifnet *ifp = &sc->arpcom.ac_if;
1879 	uint32_t status;
1880 	int r;
1881 
1882 	ASSERT_SERIALIZED(ifp->if_serializer);
1883 
1884 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1885 	if (status == 0 || status == 0xFFFFFFFF)
1886 		return;
1887 
1888 	/* Disable interrupts. */
1889 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1890 
1891 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1892 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1893 		goto back;
1894 
1895 	/* Reset PCC counter/timer and Ack interrupts. */
1896 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1897 
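	/*
	 * For each queue that signalled a coalescing interrupt, write
	 * its coalescing bits back together with its COMP bit; this
	 * acks the interrupt and restarts that queue's PCC
	 * counter/timer.
	 */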
1898 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1899 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1900 
1901 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1902 		if (status & jme_rx_status[r].jme_coal) {
1903 			status |= jme_rx_status[r].jme_coal |
1904 				  jme_rx_status[r].jme_comp;
1905 		}
1906 	}
1907 
1908 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1909 
1910 	if (ifp->if_flags & IFF_RUNNING) {
1911 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1912 			jme_rx_intr(sc, status);
1913 
1914 		if (status & INTR_RXQ_DESC_EMPTY) {
1915 			/*
1916 			 * Notify the hardware that new Rx buffers are
1917 			 * available.  Reading RXCSR takes a very long time
1918 			 * under heavy load, so use the cached RXCSR value
1919 			 * and write it, ORed with the kick command, back to
1920 			 * RXCSR.  This saves one register access cycle.
1921 			 */
1922 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1923 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1924 		}
1925 
1926 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1927 			jme_txeof(sc);
1928 			if (!ifq_is_empty(&ifp->if_snd))
1929 				if_devstart(ifp);
1930 		}
1931 	}
1932 back:
1933 	/* Reenable interrupts. */
1934 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1935 }
1936 
1937 static void
1938 jme_txeof(struct jme_softc *sc)
1939 {
1940 	struct ifnet *ifp = &sc->arpcom.ac_if;
1941 	struct jme_txdesc *txd;
1942 	uint32_t status;
1943 	int cons, nsegs;
1944 
1945 	cons = sc->jme_cdata.jme_tx_cons;
1946 	if (cons == sc->jme_cdata.jme_tx_prod)
1947 		return;
1948 
1949 	/*
1950 	 * Go through our Tx list and free mbufs for those
1951 	 * frames which have been transmitted.
1952 	 */
1953 	while (cons != sc->jme_cdata.jme_tx_prod) {
1954 		txd = &sc->jme_cdata.jme_txdesc[cons];
1955 		KASSERT(txd->tx_m != NULL,
1956 			("%s: freeing NULL mbuf!\n", __func__));
1957 
1958 		status = le32toh(txd->tx_desc->flags);
1959 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1960 			break;
1961 
1962 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1963 			ifp->if_oerrors++;
1964 		} else {
1965 			ifp->if_opackets++;
1966 			if (status & JME_TD_COLLISION) {
1967 				ifp->if_collisions +=
1968 				    le32toh(txd->tx_desc->buflen) &
1969 				    JME_TD_BUF_LEN_MASK;
1970 			}
1971 		}
1972 
1973 		/*
1974 		 * Only the first descriptor of a multi-descriptor
1975 		 * transmission is updated, so the driver has to skip the
1976 		 * entire chain of buffers for the transmitted frame. In
1977 		 * other words, the JME_TD_OWN bit is valid only in the
1978 		 * first descriptor of a multi-descriptor transmission.
1979 		 */
1980 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1981 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
1982 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
1983 		}
1984 
1985 		/* Reclaim transferred mbufs. */
1986 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1987 		m_freem(txd->tx_m);
1988 		txd->tx_m = NULL;
1989 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1990 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
1991 			("%s: Active Tx desc counter was garbled\n", __func__));
1992 		txd->tx_ndesc = 0;
1993 	}
1994 	sc->jme_cdata.jme_tx_cons = cons;
1995 
1996 	if (sc->jme_cdata.jme_tx_cnt == 0)
1997 		ifp->if_timer = 0;
1998 
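	/*
	 * Clear OACTIVE once there is room for a worst-case frame
	 * (jme_txd_spare descriptors) on top of the reserved
	 * descriptors.
	 */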
1999 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2000 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
2001 		ifp->if_flags &= ~IFF_OACTIVE;
2002 }
2003 
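/*
 * Hand 'count' Rx descriptors starting at 'cons' back to the hardware
 * unchanged, reusing the mbufs that are still attached to them.
 */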
2004 static __inline void
2005 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2006 {
2007 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2008 	int i;
2009 
2010 	for (i = 0; i < count; ++i) {
2011 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2012 
2013 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2014 		desc->buflen = htole32(MCLBYTES);
2015 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2016 	}
2017 }
2018 
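/*
 * Translate Rx descriptor flags into a pktinfo for packet steering;
 * returns NULL if the frame cannot be classified.
 */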
2019 static __inline struct pktinfo *
2020 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2021 {
2022 	if (flags & JME_RD_IPV4)
2023 		pi->pi_netisr = NETISR_IP;
2024 	else if (flags & JME_RD_IPV6)
2025 		pi->pi_netisr = NETISR_IPV6;
2026 	else
2027 		return NULL;
2028 
2029 	pi->pi_flags = 0;
2030 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2031 
2032 	if (flags & JME_RD_MORE_FRAG)
2033 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2034 	else if (flags & JME_RD_TCP)
2035 		pi->pi_l3proto = IPPROTO_TCP;
2036 	else if (flags & JME_RD_UDP)
2037 		pi->pi_l3proto = IPPROTO_UDP;
2038 	else
2039 		pi = NULL;
2040 	return pi;
2041 }
2042 
2043 /* Receive a frame. */
2044 static void
2045 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2046 {
2047 	struct ifnet *ifp = &sc->arpcom.ac_if;
2048 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2049 	struct jme_desc *desc;
2050 	struct jme_rxdesc *rxd;
2051 	struct mbuf *mp, *m;
2052 	uint32_t flags, status, hash, hashinfo;
2053 	int cons, count, nsegs;
2054 
2055 	cons = rdata->jme_rx_cons;
2056 	desc = &rdata->jme_rx_ring[cons];
2057 	flags = le32toh(desc->flags);
2058 	status = le32toh(desc->buflen);
2059 	hash = le32toh(desc->addr_hi);
2060 	hashinfo = le32toh(desc->addr_lo);
2061 	nsegs = JME_RX_NSEGS(status);
2062 
2063 	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
2064 			"hash 0x%08x, hash info 0x%08x\n",
2065 			ring, flags, hash, hashinfo);
2066 
2067 	if (status & JME_RX_ERR_STAT) {
2068 		ifp->if_ierrors++;
2069 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2070 #ifdef JME_SHOW_ERRORS
2071 		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2072 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2073 #endif
2074 		rdata->jme_rx_cons += nsegs;
2075 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2076 		return;
2077 	}
2078 
2079 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2080 	for (count = 0; count < nsegs; count++,
2081 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2082 		rxd = &rdata->jme_rxdesc[cons];
2083 		mp = rxd->rx_m;
2084 
2085 		/* Add a new receive buffer to the ring. */
2086 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2087 			ifp->if_iqdrops++;
2088 			/* Reuse buffer. */
2089 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2090 			if (rdata->jme_rxhead != NULL) {
2091 				m_freem(rdata->jme_rxhead);
2092 				JME_RXCHAIN_RESET(sc, ring);
2093 			}
2094 			break;
2095 		}
2096 
2097 		/*
2098 		 * Assume we've received a full-sized frame.  The actual
2099 		 * size is fixed up when we encounter the end of a
2100 		 * multi-segment frame.
2101 		 */
2102 		mp->m_len = MCLBYTES;
2103 
2104 		/* Chain received mbufs. */
2105 		if (rdata->jme_rxhead == NULL) {
2106 			rdata->jme_rxhead = mp;
2107 			rdata->jme_rxtail = mp;
2108 		} else {
2109 			/*
2110 			 * The receive processor can handle a maximum frame
2111 			 * size of 65535 bytes.
2112 			 */
2113 			rdata->jme_rxtail->m_next = mp;
2114 			rdata->jme_rxtail = mp;
2115 		}
2116 
2117 		if (count == nsegs - 1) {
2118 			struct pktinfo pi0, *pi;
2119 
2120 			/* Last desc. for this frame. */
2121 			m = rdata->jme_rxhead;
2122 			m->m_pkthdr.len = rdata->jme_rxlen;
2123 			if (nsegs > 1) {
2124 				/* Set first mbuf size. */
2125 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2126 				/* Set last mbuf size. */
2127 				mp->m_len = rdata->jme_rxlen -
2128 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2129 				    (MCLBYTES * (nsegs - 2)));
2130 			} else {
2131 				m->m_len = rdata->jme_rxlen;
2132 			}
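			/*
			 * Worked example, assuming MCLBYTES is 2048 and
			 * JME_RX_PAD_BYTES is 10: a frame received as
			 * nsegs = 2 with jme_rxlen = 3990 ends up with
			 * 2048 - 10 = 2038 bytes in the first mbuf and
			 * 3990 - 2038 = 1952 bytes in the last one.
			 */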
2133 			m->m_pkthdr.rcvif = ifp;
2134 
2135 			/*
2136 			 * Account for the 10 bytes of auto padding which is
2137 			 * used to align the IP header on a 32-bit boundary.
2138 			 * Also note that the CRC bytes are automatically
2139 			 * stripped by the hardware.
2140 			 */
2141 			m->m_data += JME_RX_PAD_BYTES;
2142 
2143 			/* Set checksum information. */
2144 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2145 			    (flags & JME_RD_IPV4)) {
2146 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2147 				if (flags & JME_RD_IPCSUM)
2148 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2149 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2150 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2151 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2152 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2153 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2154 					m->m_pkthdr.csum_flags |=
2155 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2156 					m->m_pkthdr.csum_data = 0xffff;
2157 				}
2158 			}
2159 
2160 			/* Check for VLAN tagged packets. */
2161 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2162 			    (flags & JME_RD_VLAN_TAG)) {
2163 				m->m_pkthdr.ether_vlantag =
2164 				    flags & JME_RD_VLAN_MASK;
2165 				m->m_flags |= M_VLANTAG;
2166 			}
2167 
2168 			ifp->if_ipackets++;
2169 
2170 			if (ifp->if_capenable & IFCAP_RSS)
2171 				pi = jme_pktinfo(&pi0, flags);
2172 			else
2173 				pi = NULL;
2174 
2175 			if (pi != NULL &&
2176 			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
2177 				m->m_flags |= M_HASH;
2178 				m->m_pkthdr.hash = toeplitz_hash(hash);
2179 			}
2180 
2181 #ifdef JME_RSS_DEBUG
2182 			if (pi != NULL) {
2183 				JME_RSS_DPRINTF(sc, 10,
2184 				    "isr %d flags %08x, l3 %d %s\n",
2185 				    pi->pi_netisr, pi->pi_flags,
2186 				    pi->pi_l3proto,
2187 				    (m->m_flags & M_HASH) ? "hash" : "");
2188 			}
2189 #endif
2190 
2191 			/* Pass it on. */
2192 			ether_input_chain(ifp, m, pi, chain);
2193 
2194 			/* Reset mbuf chains. */
2195 			JME_RXCHAIN_RESET(sc, ring);
2196 #ifdef JME_RSS_DEBUG
2197 			sc->jme_rx_ring_pkt[ring]++;
2198 #endif
2199 		}
2200 	}
2201 
2202 	rdata->jme_rx_cons += nsegs;
2203 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2204 }
2205 
2206 static int
2207 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2208 		int count)
2209 {
2210 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2211 	struct jme_desc *desc;
2212 	int nsegs, prog, pktlen;
2213 
2214 	prog = 0;
2215 	for (;;) {
2216 #ifdef DEVICE_POLLING
2217 		if (count >= 0 && count-- == 0)
2218 			break;
2219 #endif
2220 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2221 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2222 			break;
2223 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2224 			break;
2225 
2226 		/*
2227 		 * Check the segment count against the received byte
2228 		 * count.  A mismatch indicates that the hardware is
2229 		 * still updating the Rx descriptors.  I'm not sure
2230 		 * whether this check is needed.
2231 		 */
2232 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2233 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2234 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2235 			if_printf(&sc->arpcom.ac_if, "RX fragment count (%d) "
2236 				  "and packet size (%d) mismatch\n",
2237 				  nsegs, pktlen);
2238 			break;
2239 		}
2240 
2241 		/* Received a frame. */
2242 		jme_rxpkt(sc, ring, chain);
2243 		prog++;
2244 	}
2245 	return prog;
2246 }
2247 
2248 static void
2249 jme_rxeof(struct jme_softc *sc, int ring)
2250 {
2251 	struct mbuf_chain chain[MAXCPU];
2252 
2253 	ether_input_chain_init(chain);
2254 	if (jme_rxeof_chain(sc, ring, chain, -1))
2255 		ether_input_dispatch(chain);
2256 }
2257 
2258 static void
2259 jme_tick(void *xsc)
2260 {
2261 	struct jme_softc *sc = xsc;
2262 	struct ifnet *ifp = &sc->arpcom.ac_if;
2263 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2264 
2265 	lwkt_serialize_enter(ifp->if_serializer);
2266 
2267 	mii_tick(mii);
2268 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2269 
2270 	lwkt_serialize_exit(ifp->if_serializer);
2271 }
2272 
2273 static void
2274 jme_reset(struct jme_softc *sc)
2275 {
2276 	uint32_t val;
2277 
2278 	/* Make sure that TX and RX are stopped */
2279 	jme_stop_tx(sc);
2280 	jme_stop_rx(sc);
2281 
2282 	/* Start reset */
2283 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2284 	DELAY(20);
2285 
2286 	/*
2287 	 * While the reset bit is still asserted, do the following:
2288 	 */
2289 
2290 	/* Disable TXMAC and TXOFL clock sources */
2291 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2292 	/* Disable RXMAC clock source */
2293 	val = CSR_READ_4(sc, JME_GPREG1);
2294 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2295 	/* Flush */
2296 	CSR_READ_4(sc, JME_GHC);
2297 
2298 	/* Stop reset */
2299 	CSR_WRITE_4(sc, JME_GHC, 0);
2300 	/* Flush */
2301 	CSR_READ_4(sc, JME_GHC);
2302 
2303 	/*
2304 	 * After the reset bit has been deasserted, do the following:
2305 	 */
2306 
2307 	/* Enable TXMAC and TXOFL clock sources */
2308 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2309 	/* Enable RXMAC clock source */
2310 	val = CSR_READ_4(sc, JME_GPREG1);
2311 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2312 	/* Flush */
2313 	CSR_READ_4(sc, JME_GHC);
2314 
2315 	/* Disable TXMAC and TXOFL clock sources */
2316 	CSR_WRITE_4(sc, JME_GHC, 0);
2317 	/* Disable RXMAC clock source */
2318 	val = CSR_READ_4(sc, JME_GPREG1);
2319 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2320 	/* Flush */
2321 	CSR_READ_4(sc, JME_GHC);
2322 
2323 	/* Enable TX and RX */
2324 	val = CSR_READ_4(sc, JME_TXCSR);
2325 	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2326 	val = CSR_READ_4(sc, JME_RXCSR);
2327 	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2328 	/* Flush */
2329 	CSR_READ_4(sc, JME_TXCSR);
2330 	CSR_READ_4(sc, JME_RXCSR);
2331 
2332 	/* Enable TXMAC and TXOFL clock sources */
2333 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2334 	/* Enable RXMAC clock source */
2335 	val = CSR_READ_4(sc, JME_GPREG1);
2336 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2337 	/* Flush */
2338 	CSR_READ_4(sc, JME_GHC);
2339 
2340 	/* Stop TX and RX */
2341 	jme_stop_tx(sc);
2342 	jme_stop_rx(sc);
2343 }
2344 
2345 static void
2346 jme_init(void *xsc)
2347 {
2348 	struct jme_softc *sc = xsc;
2349 	struct ifnet *ifp = &sc->arpcom.ac_if;
2350 	struct mii_data *mii;
2351 	uint8_t eaddr[ETHER_ADDR_LEN];
2352 	bus_addr_t paddr;
2353 	uint32_t reg;
2354 	int error, r;
2355 
2356 	ASSERT_SERIALIZED(ifp->if_serializer);
2357 
2358 	/*
2359 	 * Cancel any pending I/O.
2360 	 */
2361 	jme_stop(sc);
2362 
2363 	/*
2364 	 * Reset the chip to a known state.
2365 	 */
2366 	jme_reset(sc);
2367 
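	/*
	 * Compute the worst-case number of Tx descriptors a single
	 * frame can consume: one per MCLBYTES-sized cluster needed to
	 * hold an MTU-sized frame plus the VLAN/Ethernet header.
	 */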
2368 	sc->jme_txd_spare =
2369 	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2370 	KKASSERT(sc->jme_txd_spare >= 1);
2371 
2372 	/*
2373 	 * If we use 64bit address mode for transmitting, each Tx request
2374 	 * needs one more symbol descriptor.
2375 	 */
2376 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2377 		sc->jme_txd_spare += 1;
2378 
2379 	if (ifp->if_capenable & IFCAP_RSS)
2380 		jme_enable_rss(sc);
2381 	else
2382 		jme_disable_rss(sc);
2383 
2384 	/* Init RX descriptors */
2385 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2386 		error = jme_init_rx_ring(sc, r);
2387 		if (error) {
2388 			if_printf(ifp, "initialization failed: "
2389 				  "no memory for %dth RX ring.\n", r);
2390 			jme_stop(sc);
2391 			return;
2392 		}
2393 	}
2394 
2395 	/* Init TX descriptors */
2396 	jme_init_tx_ring(sc);
2397 
2398 	/* Initialize shadow status block. */
2399 	jme_init_ssb(sc);
2400 
2401 	/* Reprogram the station address. */
2402 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2403 	CSR_WRITE_4(sc, JME_PAR0,
2404 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2405 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2406 
2407 	/*
2408 	 * Configure Tx queue.
2409 	 *  Tx priority queue weight value : 0
2410 	 *  Tx FIFO threshold for processing next packet : 16QW
2411 	 *  Maximum Tx DMA length : 512
2412 	 *  Allow Tx DMA burst.
2413 	 */
2414 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2415 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2416 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2417 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2418 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2419 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2420 
2421 	/* Set Tx descriptor counter. */
2422 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2423 
2424 	/* Set Tx ring address to the hardware. */
2425 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2426 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2427 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2428 
2429 	/* Configure TxMAC parameters. */
2430 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2431 	reg |= TXMAC_THRESH_1_PKT;
2432 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2433 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2434 
2435 	/*
2436 	 * Configure Rx queue.
2437 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2438 	 *  FIFO threshold for processing next packet : 128QW
2439 	 *  Rx queue 0 select
2440 	 *  Max Rx DMA length : 128
2441 	 *  Rx descriptor retry : 32
2442 	 *  Rx descriptor retry time gap : 256ns
2443 	 *  Don't receive runt/bad frames.
2444 	 */
2445 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2446 #if 0
2447 	/*
2448 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2449 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2450 	 * decrease FIFO threshold to reduce the FIFO overruns for
2451 	 * frames larger than 4000 bytes.
2452 	 * For best performance of standard MTU sized frames use
2453 	 * maximum allowable FIFO threshold, 128QW.
2454 	 */
2455 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2456 	    JME_RX_FIFO_SIZE)
2457 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2458 	else
2459 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2460 #else
2461 	/* Improve PCI Express compatibility */
2462 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2463 #endif
2464 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2465 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2466 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2467 	/* XXX TODO DROP_BAD */
2468 
2469 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2470 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2471 
2472 		/* Set Rx descriptor counter. */
2473 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2474 
2475 		/* Set Rx ring address to the hardware. */
2476 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2477 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2478 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2479 	}
2480 
2481 	/* Clear receive filter. */
2482 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2483 
2484 	/* Set up the receive filter. */
2485 	jme_set_filter(sc);
2486 	jme_set_vlan(sc);
2487 
2488 	/*
2489 	 * Disable all WOL bits as WOL can interfere with normal Rx
2490 	 * operation.  Also clear the WOL detection status bits.
2491 	 */
2492 	reg = CSR_READ_4(sc, JME_PMCS);
2493 	reg &= ~PMCS_WOL_ENB_MASK;
2494 	CSR_WRITE_4(sc, JME_PMCS, reg);
2495 
2496 	/*
2497 	 * Pad 10 bytes right before the received frame.  This greatly
2498 	 * helps Rx performance on strict-alignment architectures, as
2499 	 * the driver does not need to copy the frame to align the payload.
2500 	 */
2501 	reg = CSR_READ_4(sc, JME_RXMAC);
2502 	reg |= RXMAC_PAD_10BYTES;
2503 
2504 	if (ifp->if_capenable & IFCAP_RXCSUM)
2505 		reg |= RXMAC_CSUM_ENB;
2506 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2507 
2508 	/* Configure general purpose reg0 */
2509 	reg = CSR_READ_4(sc, JME_GPREG0);
2510 	reg &= ~GPREG0_PCC_UNIT_MASK;
2511 	/* Set PCC timer resolution to micro-seconds unit. */
2512 	reg |= GPREG0_PCC_UNIT_US;
2513 	/*
2514 	 * Disable all shadow register posting, as we have to read the
2515 	 * JME_INTR_STATUS register in jme_intr().  It also seems hard
2516 	 * to synchronize the interrupt status between hardware and
2517 	 * software with shadow posting, due to the requirements of
2518 	 * bus_dmamap_sync(9).
2519 	 */
2520 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2521 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2522 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2523 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2524 	/* Disable posting of DW0. */
2525 	reg &= ~GPREG0_POST_DW0_ENB;
2526 	/* Clear PME message. */
2527 	reg &= ~GPREG0_PME_ENB;
2528 	/* Set PHY address. */
2529 	reg &= ~GPREG0_PHY_ADDR_MASK;
2530 	reg |= sc->jme_phyaddr;
2531 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2532 
2533 	/* Configure Tx queue 0 packet completion coalescing. */
2534 	jme_set_tx_coal(sc);
2535 
2536 	/* Configure Rx queue 0 packet completion coalescing. */
2537 	jme_set_rx_coal(sc);
2538 
2539 	/* Configure shadow status block but don't enable posting. */
2540 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2541 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2542 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2543 
2544 	/* Disable Timer 1 and Timer 2. */
2545 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2546 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2547 
2548 	/* Configure retry transmit period, retry limit value. */
2549 	CSR_WRITE_4(sc, JME_TXTRHD,
2550 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2551 	    TXTRHD_RT_PERIOD_MASK) |
2552 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2553 	    TXTRHD_RT_LIMIT_MASK));
2554 
2555 #ifdef DEVICE_POLLING
2556 	if (!(ifp->if_flags & IFF_POLLING))
2557 #endif
2558 		/* Initialize the interrupt mask. */
2559 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2560 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2561 
2562 	/*
2563 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2564 	 * done after detection of valid link in jme_miibus_statchg.
2565 	 */
2566 	sc->jme_flags &= ~JME_FLAG_LINK;
2567 
2568 	/* Set the current media. */
2569 	mii = device_get_softc(sc->jme_miibus);
2570 	mii_mediachg(mii);
2571 
2572 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2573 
2574 	ifp->if_flags |= IFF_RUNNING;
2575 	ifp->if_flags &= ~IFF_OACTIVE;
2576 }
2577 
2578 static void
2579 jme_stop(struct jme_softc *sc)
2580 {
2581 	struct ifnet *ifp = &sc->arpcom.ac_if;
2582 	struct jme_txdesc *txd;
2583 	struct jme_rxdesc *rxd;
2584 	struct jme_rxdata *rdata;
2585 	int i, r;
2586 
2587 	ASSERT_SERIALIZED(ifp->if_serializer);
2588 
2589 	/*
2590 	 * Mark the interface down and cancel the watchdog timer.
2591 	 */
2592 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2593 	ifp->if_timer = 0;
2594 
2595 	callout_stop(&sc->jme_tick_ch);
2596 	sc->jme_flags &= ~JME_FLAG_LINK;
2597 
2598 	/*
2599 	 * Disable interrupts.
2600 	 */
2601 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2602 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2603 
2604 	/* Disable updating shadow status block. */
2605 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2606 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2607 
2608 	/* Stop receiver, transmitter. */
2609 	jme_stop_rx(sc);
2610 	jme_stop_tx(sc);
2611 
2612 	/*
2613 	 * Free partial finished RX segments
2614 	 */
2615 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2616 		rdata = &sc->jme_cdata.jme_rx_data[r];
2617 		if (rdata->jme_rxhead != NULL)
2618 			m_freem(rdata->jme_rxhead);
2619 		JME_RXCHAIN_RESET(sc, r);
2620 	}
2621 
2622 	/*
2623 	 * Free RX and TX mbufs still in the queues.
2624 	 */
2625 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2626 		rdata = &sc->jme_cdata.jme_rx_data[r];
2627 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2628 			rxd = &rdata->jme_rxdesc[i];
2629 			if (rxd->rx_m != NULL) {
2630 				bus_dmamap_unload(rdata->jme_rx_tag,
2631 						  rxd->rx_dmamap);
2632 				m_freem(rxd->rx_m);
2633 				rxd->rx_m = NULL;
2634 			}
2635 		}
2636 	}
2637 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2638 		txd = &sc->jme_cdata.jme_txdesc[i];
2639 		if (txd->tx_m != NULL) {
2640 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2641 			    txd->tx_dmamap);
2642 			m_freem(txd->tx_m);
2643 			txd->tx_m = NULL;
2644 			txd->tx_ndesc = 0;
2645 		}
2646 	}
2647 }
2648 
2649 static void
2650 jme_stop_tx(struct jme_softc *sc)
2651 {
2652 	uint32_t reg;
2653 	int i;
2654 
2655 	reg = CSR_READ_4(sc, JME_TXCSR);
2656 	if ((reg & TXCSR_TX_ENB) == 0)
2657 		return;
2658 	reg &= ~TXCSR_TX_ENB;
2659 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2660 	for (i = JME_TIMEOUT; i > 0; i--) {
2661 		DELAY(1);
2662 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2663 			break;
2664 	}
2665 	if (i == 0)
2666 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2667 }
2668 
2669 static void
2670 jme_stop_rx(struct jme_softc *sc)
2671 {
2672 	uint32_t reg;
2673 	int i;
2674 
2675 	reg = CSR_READ_4(sc, JME_RXCSR);
2676 	if ((reg & RXCSR_RX_ENB) == 0)
2677 		return;
2678 	reg &= ~RXCSR_RX_ENB;
2679 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2680 	for (i = JME_TIMEOUT; i > 0; i--) {
2681 		DELAY(1);
2682 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2683 			break;
2684 	}
2685 	if (i == 0)
2686 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2687 }
2688 
2689 static void
2690 jme_init_tx_ring(struct jme_softc *sc)
2691 {
2692 	struct jme_chain_data *cd;
2693 	struct jme_txdesc *txd;
2694 	int i;
2695 
2696 	sc->jme_cdata.jme_tx_prod = 0;
2697 	sc->jme_cdata.jme_tx_cons = 0;
2698 	sc->jme_cdata.jme_tx_cnt = 0;
2699 
2700 	cd = &sc->jme_cdata;
2701 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2702 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2703 		txd = &sc->jme_cdata.jme_txdesc[i];
2704 		txd->tx_m = NULL;
2705 		txd->tx_desc = &cd->jme_tx_ring[i];
2706 		txd->tx_ndesc = 0;
2707 	}
2708 }
2709 
2710 static void
2711 jme_init_ssb(struct jme_softc *sc)
2712 {
2713 	struct jme_chain_data *cd;
2714 
2715 	cd = &sc->jme_cdata;
2716 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2717 }
2718 
2719 static int
2720 jme_init_rx_ring(struct jme_softc *sc, int ring)
2721 {
2722 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2723 	struct jme_rxdesc *rxd;
2724 	int i;
2725 
2726 	KKASSERT(rdata->jme_rxhead == NULL &&
2727 		 rdata->jme_rxtail == NULL &&
2728 		 rdata->jme_rxlen == 0);
2729 	rdata->jme_rx_cons = 0;
2730 
2731 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2732 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2733 		int error;
2734 
2735 		rxd = &rdata->jme_rxdesc[i];
2736 		rxd->rx_m = NULL;
2737 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2738 		error = jme_newbuf(sc, ring, rxd, 1);
2739 		if (error)
2740 			return error;
2741 	}
2742 	return 0;
2743 }
2744 
2745 static int
2746 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2747 {
2748 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2749 	struct jme_desc *desc;
2750 	struct mbuf *m;
2751 	bus_dma_segment_t segs;
2752 	bus_dmamap_t map;
2753 	int error, nsegs;
2754 
2755 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2756 	if (m == NULL)
2757 		return ENOBUFS;
2758 	/*
2759 	 * The JMC250 has a 64-bit boundary alignment limitation, so
2760 	 * jme(4) takes advantage of the hardware's 10-byte padding
2761 	 * feature in order to avoid copying the entire frame just to
2762 	 * align the IP header on a 32-bit boundary.
2763 	 */
2764 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2765 
2766 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2767 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2768 			BUS_DMA_NOWAIT);
2769 	if (error) {
2770 		m_freem(m);
2771 		if (init)
2772 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2773 		return error;
2774 	}
2775 
2776 	if (rxd->rx_m != NULL) {
2777 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2778 				BUS_DMASYNC_POSTREAD);
2779 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2780 	}
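	/*
	 * The spare map now carries the newly loaded mbuf; swap it with
	 * the descriptor's map so the old map becomes the new spare.
	 */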
2781 	map = rxd->rx_dmamap;
2782 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2783 	rdata->jme_rx_sparemap = map;
2784 	rxd->rx_m = m;
2785 
2786 	desc = rxd->rx_desc;
2787 	desc->buflen = htole32(segs.ds_len);
2788 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2789 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2790 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2791 
2792 	return 0;
2793 }
2794 
2795 static void
2796 jme_set_vlan(struct jme_softc *sc)
2797 {
2798 	struct ifnet *ifp = &sc->arpcom.ac_if;
2799 	uint32_t reg;
2800 
2801 	ASSERT_SERIALIZED(ifp->if_serializer);
2802 
2803 	reg = CSR_READ_4(sc, JME_RXMAC);
2804 	reg &= ~RXMAC_VLAN_ENB;
2805 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2806 		reg |= RXMAC_VLAN_ENB;
2807 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2808 }
2809 
2810 static void
2811 jme_set_filter(struct jme_softc *sc)
2812 {
2813 	struct ifnet *ifp = &sc->arpcom.ac_if;
2814 	struct ifmultiaddr *ifma;
2815 	uint32_t crc;
2816 	uint32_t mchash[2];
2817 	uint32_t rxcfg;
2818 
2819 	ASSERT_SERIALIZED(ifp->if_serializer);
2820 
2821 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2822 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2823 	    RXMAC_ALLMULTI);
2824 
2825 	/*
2826 	 * Always accept frames destined to our station address.
2827 	 * Always accept broadcast frames.
2828 	 */
2829 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2830 
2831 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2832 		if (ifp->if_flags & IFF_PROMISC)
2833 			rxcfg |= RXMAC_PROMISC;
2834 		if (ifp->if_flags & IFF_ALLMULTI)
2835 			rxcfg |= RXMAC_ALLMULTI;
2836 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2837 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2838 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2839 		return;
2840 	}
2841 
2842 	/*
2843 	 * Set up the multicast address filter by passing all multicast
2844 	 * addresses through a CRC generator, and then using the low-order
2845 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2846 	 * high-order bit selects the register, while the low-order 5 bits
2847 	 * select the bit within the register.
2848 	 */
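	/*
	 * For example, crc & 0x3f == 42 (0x2a) sets bit 10 (42 & 0x1f)
	 * of mchash[1] (42 >> 5), i.e. a bit in the JME_MAR1 register.
	 */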
2849 	rxcfg |= RXMAC_MULTICAST;
2850 	bzero(mchash, sizeof(mchash));
2851 
2852 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2853 		if (ifma->ifma_addr->sa_family != AF_LINK)
2854 			continue;
2855 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2856 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2857 
2858 		/* Just want the 6 least significant bits. */
2859 		crc &= 0x3f;
2860 
2861 		/* Set the corresponding bit in the hash table. */
2862 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2863 	}
2864 
2865 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2866 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2867 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2868 }
2869 
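/*
 * The four coalescing sysctl handlers below share one pattern: read
 * the current value, validate the requested value against the PCC
 * limits, and reprogram the hardware only if the value changed and
 * the interface is running.
 */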
2870 static int
2871 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2872 {
2873 	struct jme_softc *sc = arg1;
2874 	struct ifnet *ifp = &sc->arpcom.ac_if;
2875 	int error, v;
2876 
2877 	lwkt_serialize_enter(ifp->if_serializer);
2878 
2879 	v = sc->jme_tx_coal_to;
2880 	error = sysctl_handle_int(oidp, &v, 0, req);
2881 	if (error || req->newptr == NULL)
2882 		goto back;
2883 
2884 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2885 		error = EINVAL;
2886 		goto back;
2887 	}
2888 
2889 	if (v != sc->jme_tx_coal_to) {
2890 		sc->jme_tx_coal_to = v;
2891 		if (ifp->if_flags & IFF_RUNNING)
2892 			jme_set_tx_coal(sc);
2893 	}
2894 back:
2895 	lwkt_serialize_exit(ifp->if_serializer);
2896 	return error;
2897 }
2898 
2899 static int
2900 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2901 {
2902 	struct jme_softc *sc = arg1;
2903 	struct ifnet *ifp = &sc->arpcom.ac_if;
2904 	int error, v;
2905 
2906 	lwkt_serialize_enter(ifp->if_serializer);
2907 
2908 	v = sc->jme_tx_coal_pkt;
2909 	error = sysctl_handle_int(oidp, &v, 0, req);
2910 	if (error || req->newptr == NULL)
2911 		goto back;
2912 
2913 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2914 		error = EINVAL;
2915 		goto back;
2916 	}
2917 
2918 	if (v != sc->jme_tx_coal_pkt) {
2919 		sc->jme_tx_coal_pkt = v;
2920 		if (ifp->if_flags & IFF_RUNNING)
2921 			jme_set_tx_coal(sc);
2922 	}
2923 back:
2924 	lwkt_serialize_exit(ifp->if_serializer);
2925 	return error;
2926 }
2927 
2928 static int
2929 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2930 {
2931 	struct jme_softc *sc = arg1;
2932 	struct ifnet *ifp = &sc->arpcom.ac_if;
2933 	int error, v;
2934 
2935 	lwkt_serialize_enter(ifp->if_serializer);
2936 
2937 	v = sc->jme_rx_coal_to;
2938 	error = sysctl_handle_int(oidp, &v, 0, req);
2939 	if (error || req->newptr == NULL)
2940 		goto back;
2941 
2942 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2943 		error = EINVAL;
2944 		goto back;
2945 	}
2946 
2947 	if (v != sc->jme_rx_coal_to) {
2948 		sc->jme_rx_coal_to = v;
2949 		if (ifp->if_flags & IFF_RUNNING)
2950 			jme_set_rx_coal(sc);
2951 	}
2952 back:
2953 	lwkt_serialize_exit(ifp->if_serializer);
2954 	return error;
2955 }
2956 
2957 static int
2958 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2959 {
2960 	struct jme_softc *sc = arg1;
2961 	struct ifnet *ifp = &sc->arpcom.ac_if;
2962 	int error, v;
2963 
2964 	lwkt_serialize_enter(ifp->if_serializer);
2965 
2966 	v = sc->jme_rx_coal_pkt;
2967 	error = sysctl_handle_int(oidp, &v, 0, req);
2968 	if (error || req->newptr == NULL)
2969 		goto back;
2970 
2971 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2972 		error = EINVAL;
2973 		goto back;
2974 	}
2975 
2976 	if (v != sc->jme_rx_coal_pkt) {
2977 		sc->jme_rx_coal_pkt = v;
2978 		if (ifp->if_flags & IFF_RUNNING)
2979 			jme_set_rx_coal(sc);
2980 	}
2981 back:
2982 	lwkt_serialize_exit(ifp->if_serializer);
2983 	return error;
2984 }
2985 
2986 static void
2987 jme_set_tx_coal(struct jme_softc *sc)
2988 {
2989 	uint32_t reg;
2990 
2991 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2992 	    PCCTX_COAL_TO_MASK;
2993 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2994 	    PCCTX_COAL_PKT_MASK;
2995 	reg |= PCCTX_COAL_TXQ0;
2996 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2997 }
2998 
2999 static void
3000 jme_set_rx_coal(struct jme_softc *sc)
3001 {
3002 	uint32_t reg;
3003 	int r;
3004 
3005 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3006 	    PCCRX_COAL_TO_MASK;
3007 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3008 	    PCCRX_COAL_PKT_MASK;
3009 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3010 		if (r < sc->jme_rx_ring_inuse)
3011 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3012 		else
3013 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3014 	}
3015 }
3016 
3017 #ifdef DEVICE_POLLING
3018 
3019 static void
3020 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3021 {
3022 	struct jme_softc *sc = ifp->if_softc;
3023 	struct mbuf_chain chain[MAXCPU];
3024 	uint32_t status;
3025 	int r, prog = 0;
3026 
3027 	ASSERT_SERIALIZED(ifp->if_serializer);
3028 
3029 	switch (cmd) {
3030 	case POLL_REGISTER:
3031 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3032 		break;
3033 
3034 	case POLL_DEREGISTER:
3035 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3036 		break;
3037 
3038 	case POLL_AND_CHECK_STATUS:
3039 	case POLL_ONLY:
3040 		status = CSR_READ_4(sc, JME_INTR_STATUS);
3041 
3042 		ether_input_chain_init(chain);
3043 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
3044 			prog += jme_rxeof_chain(sc, r, chain, count);
3045 		if (prog)
3046 			ether_input_dispatch(chain);
3047 
3048 		if (status & INTR_RXQ_DESC_EMPTY) {
3049 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3050 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3051 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
3052 		}
3053 
3054 		jme_txeof(sc);
3055 		if (!ifq_is_empty(&ifp->if_snd))
3056 			if_devstart(ifp);
3057 		break;
3058 	}
3059 }
3060 
3061 #endif	/* DEVICE_POLLING */
3062 
3063 static int
3064 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3065 {
3066 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3067 	bus_dmamem_t dmem;
3068 	int error;
3069 
3070 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3071 			JME_RX_RING_ALIGN, 0,
3072 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3073 			JME_RX_RING_SIZE(sc),
3074 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3075 	if (error) {
3076 		device_printf(sc->jme_dev,
3077 		    "could not allocate %dth Rx ring.\n", ring);
3078 		return error;
3079 	}
3080 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3081 	rdata->jme_rx_ring_map = dmem.dmem_map;
3082 	rdata->jme_rx_ring = dmem.dmem_addr;
3083 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3084 
3085 	return 0;
3086 }
3087 
3088 static int
3089 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3090 {
3091 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3092 	int i, error;
3093 
3094 	/* Create tag for Rx buffers. */
3095 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3096 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3097 	    BUS_SPACE_MAXADDR,		/* lowaddr */
3098 	    BUS_SPACE_MAXADDR,		/* highaddr */
3099 	    NULL, NULL,			/* filter, filterarg */
3100 	    MCLBYTES,			/* maxsize */
3101 	    1,				/* nsegments */
3102 	    MCLBYTES,			/* maxsegsize */
3103 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3104 	    &rdata->jme_rx_tag);
3105 	if (error) {
3106 		device_printf(sc->jme_dev,
3107 		    "could not create %dth Rx DMA tag.\n", ring);
3108 		return error;
3109 	}
3110 
3111 	/* Create DMA maps for Rx buffers. */
3112 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3113 				  &rdata->jme_rx_sparemap);
3114 	if (error) {
3115 		device_printf(sc->jme_dev,
3116 		    "could not create %dth spare Rx dmamap.\n", ring);
3117 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3118 		rdata->jme_rx_tag = NULL;
3119 		return error;
3120 	}
3121 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3122 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3123 
3124 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3125 					  &rxd->rx_dmamap);
3126 		if (error) {
3127 			int j;
3128 
3129 			device_printf(sc->jme_dev,
3130 			    "could not create %dth Rx dmamap "
3131 			    "for %dth RX ring.\n", i, ring);
3132 
3133 			for (j = 0; j < i; ++j) {
3134 				rxd = &rdata->jme_rxdesc[j];
3135 				bus_dmamap_destroy(rdata->jme_rx_tag,
3136 						   rxd->rx_dmamap);
3137 			}
3138 			bus_dmamap_destroy(rdata->jme_rx_tag,
3139 					   rdata->jme_rx_sparemap);
3140 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3141 			rdata->jme_rx_tag = NULL;
3142 			return error;
3143 		}
3144 	}
3145 	return 0;
3146 }
3147 
3148 static void
3149 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3150 {
3151 	struct mbuf_chain chain[MAXCPU];
3152 	int r, prog = 0;
3153 
3154 	ether_input_chain_init(chain);
3155 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3156 		if (status & jme_rx_status[r].jme_coal)
3157 			prog += jme_rxeof_chain(sc, r, chain, -1);
3158 	}
3159 	if (prog)
3160 		ether_input_dispatch(chain);
3161 }
3162 
3163 static void
3164 jme_enable_rss(struct jme_softc *sc)
3165 {
3166 	uint32_t rssc, ind;
3167 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3168 	int i;
3169 
3170 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3171 
3172 	KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3173 		sc->jme_rx_ring_inuse == JME_NRXRING_4,
3174 		("%s: invalid # of RX rings (%d)\n",
3175 		 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3176 
3177 	rssc = RSSC_HASH_64_ENTRY;
3178 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3179 	rssc |= sc->jme_rx_ring_inuse >> 1;
3180 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3181 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3182 
3183 	toeplitz_get_key(key, sizeof(key));
3184 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3185 		uint32_t keyreg;
3186 
3187 		keyreg = RSSKEY_REGVAL(key, i);
3188 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3189 
3190 		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3191 	}
3192 
3193 	/*
3194 	 * Create redirect table in following fashion:
3195 	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3196 	 */
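	/*
	 * For example, with 4 rings in use each 32-bit table register
	 * is filled with the byte pattern 0x03020100, spreading
	 * consecutive hash values round-robin across rings 0-3.
	 */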
3197 	ind = 0;
3198 	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3199 		int q;
3200 
3201 		q = i % sc->jme_rx_ring_inuse;
3202 		ind |= q << (i * 8);
3203 	}
3204 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3205 
3206 	for (i = 0; i < RSSTBL_NREGS; ++i)
3207 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3208 }
3209 
3210 static void
3211 jme_disable_rss(struct jme_softc *sc)
3212 {
3213 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3214 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3215 }
3216