xref: /dflybsd-src/sys/dev/netif/jme/if_jme.c (revision 2eb0d0694bd74032ad456be1c83b669b6b692b18)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30 
31 #include "opt_polling.h"
32 #include "opt_rss.h"
33 #include "opt_jme.h"
34 
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/interrupt.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42 #include <sys/rman.h>
43 #include <sys/serialize.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 
48 #include <net/ethernet.h>
49 #include <net/if.h>
50 #include <net/bpf.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/ifq_var.h>
55 #ifdef RSS
56 #include <net/toeplitz.h>
57 #endif
58 #include <net/vlan/if_vlan_var.h>
59 #include <net/vlan/if_vlan_ether.h>
60 
61 #include <dev/netif/mii_layer/miivar.h>
62 #include <dev/netif/mii_layer/jmphyreg.h>
63 
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
67 
68 #include <dev/netif/jme/if_jmereg.h>
69 #include <dev/netif/jme/if_jmevar.h>
70 
71 #include "miibus_if.h"
72 
73 /* Define the following to enable printing of Rx errors. */
74 #undef	JME_SHOW_ERRORS
75 
76 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
77 
78 #ifdef JME_RSS_DEBUG
79 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
80 do { \
81 	if ((sc)->jme_rss_debug >= (lvl)) \
82 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
83 } while (0)
84 #else	/* !JME_RSS_DEBUG */
85 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
86 #endif	/* JME_RSS_DEBUG */
87 
88 static int	jme_probe(device_t);
89 static int	jme_attach(device_t);
90 static int	jme_detach(device_t);
91 static int	jme_shutdown(device_t);
92 static int	jme_suspend(device_t);
93 static int	jme_resume(device_t);
94 
95 static int	jme_miibus_readreg(device_t, int, int);
96 static int	jme_miibus_writereg(device_t, int, int, int);
97 static void	jme_miibus_statchg(device_t);
98 
99 static void	jme_init(void *);
100 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
101 static void	jme_start(struct ifnet *);
102 static void	jme_watchdog(struct ifnet *);
103 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
104 static int	jme_mediachange(struct ifnet *);
105 #ifdef DEVICE_POLLING
106 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
107 #endif
108 
109 static void	jme_intr(void *);
110 static void	jme_txeof(struct jme_softc *);
111 static void	jme_rxeof(struct jme_softc *, int);
112 static int	jme_rxeof_chain(struct jme_softc *, int,
113 				struct mbuf_chain *, int);
114 static void	jme_rx_intr(struct jme_softc *, uint32_t);
115 
116 static int	jme_dma_alloc(struct jme_softc *);
117 static void	jme_dma_free(struct jme_softc *);
118 static int	jme_init_rx_ring(struct jme_softc *, int);
119 static void	jme_init_tx_ring(struct jme_softc *);
120 static void	jme_init_ssb(struct jme_softc *);
121 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
122 static int	jme_encap(struct jme_softc *, struct mbuf **);
123 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
124 static int	jme_rxring_dma_alloc(struct jme_softc *, int);
125 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
126 
127 static void	jme_tick(void *);
128 static void	jme_stop(struct jme_softc *);
129 static void	jme_reset(struct jme_softc *);
130 static void	jme_set_vlan(struct jme_softc *);
131 static void	jme_set_filter(struct jme_softc *);
132 static void	jme_stop_tx(struct jme_softc *);
133 static void	jme_stop_rx(struct jme_softc *);
134 static void	jme_mac_config(struct jme_softc *);
135 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
136 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
137 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
138 #ifdef notyet
139 static void	jme_setwol(struct jme_softc *);
140 static void	jme_setlinkspeed(struct jme_softc *);
141 #endif
142 static void	jme_set_tx_coal(struct jme_softc *);
143 static void	jme_set_rx_coal(struct jme_softc *);
144 #ifdef RSS
145 static void	jme_enable_rss(struct jme_softc *);
146 #endif
147 static void	jme_disable_rss(struct jme_softc *);
148 
149 static void	jme_sysctl_node(struct jme_softc *);
150 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
151 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
152 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
153 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
154 
155 /*
156  * Devices supported by this driver.
157  */
158 static const struct jme_dev {
159 	uint16_t	jme_vendorid;
160 	uint16_t	jme_deviceid;
161 	uint32_t	jme_caps;
162 	const char	*jme_name;
163 } jme_devs[] = {
164 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
165 	    JME_CAP_JUMBO,
166 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
167 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
168 	    JME_CAP_FASTETH,
169 	    "JMicron Inc, JMC260 Fast Ethernet" },
170 	{ 0, 0, 0, NULL }
171 };
172 
173 static device_method_t jme_methods[] = {
174 	/* Device interface. */
175 	DEVMETHOD(device_probe,		jme_probe),
176 	DEVMETHOD(device_attach,	jme_attach),
177 	DEVMETHOD(device_detach,	jme_detach),
178 	DEVMETHOD(device_shutdown,	jme_shutdown),
179 	DEVMETHOD(device_suspend,	jme_suspend),
180 	DEVMETHOD(device_resume,	jme_resume),
181 
182 	/* Bus interface. */
183 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
184 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
185 
186 	/* MII interface. */
187 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
188 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
189 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
190 
191 	{ NULL, NULL }
192 };
193 
194 static driver_t jme_driver = {
195 	"jme",
196 	jme_methods,
197 	sizeof(struct jme_softc)
198 };
199 
200 static devclass_t jme_devclass;
201 
202 DECLARE_DUMMY_MODULE(if_jme);
203 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
204 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
205 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
206 
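/*
 * Per-RX-ring interrupt bits: jme_coal holds the coalescing/timeout
 * status bits that indicate pending work for ring N, and jme_comp is
 * the matching completion bit written back to ack the event (see the
 * status handling in jme_intr()).
 */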
207 static const struct {
208 	uint32_t	jme_coal;
209 	uint32_t	jme_comp;
210 } jme_rx_status[JME_NRXRING_MAX] = {
211 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
212 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
213 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
214 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
215 };
216 
217 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
218 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
219 static int	jme_rx_ring_count = JME_NRXRING_DEF;
220 
221 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
222 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
223 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
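/*
 * These tunables are read from the kernel environment at boot, e.g.
 * via /boot/loader.conf (the values below are purely illustrative):
 *
 *	hw.jme.rx_desc_count="512"
 *	hw.jme.rx_ring_count="4"
 */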
224 
225 /*
226  *	Read a PHY register on the MII of the JMC250.
227  */
228 static int
229 jme_miibus_readreg(device_t dev, int phy, int reg)
230 {
231 	struct jme_softc *sc = device_get_softc(dev);
232 	uint32_t val;
233 	int i;
234 
235 	/* For FPGA version, PHY address 0 should be ignored. */
236 	if (sc->jme_caps & JME_CAP_FPGA) {
237 		if (phy == 0)
238 			return (0);
239 	} else {
240 		if (sc->jme_phyaddr != phy)
241 			return (0);
242 	}
243 
244 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
245 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
246 
247 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
248 		DELAY(1);
249 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
250 			break;
251 	}
252 	if (i == 0) {
253 		device_printf(sc->jme_dev, "phy read timeout: "
254 			      "phy %d, reg %d\n", phy, reg);
255 		return (0);
256 	}
257 
258 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
259 }
260 
261 /*
262  *	Write a PHY register on the MII of the JMC250.
263  */
264 static int
265 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
266 {
267 	struct jme_softc *sc = device_get_softc(dev);
268 	int i;
269 
270 	/* For FPGA version, PHY address 0 should be ignored. */
271 	if (sc->jme_caps & JME_CAP_FPGA) {
272 		if (phy == 0)
273 			return (0);
274 	} else {
275 		if (sc->jme_phyaddr != phy)
276 			return (0);
277 	}
278 
279 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
280 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
281 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
282 
283 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
284 		DELAY(1);
285 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
286 			break;
287 	}
288 	if (i == 0) {
289 		device_printf(sc->jme_dev, "phy write timeout: "
290 			      "phy %d, reg %d\n", phy, reg);
291 	}
292 
293 	return (0);
294 }
295 
296 /*
297  *	Callback from MII layer when media changes.
298  */
299 static void
300 jme_miibus_statchg(device_t dev)
301 {
302 	struct jme_softc *sc = device_get_softc(dev);
303 	struct ifnet *ifp = &sc->arpcom.ac_if;
304 	struct mii_data *mii;
305 	struct jme_txdesc *txd;
306 	bus_addr_t paddr;
307 	int i, r;
308 
309 	ASSERT_SERIALIZED(ifp->if_serializer);
310 
311 	if ((ifp->if_flags & IFF_RUNNING) == 0)
312 		return;
313 
314 	mii = device_get_softc(sc->jme_miibus);
315 
316 	sc->jme_flags &= ~JME_FLAG_LINK;
317 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
318 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
319 		case IFM_10_T:
320 		case IFM_100_TX:
321 			sc->jme_flags |= JME_FLAG_LINK;
322 			break;
323 		case IFM_1000_T:
324 			if (sc->jme_caps & JME_CAP_FASTETH)
325 				break;
326 			sc->jme_flags |= JME_FLAG_LINK;
327 			break;
328 		default:
329 			break;
330 		}
331 	}
332 
333 	/*
334 	 * Disabling the Rx/Tx MACs has the side effect of resetting
335 	 * the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
336 	 * descriptor address.  The driver therefore has to reset
337 	 * its internal producer/consumer pointers and reclaim any
338 	 * allocated resources.  Note that just saving the values
339 	 * of the JME_TXNDA and JME_RXNDA registers before stopping
340 	 * the MACs and restoring them afterwards is not sufficient
341 	 * to guarantee a correct MAC state: stopping the MACs can
342 	 * take a while, and the hardware may have updated the
343 	 * JME_TXNDA/JME_RXNDA registers during the stop
344 	 * operation.
345 	 */
346 
347 	/* Disable interrupts */
348 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
349 
350 	/* Stop driver */
351 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
352 	ifp->if_timer = 0;
353 	callout_stop(&sc->jme_tick_ch);
354 
355 	/* Stop receiver/transmitter. */
356 	jme_stop_rx(sc);
357 	jme_stop_tx(sc);
358 
359 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
360 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
361 
362 		jme_rxeof(sc, r);
363 		if (rdata->jme_rxhead != NULL)
364 			m_freem(rdata->jme_rxhead);
365 		JME_RXCHAIN_RESET(sc, r);
366 
367 		/*
368 		 * Reuse the configured Rx descriptors and reset the
369 		 * producer/consumer index.
370 		 */
371 		rdata->jme_rx_cons = 0;
372 	}
373 
374 	jme_txeof(sc);
375 	if (sc->jme_cdata.jme_tx_cnt != 0) {
376 		/* Remove queued packets for transmit. */
377 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
378 			txd = &sc->jme_cdata.jme_txdesc[i];
379 			if (txd->tx_m != NULL) {
380 				bus_dmamap_unload(
381 				    sc->jme_cdata.jme_tx_tag,
382 				    txd->tx_dmamap);
383 				m_freem(txd->tx_m);
384 				txd->tx_m = NULL;
385 				txd->tx_ndesc = 0;
386 				ifp->if_oerrors++;
387 			}
388 		}
389 	}
390 	jme_init_tx_ring(sc);
391 
392 	/* Initialize shadow status block. */
393 	jme_init_ssb(sc);
394 
395 	/* Program MAC with resolved speed/duplex/flow-control. */
396 	if (sc->jme_flags & JME_FLAG_LINK) {
397 		jme_mac_config(sc);
398 
399 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
400 
401 		/* Set Tx ring address to the hardware. */
402 		paddr = sc->jme_cdata.jme_tx_ring_paddr;
403 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
404 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
405 
406 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
407 			CSR_WRITE_4(sc, JME_RXCSR,
408 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
409 
410 			/* Set Rx ring address to the hardware. */
411 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
412 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
413 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
414 		}
415 
416 		/* Restart receiver/transmitter. */
417 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
418 		    RXCSR_RXQ_START);
419 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
420 	}
421 
422 	ifp->if_flags |= IFF_RUNNING;
423 	ifp->if_flags &= ~IFF_OACTIVE;
424 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
425 
426 #ifdef DEVICE_POLLING
427 	if (!(ifp->if_flags & IFF_POLLING))
428 #endif
429 	/* Reenable interrupts. */
430 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
431 }
432 
433 /*
434  *	Get the current interface media status.
435  */
436 static void
437 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
438 {
439 	struct jme_softc *sc = ifp->if_softc;
440 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
441 
442 	ASSERT_SERIALIZED(ifp->if_serializer);
443 
444 	mii_pollstat(mii);
445 	ifmr->ifm_status = mii->mii_media_status;
446 	ifmr->ifm_active = mii->mii_media_active;
447 }
448 
449 /*
450  *	Set hardware to newly-selected media.
451  */
452 static int
453 jme_mediachange(struct ifnet *ifp)
454 {
455 	struct jme_softc *sc = ifp->if_softc;
456 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
457 	int error;
458 
459 	ASSERT_SERIALIZED(ifp->if_serializer);
460 
461 	if (mii->mii_instance != 0) {
462 		struct mii_softc *miisc;
463 
464 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
465 			mii_phy_reset(miisc);
466 	}
467 	error = mii_mediachg(mii);
468 
469 	return (error);
470 }
471 
472 static int
473 jme_probe(device_t dev)
474 {
475 	const struct jme_dev *sp;
476 	uint16_t vid, did;
477 
478 	vid = pci_get_vendor(dev);
479 	did = pci_get_device(dev);
480 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
481 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
482 			struct jme_softc *sc = device_get_softc(dev);
483 
484 			sc->jme_caps = sp->jme_caps;
485 			device_set_desc(dev, sp->jme_name);
486 			return (0);
487 		}
488 	}
489 	return (ENXIO);
490 }
491 
492 static int
493 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
494 {
495 	uint32_t reg;
496 	int i;
497 
498 	*val = 0;
499 	for (i = JME_TIMEOUT; i > 0; i--) {
500 		reg = CSR_READ_4(sc, JME_SMBCSR);
501 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
502 			break;
503 		DELAY(1);
504 	}
505 
506 	if (i == 0) {
507 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
508 		return (ETIMEDOUT);
509 	}
510 
511 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
512 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
513 	for (i = JME_TIMEOUT; i > 0; i--) {
514 		DELAY(1);
515 		reg = CSR_READ_4(sc, JME_SMBINTF);
516 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
517 			break;
518 	}
519 
520 	if (i == 0) {
521 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
522 		return (ETIMEDOUT);
523 	}
524 
525 	reg = CSR_READ_4(sc, JME_SMBINTF);
526 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
527 
528 	return (0);
529 }
530 
531 static int
532 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
533 {
534 	uint8_t fup, reg, val;
535 	uint32_t offset;
536 	int match;
537 
538 	offset = 0;
539 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
540 	    fup != JME_EEPROM_SIG0)
541 		return (ENOENT);
542 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
543 	    fup != JME_EEPROM_SIG1)
544 		return (ENOENT);
545 	match = 0;
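	/*
	 * Walk the EEPROM descriptors; a descriptor that writes one of
	 * the JME_PAR0..JME_PAR0+5 station address registers carries
	 * one byte of the MAC address.  Stop once all six bytes have
	 * been collected or the end-of-descriptor marker is seen.
	 */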
546 	do {
547 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
548 			break;
549 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
550 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
551 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
552 				break;
553 			if (reg >= JME_PAR0 &&
554 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
555 				if (jme_eeprom_read_byte(sc, offset + 2,
556 				    &val) != 0)
557 					break;
558 				eaddr[reg - JME_PAR0] = val;
559 				match++;
560 			}
561 		}
562 		/* Check for the end of EEPROM descriptor. */
563 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
564 			break;
565 		/* Try next eeprom descriptor. */
566 		offset += JME_EEPROM_DESC_BYTES;
567 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
568 
569 	if (match == ETHER_ADDR_LEN)
570 		return (0);
571 
572 	return (ENOENT);
573 }
574 
575 static void
576 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
577 {
578 	uint32_t par0, par1;
579 
580 	/* Read station address. */
581 	par0 = CSR_READ_4(sc, JME_PAR0);
582 	par1 = CSR_READ_4(sc, JME_PAR1);
583 	par1 &= 0xFFFF;
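	/*
	 * The stored address is unusable if it is all zero or if the
	 * multicast bit (bit 0 of the first byte) is set; in that case
	 * a random address with the JMicron OUI is generated below.
	 */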
584 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
585 		device_printf(sc->jme_dev,
586 		    "generating fake ethernet address.\n");
587 		par0 = karc4random();
588 		/* Set OUI to JMicron. */
589 		eaddr[0] = 0x00;
590 		eaddr[1] = 0x1B;
591 		eaddr[2] = 0x8C;
592 		eaddr[3] = (par0 >> 16) & 0xff;
593 		eaddr[4] = (par0 >> 8) & 0xff;
594 		eaddr[5] = par0 & 0xff;
595 	} else {
596 		eaddr[0] = (par0 >> 0) & 0xFF;
597 		eaddr[1] = (par0 >> 8) & 0xFF;
598 		eaddr[2] = (par0 >> 16) & 0xFF;
599 		eaddr[3] = (par0 >> 24) & 0xFF;
600 		eaddr[4] = (par1 >> 0) & 0xFF;
601 		eaddr[5] = (par1 >> 8) & 0xFF;
602 	}
603 }
604 
605 static int
606 jme_attach(device_t dev)
607 {
608 	struct jme_softc *sc = device_get_softc(dev);
609 	struct ifnet *ifp = &sc->arpcom.ac_if;
610 	uint32_t reg;
611 	uint16_t did;
612 	uint8_t pcie_ptr, rev;
613 	int error = 0;
614 	uint8_t eaddr[ETHER_ADDR_LEN];
615 
616 	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
617 	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
618 		sc->jme_rx_desc_cnt = JME_NDESC_MAX;
619 
620 	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
621 	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
622 		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
623 
624 #ifdef RSS
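	/*
	 * Clamp the RX ring count to the number of usable CPUs, then
	 * round it down to the nearest supported count (4, 2 or 1).
	 */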
625 	sc->jme_rx_ring_cnt = jme_rx_ring_count;
626 	if (sc->jme_rx_ring_cnt <= 0)
627 		sc->jme_rx_ring_cnt = JME_NRXRING_1;
628 	if (sc->jme_rx_ring_cnt > ncpus2)
629 		sc->jme_rx_ring_cnt = ncpus2;
630 
631 	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
632 		sc->jme_rx_ring_cnt = JME_NRXRING_4;
633 	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
634 		sc->jme_rx_ring_cnt = JME_NRXRING_2;
635 #else
636 	sc->jme_rx_ring_cnt = JME_NRXRING_MIN;
637 #endif
638 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
639 
640 	sc->jme_dev = dev;
641 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
642 
643 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
644 
645 	callout_init(&sc->jme_tick_ch);
646 
647 #ifndef BURN_BRIDGES
648 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
649 		uint32_t irq, mem;
650 
651 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
652 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
653 
654 		device_printf(dev, "chip is in D%d power mode "
655 		    "-- setting to D0\n", pci_get_powerstate(dev));
656 
657 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
658 
659 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
660 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
661 	}
662 #endif	/* !BURN_BRIDGES */
663 
664 	/* Enable bus mastering */
665 	pci_enable_busmaster(dev);
666 
667 	/*
668 	 * Allocate IO memory
669 	 *
670 	 * The JMC250 supports both memory-mapped and I/O register
671 	 * space access.  Since I/O access would have to use
672 	 * different BARs to reach all registers, it is a waste of
673 	 * time to use it.  The JMC250 maps its entire register
674 	 * space with a 16K memory window.
675 	 */
676 	sc->jme_mem_rid = JME_PCIR_BAR;
677 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
678 						 &sc->jme_mem_rid, RF_ACTIVE);
679 	if (sc->jme_mem_res == NULL) {
680 		device_printf(dev, "can't allocate IO memory\n");
681 		return ENXIO;
682 	}
683 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
684 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
685 
686 	/*
687 	 * Allocate IRQ
688 	 */
689 	sc->jme_irq_rid = 0;
690 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
691 						 &sc->jme_irq_rid,
692 						 RF_SHAREABLE | RF_ACTIVE);
693 	if (sc->jme_irq_res == NULL) {
694 		device_printf(dev, "can't allocate irq\n");
695 		error = ENXIO;
696 		goto fail;
697 	}
698 
699 	/*
700 	 * Extract revisions
701 	 */
702 	reg = CSR_READ_4(sc, JME_CHIPMODE);
703 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
704 	    CHIPMODE_NOT_FPGA) {
705 		sc->jme_caps |= JME_CAP_FPGA;
706 		if (bootverbose) {
707 			device_printf(dev, "FPGA revision: 0x%04x\n",
708 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
709 				      CHIPMODE_FPGA_REV_SHIFT);
710 		}
711 	}
712 
713 	/* NOTE: FM revision is put in the upper 4 bits */
714 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
715 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
716 	if (bootverbose)
717 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
718 
719 	did = pci_get_device(dev);
720 	switch (did) {
721 	case PCI_PRODUCT_JMICRON_JMC250:
722 		if (rev == JME_REV1_A2)
723 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
724 		break;
725 
726 	case PCI_PRODUCT_JMICRON_JMC260:
727 		if (rev == JME_REV2)
728 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
729 		break;
730 
731 	default:
732 		panic("unknown device id 0x%04x\n", did);
733 	}
734 	if (rev >= JME_REV2) {
735 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
736 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
737 				      GHC_TXMAC_CLKSRC_1000;
738 	}
739 
740 	/* Reset the ethernet controller. */
741 	jme_reset(sc);
742 
743 	/* Get station address. */
744 	reg = CSR_READ_4(sc, JME_SMBCSR);
745 	if (reg & SMBCSR_EEPROM_PRESENT)
746 		error = jme_eeprom_macaddr(sc, eaddr);
747 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
748 		if (error != 0 && (bootverbose)) {
749 			device_printf(dev, "ethernet hardware address "
750 				      "not found in EEPROM.\n");
751 		}
752 		jme_reg_macaddr(sc, eaddr);
753 	}
754 
755 	/*
756 	 * Save the PHY address.
757 	 * The integrated JR0211 has a fixed PHY address, whereas the
758 	 * FPGA version requires PHY probing to find the correct one.
759 	 */
760 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
761 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
762 		    GPREG0_PHY_ADDR_MASK;
763 		if (bootverbose) {
764 			device_printf(dev, "PHY is at address %d.\n",
765 			    sc->jme_phyaddr);
766 		}
767 	} else {
768 		sc->jme_phyaddr = 0;
769 	}
770 
771 	/* Set max allowable DMA size. */
772 	pcie_ptr = pci_get_pciecap_ptr(dev);
773 	if (pcie_ptr != 0) {
774 		uint16_t ctrl;
775 
776 		sc->jme_caps |= JME_CAP_PCIE;
777 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
778 		if (bootverbose) {
779 			device_printf(dev, "Read request size : %d bytes.\n",
780 			    128 << ((ctrl >> 12) & 0x07));
781 			device_printf(dev, "TLP payload size : %d bytes.\n",
782 			    128 << ((ctrl >> 5) & 0x07));
783 		}
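		/*
		 * The TX DMA burst size is presumably capped at the
		 * PCIe maximum read request size, since TX data is
		 * fetched from host memory; hence the mapping below
		 * (assumption, inferred from this code).
		 */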
784 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
785 		case PCIEM_DEVCTL_MAX_READRQ_128:
786 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
787 			break;
788 		case PCIEM_DEVCTL_MAX_READRQ_256:
789 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
790 			break;
791 		default:
792 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
793 			break;
794 		}
795 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
796 	} else {
797 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
798 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
799 	}
800 
801 #ifdef notyet
802 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
803 		sc->jme_caps |= JME_CAP_PMCAP;
804 #endif
805 
806 	/*
807 	 * Create sysctl tree
808 	 */
809 	jme_sysctl_node(sc);
810 
811 	/* Allocate DMA stuff */
812 	error = jme_dma_alloc(sc);
813 	if (error)
814 		goto fail;
815 
816 	ifp->if_softc = sc;
817 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
818 	ifp->if_init = jme_init;
819 	ifp->if_ioctl = jme_ioctl;
820 	ifp->if_start = jme_start;
821 #ifdef DEVICE_POLLING
822 	ifp->if_poll = jme_poll;
823 #endif
824 	ifp->if_watchdog = jme_watchdog;
825 	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
826 	ifq_set_ready(&ifp->if_snd);
827 
828 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
829 	ifp->if_capabilities = IFCAP_HWCSUM |
830 			       IFCAP_VLAN_MTU |
831 			       IFCAP_VLAN_HWTAGGING;
832 	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
833 		ifp->if_capabilities |= IFCAP_RSS;
834 	ifp->if_hwassist = JME_CSUM_FEATURES;
835 	ifp->if_capenable = ifp->if_capabilities;
836 
837 	/* Set up MII bus. */
838 	error = mii_phy_probe(dev, &sc->jme_miibus,
839 			      jme_mediachange, jme_mediastatus);
840 	if (error) {
841 		device_printf(dev, "no PHY found!\n");
842 		goto fail;
843 	}
844 
845 	/*
846 	 * Save PHYADDR for FPGA mode PHY.
847 	 */
848 	if (sc->jme_caps & JME_CAP_FPGA) {
849 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
850 
851 		if (mii->mii_instance != 0) {
852 			struct mii_softc *miisc;
853 
854 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
855 				if (miisc->mii_phy != 0) {
856 					sc->jme_phyaddr = miisc->mii_phy;
857 					break;
858 				}
859 			}
860 			if (sc->jme_phyaddr != 0) {
861 				device_printf(sc->jme_dev,
862 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
863 				/* vendor magic. */
864 				jme_miibus_writereg(dev, sc->jme_phyaddr,
865 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
866 
867 				/* XXX should we clear JME_WA_EXTFIFO */
868 			}
869 		}
870 	}
871 
872 	ether_ifattach(ifp, eaddr, NULL);
873 
874 	/* Tell the upper layer(s) we support long frames. */
875 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
876 
877 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
878 			       &sc->jme_irq_handle, ifp->if_serializer);
879 	if (error) {
880 		device_printf(dev, "could not set up interrupt handler.\n");
881 		ether_ifdetach(ifp);
882 		goto fail;
883 	}
884 
885 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
886 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
887 	return 0;
888 fail:
889 	jme_detach(dev);
890 	return (error);
891 }
892 
893 static int
894 jme_detach(device_t dev)
895 {
896 	struct jme_softc *sc = device_get_softc(dev);
897 
898 	if (device_is_attached(dev)) {
899 		struct ifnet *ifp = &sc->arpcom.ac_if;
900 
901 		lwkt_serialize_enter(ifp->if_serializer);
902 		jme_stop(sc);
903 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
904 		lwkt_serialize_exit(ifp->if_serializer);
905 
906 		ether_ifdetach(ifp);
907 	}
908 
909 	if (sc->jme_sysctl_tree != NULL)
910 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
911 
912 	if (sc->jme_miibus != NULL)
913 		device_delete_child(dev, sc->jme_miibus);
914 	bus_generic_detach(dev);
915 
916 	if (sc->jme_irq_res != NULL) {
917 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
918 				     sc->jme_irq_res);
919 	}
920 
921 	if (sc->jme_mem_res != NULL) {
922 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
923 				     sc->jme_mem_res);
924 	}
925 
926 	jme_dma_free(sc);
927 
928 	return (0);
929 }
930 
931 static void
932 jme_sysctl_node(struct jme_softc *sc)
933 {
934 	int coal_max;
935 #ifdef JME_RSS_DEBUG
936 	char rx_ring_pkt[32];
937 	int r;
938 #endif
939 
940 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
941 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
942 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
943 				device_get_nameunit(sc->jme_dev),
944 				CTLFLAG_RD, 0, "");
945 	if (sc->jme_sysctl_tree == NULL) {
946 		device_printf(sc->jme_dev, "can't add sysctl node\n");
947 		return;
948 	}
949 
950 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
951 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
952 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
953 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
954 
955 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
956 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
957 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
958 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
959 
960 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
961 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
962 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
963 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
964 
965 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
966 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
967 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
968 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
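	/*
	 * The nodes above live under hw.<nameunit>; e.g. for unit 0
	 * (unit name assumed) the coalescing parameters can be tuned
	 * with "sysctl hw.jme0.tx_coal_pkt=<n>".
	 */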
969 
970 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
971 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
972 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
973 		       0, "RX desc count");
974 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
975 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
976 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
977 		       0, "TX desc count");
978 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
979 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
980 		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
981 		       0, "RX ring count");
982 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
983 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
984 		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
985 		       0, "RX ring in use");
986 #ifdef JME_RSS_DEBUG
987 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
988 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
989 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
990 		       0, "RSS debug level");
991 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
992 		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
993 		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
994 				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
995 				rx_ring_pkt, CTLFLAG_RD,
996 				&sc->jme_rx_ring_pkt[r],
997 				0, "RXed packets");
998 	}
999 #endif
1000 
1001 	/*
1002 	 * Set default coalescing values.
1003 	 */
1004 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1005 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1006 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1007 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1008 
1009 	/*
1010 	 * Adjust the coalescing values, in case the user configured
1011 	 * small TX/RX descriptor counts.
1012 	 *
1013 	 * NOTE: coal_max will not be zero, since the descriptor count
1014 	 * must be aligned to JME_NDESC_ALIGN (currently 16).
1015 	 */
1016 	coal_max = sc->jme_tx_desc_cnt / 6;
1017 	if (coal_max < sc->jme_tx_coal_pkt)
1018 		sc->jme_tx_coal_pkt = coal_max;
1019 
1020 	coal_max = sc->jme_rx_desc_cnt / 4;
1021 	if (coal_max < sc->jme_rx_coal_pkt)
1022 		sc->jme_rx_coal_pkt = coal_max;
1023 }
1024 
1025 static int
1026 jme_dma_alloc(struct jme_softc *sc)
1027 {
1028 	struct jme_txdesc *txd;
1029 	bus_dmamem_t dmem;
1030 	int error, i;
1031 
1032 	sc->jme_cdata.jme_txdesc =
1033 	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1034 		M_DEVBUF, M_WAITOK | M_ZERO);
1035 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1036 		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1037 		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1038 			M_DEVBUF, M_WAITOK | M_ZERO);
1039 	}
1040 
1041 	/* Create parent ring tag. */
1042 	error = bus_dma_tag_create(NULL,/* parent */
1043 	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
1044 	    sc->jme_lowaddr,		/* lowaddr */
1045 	    BUS_SPACE_MAXADDR,		/* highaddr */
1046 	    NULL, NULL,			/* filter, filterarg */
1047 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1048 	    0,				/* nsegments */
1049 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1050 	    0,				/* flags */
1051 	    &sc->jme_cdata.jme_ring_tag);
1052 	if (error) {
1053 		device_printf(sc->jme_dev,
1054 		    "could not create parent ring DMA tag.\n");
1055 		return error;
1056 	}
1057 
1058 	/*
1059 	 * Create DMA stuff for the TX ring
1060 	 */
1061 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1062 			JME_TX_RING_ALIGN, 0,
1063 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1064 			JME_TX_RING_SIZE(sc),
1065 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1066 	if (error) {
1067 		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1068 		return error;
1069 	}
1070 	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1071 	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1072 	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1073 	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
1074 
1075 	/*
1076 	 * Create DMA stuff for the RX rings
1077 	 */
1078 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1079 		error = jme_rxring_dma_alloc(sc, i);
1080 		if (error)
1081 			return error;
1082 	}
1083 
1084 	/* Create parent buffer tag. */
1085 	error = bus_dma_tag_create(NULL,/* parent */
1086 	    1, 0,			/* algnmnt, boundary */
1087 	    sc->jme_lowaddr,		/* lowaddr */
1088 	    BUS_SPACE_MAXADDR,		/* highaddr */
1089 	    NULL, NULL,			/* filter, filterarg */
1090 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1091 	    0,				/* nsegments */
1092 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1093 	    0,				/* flags */
1094 	    &sc->jme_cdata.jme_buffer_tag);
1095 	if (error) {
1096 		device_printf(sc->jme_dev,
1097 		    "could not create parent buffer DMA tag.\n");
1098 		return error;
1099 	}
1100 
1101 	/*
1102 	 * Create DMA stuff for the shadow status block
1103 	 */
1104 	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1105 			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1106 			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1107 	if (error) {
1108 		device_printf(sc->jme_dev,
1109 		    "could not create shadow status block.\n");
1110 		return error;
1111 	}
1112 	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1113 	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1114 	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1115 	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1116 
1117 	/*
1118 	 * Create DMA stuff for the TX buffers
1119 	 */
1120 
1121 	/* Create tag for Tx buffers. */
1122 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1123 	    1, 0,			/* algnmnt, boundary */
1124 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1125 	    BUS_SPACE_MAXADDR,		/* highaddr */
1126 	    NULL, NULL,			/* filter, filterarg */
1127 	    JME_JUMBO_FRAMELEN,		/* maxsize */
1128 	    JME_MAXTXSEGS,		/* nsegments */
1129 	    JME_MAXSEGSIZE,		/* maxsegsize */
1130 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1131 	    &sc->jme_cdata.jme_tx_tag);
1132 	if (error != 0) {
1133 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1134 		return error;
1135 	}
1136 
1137 	/* Create DMA maps for Tx buffers. */
1138 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1139 		txd = &sc->jme_cdata.jme_txdesc[i];
1140 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1141 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1142 				&txd->tx_dmamap);
1143 		if (error) {
1144 			int j;
1145 
1146 			device_printf(sc->jme_dev,
1147 			    "could not create %dth Tx dmamap.\n", i);
1148 
1149 			for (j = 0; j < i; ++j) {
1150 				txd = &sc->jme_cdata.jme_txdesc[j];
1151 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1152 						   txd->tx_dmamap);
1153 			}
1154 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1155 			sc->jme_cdata.jme_tx_tag = NULL;
1156 			return error;
1157 		}
1158 	}
1159 
1160 	/*
1161 	 * Create DMA stuff for the RX buffers
1162 	 */
1163 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1164 		error = jme_rxbuf_dma_alloc(sc, i);
1165 		if (error)
1166 			return error;
1167 	}
1168 	return 0;
1169 }
1170 
1171 static void
1172 jme_dma_free(struct jme_softc *sc)
1173 {
1174 	struct jme_txdesc *txd;
1175 	struct jme_rxdesc *rxd;
1176 	struct jme_rxdata *rdata;
1177 	int i, r;
1178 
1179 	/* Tx ring */
1180 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1181 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1182 		    sc->jme_cdata.jme_tx_ring_map);
1183 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1184 		    sc->jme_cdata.jme_tx_ring,
1185 		    sc->jme_cdata.jme_tx_ring_map);
1186 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1187 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1188 	}
1189 
1190 	/* Rx ring */
1191 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1192 		rdata = &sc->jme_cdata.jme_rx_data[r];
1193 		if (rdata->jme_rx_ring_tag != NULL) {
1194 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1195 					  rdata->jme_rx_ring_map);
1196 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1197 					rdata->jme_rx_ring,
1198 					rdata->jme_rx_ring_map);
1199 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1200 			rdata->jme_rx_ring_tag = NULL;
1201 		}
1202 	}
1203 
1204 	/* Tx buffers */
1205 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1206 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1207 			txd = &sc->jme_cdata.jme_txdesc[i];
1208 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1209 			    txd->tx_dmamap);
1210 		}
1211 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1212 		sc->jme_cdata.jme_tx_tag = NULL;
1213 	}
1214 
1215 	/* Rx buffers */
1216 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1217 		rdata = &sc->jme_cdata.jme_rx_data[r];
1218 		if (rdata->jme_rx_tag != NULL) {
1219 			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1220 				rxd = &rdata->jme_rxdesc[i];
1221 				bus_dmamap_destroy(rdata->jme_rx_tag,
1222 						   rxd->rx_dmamap);
1223 			}
1224 			bus_dmamap_destroy(rdata->jme_rx_tag,
1225 					   rdata->jme_rx_sparemap);
1226 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1227 			rdata->jme_rx_tag = NULL;
1228 		}
1229 	}
1230 
1231 	/* Shadow status block. */
1232 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1233 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1234 		    sc->jme_cdata.jme_ssb_map);
1235 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1236 		    sc->jme_cdata.jme_ssb_block,
1237 		    sc->jme_cdata.jme_ssb_map);
1238 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1239 		sc->jme_cdata.jme_ssb_tag = NULL;
1240 	}
1241 
1242 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1243 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1244 		sc->jme_cdata.jme_buffer_tag = NULL;
1245 	}
1246 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1247 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1248 		sc->jme_cdata.jme_ring_tag = NULL;
1249 	}
1250 
1251 	if (sc->jme_cdata.jme_txdesc != NULL) {
1252 		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1253 		sc->jme_cdata.jme_txdesc = NULL;
1254 	}
1255 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1256 		rdata = &sc->jme_cdata.jme_rx_data[r];
1257 		if (rdata->jme_rxdesc != NULL) {
1258 			kfree(rdata->jme_rxdesc, M_DEVBUF);
1259 			rdata->jme_rxdesc = NULL;
1260 		}
1261 	}
1262 }
1263 
1264 /*
1265  *	Make sure the interface is stopped at reboot time.
1266  */
1267 static int
1268 jme_shutdown(device_t dev)
1269 {
1270 	return jme_suspend(dev);
1271 }
1272 
1273 #ifdef notyet
1274 /*
1275  * Unlike other ethernet controllers, the JMC250 requires the
1276  * link speed to be explicitly reset to 10/100Mbps, as a
1277  * gigabit link consumes more than 375mA.
1278  * Note that we reset the link speed to 10/100Mbps with
1279  * auto-negotiation, but we don't know whether that operation
1280  * will succeed, as we have no control after powering
1281  * off.  If the renegotiation fails, WOL may not work.  Running
1282  * at 1Gbps draws more than the 375mA at 3.3V allowed by the
1283  * PCI specification, and that would result in power to the
1284  * ethernet controller being shut down completely.
1285  *
1286  * TODO
1287  *  Save the currently negotiated media speed/duplex/flow-control
1288  *  in the softc and restore the same link after resuming.
1289  *  PHY handling, such as powering down or resetting to 100Mbps,
1290  *  may be better handled in the PHY driver's suspend method.
1291  */
1292 static void
1293 jme_setlinkspeed(struct jme_softc *sc)
1294 {
1295 	struct mii_data *mii;
1296 	int aneg, i;
1297 
1298 	JME_LOCK_ASSERT(sc);
1299 
1300 	mii = device_get_softc(sc->jme_miibus);
1301 	mii_pollstat(mii);
1302 	aneg = 0;
1303 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1304 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1305 		case IFM_10_T:
1306 		case IFM_100_TX:
1307 			return;
1308 		case IFM_1000_T:
1309 			aneg++;
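			/* FALLTHROUGH */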
1310 		default:
1311 			break;
1312 		}
1313 	}
1314 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1315 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1316 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1317 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1318 	    BMCR_AUTOEN | BMCR_STARTNEG);
1319 	DELAY(1000);
1320 	if (aneg != 0) {
1321 		/* Poll the link state until jme(4) gets a 10/100 link. */
1322 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1323 			mii_pollstat(mii);
1324 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1325 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1326 				case IFM_10_T:
1327 				case IFM_100_TX:
1328 					jme_mac_config(sc);
1329 					return;
1330 				default:
1331 					break;
1332 				}
1333 			}
1334 			JME_UNLOCK(sc);
1335 			pause("jmelnk", hz);
1336 			JME_LOCK(sc);
1337 		}
1338 		if (i == MII_ANEGTICKS_GIGE)
1339 			device_printf(sc->jme_dev, "establishing link failed, "
1340 			    "WOL may not work!\n");
1341 	}
1342 	/*
1343 	 * No link; force the MAC into a 100Mbps, full-duplex link.
1344 	 * This is the last resort and may or may not work.
1345 	 */
1346 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1347 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1348 	jme_mac_config(sc);
1349 }
1350 
1351 static void
1352 jme_setwol(struct jme_softc *sc)
1353 {
1354 	struct ifnet *ifp = &sc->arpcom.ac_if;
1355 	uint32_t gpr, pmcs;
1356 	uint16_t pmstat;
1357 	int pmc;
1358 
1359 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1360 		/* No PME capability, PHY power down. */
1361 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1362 		    MII_BMCR, BMCR_PDOWN);
1363 		return;
1364 	}
1365 
1366 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1367 	pmcs = CSR_READ_4(sc, JME_PMCS);
1368 	pmcs &= ~PMCS_WOL_ENB_MASK;
1369 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1370 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1371 		/* Enable PME message. */
1372 		gpr |= GPREG0_PME_ENB;
1373 		/* For gigabit controllers, reset link speed to 10/100. */
1374 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1375 			jme_setlinkspeed(sc);
1376 	}
1377 
1378 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1379 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1380 
1381 	/* Request PME. */
1382 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1383 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1384 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1385 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1386 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1387 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1388 		/* No WOL, PHY power down. */
1389 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1390 		    MII_BMCR, BMCR_PDOWN);
1391 	}
1392 }
1393 #endif
1394 
1395 static int
1396 jme_suspend(device_t dev)
1397 {
1398 	struct jme_softc *sc = device_get_softc(dev);
1399 	struct ifnet *ifp = &sc->arpcom.ac_if;
1400 
1401 	lwkt_serialize_enter(ifp->if_serializer);
1402 	jme_stop(sc);
1403 #ifdef notyet
1404 	jme_setwol(sc);
1405 #endif
1406 	lwkt_serialize_exit(ifp->if_serializer);
1407 
1408 	return (0);
1409 }
1410 
1411 static int
1412 jme_resume(device_t dev)
1413 {
1414 	struct jme_softc *sc = device_get_softc(dev);
1415 	struct ifnet *ifp = &sc->arpcom.ac_if;
1416 #ifdef notyet
1417 	int pmc;
1418 #endif
1419 
1420 	lwkt_serialize_enter(ifp->if_serializer);
1421 
1422 #ifdef notyet
1423 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1424 		uint16_t pmstat;
1425 
1426 		pmstat = pci_read_config(sc->jme_dev,
1427 		    pmc + PCIR_POWER_STATUS, 2);
1428 		/* Disable PME and clear the PME status. */
1429 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1430 		pci_write_config(sc->jme_dev,
1431 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1432 	}
1433 #endif
1434 
1435 	if (ifp->if_flags & IFF_UP)
1436 		jme_init(sc);
1437 
1438 	lwkt_serialize_exit(ifp->if_serializer);
1439 
1440 	return (0);
1441 }
1442 
1443 static int
1444 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1445 {
1446 	struct jme_txdesc *txd;
1447 	struct jme_desc *desc;
1448 	struct mbuf *m;
1449 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1450 	int maxsegs, nsegs;
1451 	int error, i, prod, symbol_desc;
1452 	uint32_t cflags, flag64;
1453 
1454 	M_ASSERTPKTHDR((*m_head));
1455 
1456 	prod = sc->jme_cdata.jme_tx_prod;
1457 	txd = &sc->jme_cdata.jme_txdesc[prod];
1458 
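	/*
	 * In 64-bit DMA mode the chain starts with a "symbol" TX desc
	 * that carries no payload, so one extra slot must be reserved
	 * for it; see the chain setup below.
	 */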
1459 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1460 		symbol_desc = 1;
1461 	else
1462 		symbol_desc = 0;
1463 
1464 	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1465 		  (JME_TXD_RSVD + symbol_desc);
1466 	if (maxsegs > JME_MAXTXSEGS)
1467 		maxsegs = JME_MAXTXSEGS;
1468 	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1469 		("not enough segments %d\n", maxsegs));
1470 
1471 	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1472 			txd->tx_dmamap, m_head,
1473 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1474 	if (error)
1475 		goto fail;
1476 
1477 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1478 			BUS_DMASYNC_PREWRITE);
1479 
1480 	m = *m_head;
1481 	cflags = 0;
1482 
1483 	/* Configure checksum offload. */
1484 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1485 		cflags |= JME_TD_IPCSUM;
1486 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1487 		cflags |= JME_TD_TCPCSUM;
1488 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1489 		cflags |= JME_TD_UDPCSUM;
1490 
1491 	/* Configure VLAN. */
1492 	if (m->m_flags & M_VLANTAG) {
1493 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1494 		cflags |= JME_TD_VLAN_TAG;
1495 	}
1496 
1497 	desc = &sc->jme_cdata.jme_tx_ring[prod];
1498 	desc->flags = htole32(cflags);
1499 	desc->addr_hi = htole32(m->m_pkthdr.len);
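	/* In the head TX desc the addr_hi word holds the frame length,
	 * not an address (per the descriptor layout used here). */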
1500 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1501 		/*
1502 		 * Use 64bits TX desc chain format.
1503 		 *
1504 		 * The first TX desc of the chain, which is setup here,
1505 		 * is just a symbol TX desc carrying no payload.
1506 		 */
1507 		flag64 = JME_TD_64BIT;
1508 		desc->buflen = 0;
1509 		desc->addr_lo = 0;
1510 
1511 		/* No effective TX desc is consumed */
1512 		i = 0;
1513 	} else {
1514 		/*
1515 		 * Use 32bits TX desc chain format.
1516 		 *
1517 		 * The first TX desc of the chain, which is setup here,
1518 		 * is an effective TX desc carrying the first segment of
1519 		 * the mbuf chain.
1520 		 */
1521 		flag64 = 0;
1522 		desc->buflen = htole32(txsegs[0].ds_len);
1523 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1524 
1525 		/* One effective TX desc is consumed */
1526 		i = 1;
1527 	}
1528 	sc->jme_cdata.jme_tx_cnt++;
1529 	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1530 		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1531 	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1532 
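	/*
	 * Start tx_ndesc at 1 - i so that after the "tx_ndesc += nsegs"
	 * below it totals nsegs + 1 in 64-bit mode (payload descs plus
	 * the symbol desc) and nsegs in 32-bit mode.
	 */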
1533 	txd->tx_ndesc = 1 - i;
1534 	for (; i < nsegs; i++) {
1535 		desc = &sc->jme_cdata.jme_tx_ring[prod];
1536 		desc->flags = htole32(JME_TD_OWN | flag64);
1537 		desc->buflen = htole32(txsegs[i].ds_len);
1538 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1539 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1540 
1541 		sc->jme_cdata.jme_tx_cnt++;
1542 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1543 			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1544 		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1545 	}
1546 
1547 	/* Update producer index. */
1548 	sc->jme_cdata.jme_tx_prod = prod;
1549 	/*
1550 	 * Finally, request an interrupt and pass ownership of the
1551 	 * first descriptor to the hardware.
1552 	 */
1553 	desc = txd->tx_desc;
1554 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1555 
1556 	txd->tx_m = m;
1557 	txd->tx_ndesc += nsegs;
1558 
1559 	return 0;
1560 fail:
1561 	m_freem(*m_head);
1562 	*m_head = NULL;
1563 	return error;
1564 }
1565 
1566 static void
1567 jme_start(struct ifnet *ifp)
1568 {
1569 	struct jme_softc *sc = ifp->if_softc;
1570 	struct mbuf *m_head;
1571 	int enq = 0;
1572 
1573 	ASSERT_SERIALIZED(ifp->if_serializer);
1574 
1575 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1576 		ifq_purge(&ifp->if_snd);
1577 		return;
1578 	}
1579 
1580 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1581 		return;
1582 
1583 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1584 		jme_txeof(sc);
1585 
1586 	while (!ifq_is_empty(&ifp->if_snd)) {
1587 		/*
1588 		 * Check the number of available TX descs; always
1589 		 * leave JME_TXD_RSVD TX descs free.
1590 		 */
1591 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1592 		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1593 			ifp->if_flags |= IFF_OACTIVE;
1594 			break;
1595 		}
1596 
1597 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1598 		if (m_head == NULL)
1599 			break;
1600 
1601 		/*
1602 		 * Pack the data into the transmit ring. If we
1603 		 * don't have room, set the OACTIVE flag and wait
1604 		 * for the NIC to drain the ring.
1605 		 */
1606 		if (jme_encap(sc, &m_head)) {
1607 			KKASSERT(m_head == NULL);
1608 			ifp->if_oerrors++;
1609 			ifp->if_flags |= IFF_OACTIVE;
1610 			break;
1611 		}
1612 		enq++;
1613 
1614 		/*
1615 		 * If there's a BPF listener, bounce a copy of this frame
1616 		 * to him.
1617 		 */
1618 		ETHER_BPF_MTAP(ifp, m_head);
1619 	}
1620 
1621 	if (enq > 0) {
1622 		/*
1623 		 * Reading TXCSR takes a very long time under heavy load,
1624 		 * so cache the TXCSR value and write it, ORed with the
1625 		 * kick command, to the TXCSR.  This saves one register
1626 		 * access cycle.
1627 		 */
1628 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1629 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1630 		/* Set a timeout in case the chip goes out to lunch. */
1631 		ifp->if_timer = JME_TX_TIMEOUT;
1632 	}
1633 }
1634 
1635 static void
1636 jme_watchdog(struct ifnet *ifp)
1637 {
1638 	struct jme_softc *sc = ifp->if_softc;
1639 
1640 	ASSERT_SERIALIZED(ifp->if_serializer);
1641 
1642 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1643 		if_printf(ifp, "watchdog timeout (missed link)\n");
1644 		ifp->if_oerrors++;
1645 		jme_init(sc);
1646 		return;
1647 	}
1648 
1649 	jme_txeof(sc);
1650 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1651 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1652 			  "-- recovering\n");
1653 		if (!ifq_is_empty(&ifp->if_snd))
1654 			if_devstart(ifp);
1655 		return;
1656 	}
1657 
1658 	if_printf(ifp, "watchdog timeout\n");
1659 	ifp->if_oerrors++;
1660 	jme_init(sc);
1661 	if (!ifq_is_empty(&ifp->if_snd))
1662 		if_devstart(ifp);
1663 }
1664 
1665 static int
1666 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1667 {
1668 	struct jme_softc *sc = ifp->if_softc;
1669 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1670 	struct ifreq *ifr = (struct ifreq *)data;
1671 	int error = 0, mask;
1672 
1673 	ASSERT_SERIALIZED(ifp->if_serializer);
1674 
1675 	switch (cmd) {
1676 	case SIOCSIFMTU:
1677 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1678 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1679 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1680 			error = EINVAL;
1681 			break;
1682 		}
1683 
1684 		if (ifp->if_mtu != ifr->ifr_mtu) {
1685 			/*
1686 			 * No special configuration is required when the
1687 			 * interface MTU is changed, but the availability of
1688 			 * Tx checksum offload has to be checked against the
1689 			 * new MTU, as the Tx FIFO is only 2K.
1690 			 */
1691 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1692 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1693 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1694 			}
1695 			ifp->if_mtu = ifr->ifr_mtu;
1696 			if (ifp->if_flags & IFF_RUNNING)
1697 				jme_init(sc);
1698 		}
1699 		break;
1700 
1701 	case SIOCSIFFLAGS:
1702 		if (ifp->if_flags & IFF_UP) {
1703 			if (ifp->if_flags & IFF_RUNNING) {
1704 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1705 				    (IFF_PROMISC | IFF_ALLMULTI))
1706 					jme_set_filter(sc);
1707 			} else {
1708 				jme_init(sc);
1709 			}
1710 		} else {
1711 			if (ifp->if_flags & IFF_RUNNING)
1712 				jme_stop(sc);
1713 		}
1714 		sc->jme_if_flags = ifp->if_flags;
1715 		break;
1716 
1717 	case SIOCADDMULTI:
1718 	case SIOCDELMULTI:
1719 		if (ifp->if_flags & IFF_RUNNING)
1720 			jme_set_filter(sc);
1721 		break;
1722 
1723 	case SIOCSIFMEDIA:
1724 	case SIOCGIFMEDIA:
1725 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1726 		break;
1727 
1728 	case SIOCSIFCAP:
1729 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1730 
1731 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1732 			ifp->if_capenable ^= IFCAP_TXCSUM;
1733 			if (IFCAP_TXCSUM & ifp->if_capenable)
1734 				ifp->if_hwassist |= JME_CSUM_FEATURES;
1735 			else
1736 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1737 		}
1738 		if (mask & IFCAP_RXCSUM) {
1739 			uint32_t reg;
1740 
1741 			ifp->if_capenable ^= IFCAP_RXCSUM;
1742 			reg = CSR_READ_4(sc, JME_RXMAC);
1743 			reg &= ~RXMAC_CSUM_ENB;
1744 			if (ifp->if_capenable & IFCAP_RXCSUM)
1745 				reg |= RXMAC_CSUM_ENB;
1746 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1747 		}
1748 
1749 		if (mask & IFCAP_VLAN_HWTAGGING) {
1750 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1751 			jme_set_vlan(sc);
1752 		}
1753 
1754 		if (mask & IFCAP_RSS) {
1755 			ifp->if_capenable ^= IFCAP_RSS;
1756 			if (ifp->if_flags & IFF_RUNNING)
1757 				jme_init(sc);
1758 		}
1759 		break;
1760 
1761 	default:
1762 		error = ether_ioctl(ifp, cmd, data);
1763 		break;
1764 	}
1765 	return (error);
1766 }
1767 
1768 static void
1769 jme_mac_config(struct jme_softc *sc)
1770 {
1771 	struct mii_data *mii;
1772 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1773 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1774 
1775 	mii = device_get_softc(sc->jme_miibus);
1776 
1777 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1778 	DELAY(10);
1779 	CSR_WRITE_4(sc, JME_GHC, 0);
1780 	ghc = 0;
1781 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1782 	rxmac &= ~RXMAC_FC_ENB;
1783 	txmac = CSR_READ_4(sc, JME_TXMAC);
1784 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1785 	txpause = CSR_READ_4(sc, JME_TXPFC);
1786 	txpause &= ~TXPFC_PAUSE_ENB;
1787 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1788 		ghc |= GHC_FULL_DUPLEX;
1789 		rxmac &= ~RXMAC_COLL_DET_ENB;
1790 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1791 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1792 		    TXMAC_FRAME_BURST);
1793 #ifdef notyet
1794 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1795 			txpause |= TXPFC_PAUSE_ENB;
1796 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1797 			rxmac |= RXMAC_FC_ENB;
1798 #endif
1799 		/* Disable retry transmit timer/retry limit. */
1800 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1801 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1802 	} else {
1803 		rxmac |= RXMAC_COLL_DET_ENB;
1804 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1805 		/* Enable retry transmit timer/retry limit. */
1806 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1807 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1808 	}
1809 
1810 	/*
1811 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1812 	 */
1813 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1814 	gp1 &= ~GPREG1_WA_HDX;
1815 
1816 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1817 		hdx = 1;
1818 
1819 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1820 	case IFM_10_T:
1821 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1822 		if (hdx)
1823 			gp1 |= GPREG1_WA_HDX;
1824 		break;
1825 
1826 	case IFM_100_TX:
1827 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1828 		if (hdx)
1829 			gp1 |= GPREG1_WA_HDX;
1830 
1831 		/*
1832 		 * Use extended FIFO depth to work around CRC errors
1833 		 * emitted by chips before the JMC250B.
1834 		 */
1835 		phyconf = JMPHY_CONF_EXTFIFO;
1836 		break;
1837 
1838 	case IFM_1000_T:
1839 		if (sc->jme_caps & JME_CAP_FASTETH)
1840 			break;
1841 
1842 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1843 		if (hdx)
1844 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1845 		break;
1846 
1847 	default:
1848 		break;
1849 	}
1850 	CSR_WRITE_4(sc, JME_GHC, ghc);
1851 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1852 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1853 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1854 
1855 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1856 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1857 				    JMPHY_CONF, phyconf);
1858 	}
1859 	if (sc->jme_workaround & JME_WA_HDX)
1860 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1861 }
1862 
1863 static void
1864 jme_intr(void *xsc)
1865 {
1866 	struct jme_softc *sc = xsc;
1867 	struct ifnet *ifp = &sc->arpcom.ac_if;
1868 	uint32_t status;
1869 	int r;
1870 
1871 	ASSERT_SERIALIZED(ifp->if_serializer);
1872 
1873 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1874 	if (status == 0 || status == 0xFFFFFFFF)
1875 		return;
1876 
1877 	/* Disable interrupts. */
1878 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1879 
1880 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1881 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1882 		goto back;
1883 
1884 	/* Reset PCC counter/timer and Ack interrupts. */
1885 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1886 
1887 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1888 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1889 
1890 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1891 		if (status & jme_rx_status[r].jme_coal) {
1892 			status |= jme_rx_status[r].jme_coal |
1893 				  jme_rx_status[r].jme_comp;
1894 		}
1895 	}
1896 
1897 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1898 
1899 	if (ifp->if_flags & IFF_RUNNING) {
1900 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1901 			jme_rx_intr(sc, status);
1902 
1903 		if (status & INTR_RXQ_DESC_EMPTY) {
1904 			/*
1905 			 * Notify the hardware that new Rx buffers are
1906 			 * available.  Reading RXCSR takes a very long time
1907 			 * under heavy load, so cache the RXCSR value and
1908 			 * write it back ORed with the kick command.  This
1909 			 * saves one register access cycle.
1910 			 */
1911 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1912 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1913 		}
1914 
1915 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1916 			jme_txeof(sc);
1917 			if (!ifq_is_empty(&ifp->if_snd))
1918 				if_devstart(ifp);
1919 		}
1920 	}
1921 back:
1922 	/* Reenable interrupts. */
1923 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1924 }
1925 
1926 static void
1927 jme_txeof(struct jme_softc *sc)
1928 {
1929 	struct ifnet *ifp = &sc->arpcom.ac_if;
1930 	struct jme_txdesc *txd;
1931 	uint32_t status;
1932 	int cons, nsegs;
1933 
1934 	cons = sc->jme_cdata.jme_tx_cons;
1935 	if (cons == sc->jme_cdata.jme_tx_prod)
1936 		return;
1937 
1938 	/*
1939 	 * Go through our Tx list and free mbufs for those
1940 	 * frames which have been transmitted.
1941 	 */
1942 	while (cons != sc->jme_cdata.jme_tx_prod) {
1943 		txd = &sc->jme_cdata.jme_txdesc[cons];
1944 		KASSERT(txd->tx_m != NULL,
1945 			("%s: freeing NULL mbuf!\n", __func__));
1946 
1947 		status = le32toh(txd->tx_desc->flags);
1948 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1949 			break;
1950 
1951 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1952 			ifp->if_oerrors++;
1953 		} else {
1954 			ifp->if_opackets++;
1955 			if (status & JME_TD_COLLISION) {
1956 				ifp->if_collisions +=
1957 				    le32toh(txd->tx_desc->buflen) &
1958 				    JME_TD_BUF_LEN_MASK;
1959 			}
1960 		}
1961 
1962 		/*
1963 		 * Only the first descriptor of a multi-descriptor
1964 		 * transmission is updated, so the driver has to skip the
1965 		 * entire descriptor chain for the transmitted frame.  In
1966 		 * other words, the JME_TD_OWN bit is valid only in the
1967 		 * first descriptor of a multi-descriptor transmission.
1968 		 */
1969 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1970 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
1971 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
1972 		}
1973 
1974 		/* Reclaim transferred mbufs. */
1975 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1976 		m_freem(txd->tx_m);
1977 		txd->tx_m = NULL;
1978 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1979 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
1980 			("%s: Active Tx desc counter was garbled\n", __func__));
1981 		txd->tx_ndesc = 0;
1982 	}
1983 	sc->jme_cdata.jme_tx_cons = cons;
1984 
1985 	if (sc->jme_cdata.jme_tx_cnt == 0)
1986 		ifp->if_timer = 0;
1987 
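	/*
	 * Clear OACTIVE only when there is room for a worst-case
	 * fragmented frame (jme_txd_spare) on top of the reserved
	 * descriptor headroom (JME_TXD_RSVD).
	 */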
1988 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
1989 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
1990 		ifp->if_flags &= ~IFF_OACTIVE;
1991 }
1992 
1993 static __inline void
1994 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
1995 {
1996 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
1997 	int i;
1998 
1999 	for (i = 0; i < count; ++i) {
2000 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2001 
2002 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2003 		desc->buflen = htole32(MCLBYTES);
2004 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2005 	}
2006 }
2007 
2008 /* Receive a frame. */
2009 static void
2010 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2011 {
2012 	struct ifnet *ifp = &sc->arpcom.ac_if;
2013 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2014 	struct jme_desc *desc;
2015 	struct jme_rxdesc *rxd;
2016 	struct mbuf *mp, *m;
2017 	uint32_t flags, status;
2018 	int cons, count, nsegs;
2019 
2020 	cons = rdata->jme_rx_cons;
2021 	desc = &rdata->jme_rx_ring[cons];
2022 	flags = le32toh(desc->flags);
2023 	status = le32toh(desc->buflen);
2024 	nsegs = JME_RX_NSEGS(status);
2025 
2026 	JME_RSS_DPRINTF(sc, 10, "ring%d, flags 0x%08x, "
2027 			"hash 0x%08x, hash type 0x%08x\n",
2028 			ring, flags, desc->addr_hi, desc->addr_lo);
2029 
2030 	if (status & JME_RX_ERR_STAT) {
2031 		ifp->if_ierrors++;
2032 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2033 #ifdef JME_SHOW_ERRORS
2034 		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2035 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2036 #endif
2037 		rdata->jme_rx_cons += nsegs;
2038 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2039 		return;
2040 	}
2041 
2042 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2043 	for (count = 0; count < nsegs; count++,
2044 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2045 		rxd = &rdata->jme_rxdesc[cons];
2046 		mp = rxd->rx_m;
2047 
2048 		/* Add a new receive buffer to the ring. */
2049 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2050 			ifp->if_iqdrops++;
2051 			/* Reuse buffer. */
2052 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2053 			if (rdata->jme_rxhead != NULL) {
2054 				m_freem(rdata->jme_rxhead);
2055 				JME_RXCHAIN_RESET(sc, ring);
2056 			}
2057 			break;
2058 		}
2059 
2060 		/*
2061 		 * Assume we've received a full sized frame.
2062 		 * The actual size is fixed up when we encounter the end
2063 		 * of a multi-segment frame.
2064 		 */
2065 		mp->m_len = MCLBYTES;
2066 
2067 		/* Chain received mbufs. */
2068 		if (rdata->jme_rxhead == NULL) {
2069 			rdata->jme_rxhead = mp;
2070 			rdata->jme_rxtail = mp;
2071 		} else {
2072 			/*
2073 			 * The receive processor can handle a maximum frame
2074 			 * size of 65535 bytes.
2075 			 */
2076 			mp->m_flags &= ~M_PKTHDR;
2077 			rdata->jme_rxtail->m_next = mp;
2078 			rdata->jme_rxtail = mp;
2079 		}
2080 
2081 		if (count == nsegs - 1) {
2082 			/* Last desc. for this frame. */
2083 			m = rdata->jme_rxhead;
2084 			/* XXX assert PKTHDR? */
2085 			m->m_flags |= M_PKTHDR;
2086 			m->m_pkthdr.len = rdata->jme_rxlen;
2087 			if (nsegs > 1) {
2088 				/* Set first mbuf size. */
2089 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2090 				/* Set last mbuf size. */
2091 				mp->m_len = rdata->jme_rxlen -
2092 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2093 				    (MCLBYTES * (nsegs - 2)));
2094 			} else {
2095 				m->m_len = rdata->jme_rxlen;
2096 			}
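			/*
			 * Worked example, assuming MCLBYTES is 2048 and
			 * JME_RX_PAD_BYTES is 10: a frame reported as
			 * 5010 bytes by the hardware spans nsegs = 3
			 * clusters with jme_rxlen = 5000; the first mbuf
			 * then holds 2038 bytes, the middle one 2048 and
			 * the last one 5000 - (2038 + 2048) = 914.
			 */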
2097 			m->m_pkthdr.rcvif = ifp;
2098 
2099 			/*
2100 			 * Account for the 10 bytes of auto padding used to
2101 			 * align the IP header on a 32bit boundary.  Also
2102 			 * note that the CRC bytes are automatically removed
2103 			 * by the hardware.
2104 			 */
2105 			m->m_data += JME_RX_PAD_BYTES;
2106 
2107 			/* Set checksum information. */
2108 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2109 			    (flags & JME_RD_IPV4)) {
2110 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2111 				if (flags & JME_RD_IPCSUM)
2112 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2113 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2114 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2115 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2116 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2117 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2118 					m->m_pkthdr.csum_flags |=
2119 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2120 					m->m_pkthdr.csum_data = 0xffff;
2121 				}
2122 			}
2123 
2124 			/* Check for VLAN tagged packets. */
2125 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2126 			    (flags & JME_RD_VLAN_TAG)) {
2127 				m->m_pkthdr.ether_vlantag =
2128 				    flags & JME_RD_VLAN_MASK;
2129 				m->m_flags |= M_VLANTAG;
2130 			}
2131 
2132 			ifp->if_ipackets++;
2133 			/* Pass it on. */
2134 			ether_input_chain(ifp, m, NULL, chain);
2135 
2136 			/* Reset mbuf chains. */
2137 			JME_RXCHAIN_RESET(sc, ring);
2138 #ifdef JME_RSS_DEBUG
2139 			sc->jme_rx_ring_pkt[ring]++;
2140 #endif
2141 		}
2142 	}
2143 
2144 	rdata->jme_rx_cons += nsegs;
2145 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2146 }
2147 
2148 static int
2149 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2150 		int count)
2151 {
2152 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2153 	struct jme_desc *desc;
2154 	int nsegs, prog, pktlen;
2155 
2156 	prog = 0;
2157 	for (;;) {
2158 #ifdef DEVICE_POLLING
2159 		if (count >= 0 && count-- == 0)
2160 			break;
2161 #endif
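		/*
		 * Stop at the first descriptor that is still owned by
		 * the hardware or whose status is not yet valid.
		 */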
2162 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2163 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2164 			break;
2165 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2166 			break;
2167 
2168 		/*
2169 		 * Check the number of segments against the received
2170 		 * byte count.  A mismatch would indicate that the
2171 		 * hardware is still updating the Rx descriptors.  I'm
2172 		 * not sure whether this check is needed.
2173 		 */
2174 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2175 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2176 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2177 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2178 				  "and packet size(%d) mismatch\n",
2179 				  nsegs, pktlen);
2180 			break;
2181 		}
2182 
2183 		/* Received a frame. */
2184 		jme_rxpkt(sc, ring, chain);
2185 		prog++;
2186 	}
2187 	return prog;
2188 }
2189 
2190 static void
2191 jme_rxeof(struct jme_softc *sc, int ring)
2192 {
2193 	struct mbuf_chain chain[MAXCPU];
2194 
2195 	ether_input_chain_init(chain);
2196 	if (jme_rxeof_chain(sc, ring, chain, -1))
2197 		ether_input_dispatch(chain);
2198 }
2199 
2200 static void
2201 jme_tick(void *xsc)
2202 {
2203 	struct jme_softc *sc = xsc;
2204 	struct ifnet *ifp = &sc->arpcom.ac_if;
2205 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2206 
2207 	lwkt_serialize_enter(ifp->if_serializer);
2208 
2209 	mii_tick(mii);
2210 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2211 
2212 	lwkt_serialize_exit(ifp->if_serializer);
2213 }
2214 
2215 static void
2216 jme_reset(struct jme_softc *sc)
2217 {
2218 #ifdef foo
2219 	/* Stop receiver, transmitter. */
2220 	jme_stop_rx(sc);
2221 	jme_stop_tx(sc);
2222 #endif
2223 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2224 	DELAY(10);
2225 	CSR_WRITE_4(sc, JME_GHC, 0);
2226 }
2227 
2228 static void
2229 jme_init(void *xsc)
2230 {
2231 	struct jme_softc *sc = xsc;
2232 	struct ifnet *ifp = &sc->arpcom.ac_if;
2233 	struct mii_data *mii;
2234 	uint8_t eaddr[ETHER_ADDR_LEN];
2235 	bus_addr_t paddr;
2236 	uint32_t reg;
2237 	int error, r;
2238 
2239 	ASSERT_SERIALIZED(ifp->if_serializer);
2240 
2241 	/*
2242 	 * Cancel any pending I/O.
2243 	 */
2244 	jme_stop(sc);
2245 
2246 	/*
2247 	 * Reset the chip to a known state.
2248 	 */
2249 	jme_reset(sc);
2250 
2251 	sc->jme_txd_spare =
2252 	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2253 	KKASSERT(sc->jme_txd_spare >= 1);
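	/*
	 * For example, with the standard 1500 byte MTU this works out
	 * to a single spare descriptor, assuming MCLBYTES is 2048;
	 * larger MTUs need proportionally more.
	 */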
2254 
2255 	/*
2256 	 * If we use 64bit address mode for transmitting, each Tx request
2257 	 * needs one more symbol descriptor.
2258 	 */
2259 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2260 		sc->jme_txd_spare += 1;
2261 
2262 #ifdef RSS
2263 	if (ifp->if_capenable & IFCAP_RSS)
2264 		jme_enable_rss(sc);
2265 	else
2266 #endif
2267 		jme_disable_rss(sc);
2268 
2269 	/* Init RX descriptors */
2270 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2271 		error = jme_init_rx_ring(sc, r);
2272 		if (error) {
2273 			if_printf(ifp, "initialization failed: "
2274 				  "no memory for %dth RX ring.\n", r);
2275 			jme_stop(sc);
2276 			return;
2277 		}
2278 	}
2279 
2280 	/* Init TX descriptors */
2281 	jme_init_tx_ring(sc);
2282 
2283 	/* Initialize shadow status block. */
2284 	jme_init_ssb(sc);
2285 
2286 	/* Reprogram the station address. */
2287 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2288 	CSR_WRITE_4(sc, JME_PAR0,
2289 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2290 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
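	/* PAR0 holds station address bytes 0-3, PAR1 holds bytes 4-5. */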
2291 
2292 	/*
2293 	 * Configure Tx queue.
2294 	 *  Tx priority queue weight value : 0
2295 	 *  Tx FIFO threshold for processing next packet : 16QW
2296 	 *  Maximum Tx DMA length : 512
2297 	 *  Allow Tx DMA burst.
2298 	 */
2299 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2300 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2301 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2302 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2303 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2304 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2305 
2306 	/* Set Tx descriptor counter. */
2307 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2308 
2309 	/* Set Tx ring address to the hardware. */
2310 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2311 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2312 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2313 
2314 	/* Configure TxMAC parameters. */
2315 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2316 	reg |= TXMAC_THRESH_1_PKT;
2317 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2318 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2319 
2320 	/*
2321 	 * Configure Rx queue.
2322 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2323 	 *  FIFO threshold for processing next packet : 128QW
2324 	 *  Rx queue 0 select
2325 	 *  Max Rx DMA length : 128
2326 	 *  Rx descriptor retry : 32
2327 	 *  Rx descriptor retry time gap : 256ns
2328 	 *  Don't receive runt/bad frame.
2329 	 */
2330 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2331 #if 0
2332 	/*
2333 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2334 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2335 	 * decrease FIFO threshold to reduce the FIFO overruns for
2336 	 * frames larger than 4000 bytes.
2337 	 * For best performance of standard MTU sized frames use
2338 	 * maximum allowable FIFO threshold, 128QW.
2339 	 */
2340 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2341 	    JME_RX_FIFO_SIZE)
2342 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2343 	else
2344 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2345 #else
2346 	/* Improve PCI Express compatibility */
2347 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2348 #endif
2349 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2350 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2351 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2352 	/* XXX TODO DROP_BAD */
2353 
2354 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2355 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2356 
2357 		/* Set Rx descriptor counter. */
2358 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2359 
2360 		/* Set Rx ring address to the hardware. */
2361 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2362 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2363 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2364 	}
2365 
2366 	/* Clear receive filter. */
2367 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2368 
2369 	/* Set up the receive filter. */
2370 	jme_set_filter(sc);
2371 	jme_set_vlan(sc);
2372 
2373 	/*
2374 	 * Disable all WOL bits as WOL can interfere with normal Rx
2375 	 * operation.  Also clear the WOL detection status bits.
2376 	 */
2377 	reg = CSR_READ_4(sc, JME_PMCS);
2378 	reg &= ~PMCS_WOL_ENB_MASK;
2379 	CSR_WRITE_4(sc, JME_PMCS, reg);
2380 
2381 	/*
2382 	 * Pad 10 bytes right before the received frame.  This greatly
2383 	 * helps Rx performance on strict-alignment architectures, as
2384 	 * the driver does not need to copy the frame to align the payload.
2385 	 */
2386 	reg = CSR_READ_4(sc, JME_RXMAC);
2387 	reg |= RXMAC_PAD_10BYTES;
2388 
2389 	if (ifp->if_capenable & IFCAP_RXCSUM)
2390 		reg |= RXMAC_CSUM_ENB;
2391 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2392 
2393 	/* Configure general purpose reg0 */
2394 	reg = CSR_READ_4(sc, JME_GPREG0);
2395 	reg &= ~GPREG0_PCC_UNIT_MASK;
2396 	/* Set PCC timer resolution to micro-seconds unit. */
2397 	reg |= GPREG0_PCC_UNIT_US;
2398 	/*
2399 	 * Disable all shadow register posting as we have to read the
2400 	 * JME_INTR_STATUS register in jme_intr.  Also it seems that
2401 	 * it is hard to keep the interrupt status synchronized between
2402 	 * hardware and software with shadow posting, due to the
2403 	 * requirements of bus_dmamap_sync(9).
2404 	 */
2405 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2406 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2407 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2408 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2409 	/* Disable posting of DW0. */
2410 	reg &= ~GPREG0_POST_DW0_ENB;
2411 	/* Clear PME message. */
2412 	reg &= ~GPREG0_PME_ENB;
2413 	/* Set PHY address. */
2414 	reg &= ~GPREG0_PHY_ADDR_MASK;
2415 	reg |= sc->jme_phyaddr;
2416 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2417 
2418 	/* Configure Tx queue 0 packet completion coalescing. */
2419 	jme_set_tx_coal(sc);
2420 
2421 	/* Configure Rx queue 0 packet completion coalescing. */
2422 	jme_set_rx_coal(sc);
2423 
2424 	/* Configure shadow status block but don't enable posting. */
2425 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2426 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2427 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2428 
2429 	/* Disable Timer 1 and Timer 2. */
2430 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2431 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2432 
2433 	/* Configure the retry transmit period and retry limit value. */
2434 	CSR_WRITE_4(sc, JME_TXTRHD,
2435 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2436 	    TXTRHD_RT_PERIOD_MASK) |
2437 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2438 	    TXTRHD_RT_LIMIT_MASK));
2439 
2440 #ifdef DEVICE_POLLING
2441 	if (!(ifp->if_flags & IFF_POLLING))
2442 #endif
2443 	/* Initialize the interrupt mask. */
2444 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2445 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2446 
2447 	/*
2448 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2449 	 * done after detection of a valid link in jme_miibus_statchg.
2450 	 */
2451 	sc->jme_flags &= ~JME_FLAG_LINK;
2452 
2453 	/* Set the current media. */
2454 	mii = device_get_softc(sc->jme_miibus);
2455 	mii_mediachg(mii);
2456 
2457 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2458 
2459 	ifp->if_flags |= IFF_RUNNING;
2460 	ifp->if_flags &= ~IFF_OACTIVE;
2461 }
2462 
2463 static void
2464 jme_stop(struct jme_softc *sc)
2465 {
2466 	struct ifnet *ifp = &sc->arpcom.ac_if;
2467 	struct jme_txdesc *txd;
2468 	struct jme_rxdesc *rxd;
2469 	struct jme_rxdata *rdata;
2470 	int i, r;
2471 
2472 	ASSERT_SERIALIZED(ifp->if_serializer);
2473 
2474 	/*
2475 	 * Mark the interface down and cancel the watchdog timer.
2476 	 */
2477 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2478 	ifp->if_timer = 0;
2479 
2480 	callout_stop(&sc->jme_tick_ch);
2481 	sc->jme_flags &= ~JME_FLAG_LINK;
2482 
2483 	/*
2484 	 * Disable interrupts.
2485 	 */
2486 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2487 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2488 
2489 	/* Disable updating shadow status block. */
2490 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2491 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2492 
2493 	/* Stop receiver, transmitter. */
2494 	jme_stop_rx(sc);
2495 	jme_stop_tx(sc);
2496 
2497 	/*
2498 	 * Free partially finished RX segments.
2499 	 */
2500 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2501 		rdata = &sc->jme_cdata.jme_rx_data[r];
2502 		if (rdata->jme_rxhead != NULL)
2503 			m_freem(rdata->jme_rxhead);
2504 		JME_RXCHAIN_RESET(sc, r);
2505 	}
2506 
2507 	/*
2508 	 * Free RX and TX mbufs still in the queues.
2509 	 */
2510 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2511 		rdata = &sc->jme_cdata.jme_rx_data[r];
2512 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2513 			rxd = &rdata->jme_rxdesc[i];
2514 			if (rxd->rx_m != NULL) {
2515 				bus_dmamap_unload(rdata->jme_rx_tag,
2516 						  rxd->rx_dmamap);
2517 				m_freem(rxd->rx_m);
2518 				rxd->rx_m = NULL;
2519 			}
2520 		}
2521 	}
2522 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2523 		txd = &sc->jme_cdata.jme_txdesc[i];
2524 		if (txd->tx_m != NULL) {
2525 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2526 			    txd->tx_dmamap);
2527 			m_freem(txd->tx_m);
2528 			txd->tx_m = NULL;
2529 			txd->tx_ndesc = 0;
2530 		}
2531 	}
2532 }
2533 
2534 static void
2535 jme_stop_tx(struct jme_softc *sc)
2536 {
2537 	uint32_t reg;
2538 	int i;
2539 
2540 	reg = CSR_READ_4(sc, JME_TXCSR);
2541 	if ((reg & TXCSR_TX_ENB) == 0)
2542 		return;
2543 	reg &= ~TXCSR_TX_ENB;
2544 	CSR_WRITE_4(sc, JME_TXCSR, reg);
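	/* Busy-wait up to JME_TIMEOUT microseconds for the transmitter to stop. */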
2545 	for (i = JME_TIMEOUT; i > 0; i--) {
2546 		DELAY(1);
2547 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2548 			break;
2549 	}
2550 	if (i == 0)
2551 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2552 }
2553 
2554 static void
2555 jme_stop_rx(struct jme_softc *sc)
2556 {
2557 	uint32_t reg;
2558 	int i;
2559 
2560 	reg = CSR_READ_4(sc, JME_RXCSR);
2561 	if ((reg & RXCSR_RX_ENB) == 0)
2562 		return;
2563 	reg &= ~RXCSR_RX_ENB;
2564 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2565 	for (i = JME_TIMEOUT; i > 0; i--) {
2566 		DELAY(1);
2567 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2568 			break;
2569 	}
2570 	if (i == 0)
2571 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2572 }
2573 
2574 static void
2575 jme_init_tx_ring(struct jme_softc *sc)
2576 {
2577 	struct jme_chain_data *cd;
2578 	struct jme_txdesc *txd;
2579 	int i;
2580 
2581 	sc->jme_cdata.jme_tx_prod = 0;
2582 	sc->jme_cdata.jme_tx_cons = 0;
2583 	sc->jme_cdata.jme_tx_cnt = 0;
2584 
2585 	cd = &sc->jme_cdata;
2586 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2587 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2588 		txd = &sc->jme_cdata.jme_txdesc[i];
2589 		txd->tx_m = NULL;
2590 		txd->tx_desc = &cd->jme_tx_ring[i];
2591 		txd->tx_ndesc = 0;
2592 	}
2593 }
2594 
2595 static void
2596 jme_init_ssb(struct jme_softc *sc)
2597 {
2598 	struct jme_chain_data *cd;
2599 
2600 	cd = &sc->jme_cdata;
2601 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2602 }
2603 
2604 static int
2605 jme_init_rx_ring(struct jme_softc *sc, int ring)
2606 {
2607 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2608 	struct jme_rxdesc *rxd;
2609 	int i;
2610 
2611 	KKASSERT(rdata->jme_rxhead == NULL &&
2612 		 rdata->jme_rxtail == NULL &&
2613 		 rdata->jme_rxlen == 0);
2614 	rdata->jme_rx_cons = 0;
2615 
2616 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2617 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2618 		int error;
2619 
2620 		rxd = &rdata->jme_rxdesc[i];
2621 		rxd->rx_m = NULL;
2622 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2623 		error = jme_newbuf(sc, ring, rxd, 1);
2624 		if (error)
2625 			return error;
2626 	}
2627 	return 0;
2628 }
2629 
2630 static int
2631 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2632 {
2633 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2634 	struct jme_desc *desc;
2635 	struct mbuf *m;
2636 	bus_dma_segment_t segs;
2637 	bus_dmamap_t map;
2638 	int error, nsegs;
2639 
2640 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2641 	if (m == NULL)
2642 		return ENOBUFS;
2643 	/*
2644 	 * The JMC250 has a 64bit boundary alignment limitation, so
2645 	 * jme(4) takes advantage of the hardware's 10 byte padding
2646 	 * feature in order not to copy the entire frame just to
2647 	 * align the IP header on a 32bit boundary.
2648 	 */
2649 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2650 
2651 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2652 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2653 			BUS_DMA_NOWAIT);
2654 	if (error) {
2655 		m_freem(m);
2656 		if (init)
2657 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2658 		return error;
2659 	}
2660 
2661 	if (rxd->rx_m != NULL) {
2662 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2663 				BUS_DMASYNC_POSTREAD);
2664 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2665 	}
2666 	map = rxd->rx_dmamap;
2667 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2668 	rdata->jme_rx_sparemap = map;
2669 	rxd->rx_m = m;
2670 
2671 	desc = rxd->rx_desc;
2672 	desc->buflen = htole32(segs.ds_len);
2673 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2674 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2675 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
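	/*
	 * JME_RD_OWN hands the descriptor back to the hardware; the
	 * chip clears it again once the buffer has been filled, which
	 * is what jme_rxeof_chain() polls for.
	 */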
2676 
2677 	return 0;
2678 }
2679 
2680 static void
2681 jme_set_vlan(struct jme_softc *sc)
2682 {
2683 	struct ifnet *ifp = &sc->arpcom.ac_if;
2684 	uint32_t reg;
2685 
2686 	ASSERT_SERIALIZED(ifp->if_serializer);
2687 
2688 	reg = CSR_READ_4(sc, JME_RXMAC);
2689 	reg &= ~RXMAC_VLAN_ENB;
2690 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2691 		reg |= RXMAC_VLAN_ENB;
2692 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2693 }
2694 
2695 static void
2696 jme_set_filter(struct jme_softc *sc)
2697 {
2698 	struct ifnet *ifp = &sc->arpcom.ac_if;
2699 	struct ifmultiaddr *ifma;
2700 	uint32_t crc;
2701 	uint32_t mchash[2];
2702 	uint32_t rxcfg;
2703 
2704 	ASSERT_SERIALIZED(ifp->if_serializer);
2705 
2706 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2707 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2708 	    RXMAC_ALLMULTI);
2709 
2710 	/*
2711 	 * Always accept frames destined to our station address.
2712 	 * Always accept broadcast frames.
2713 	 */
2714 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2715 
2716 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2717 		if (ifp->if_flags & IFF_PROMISC)
2718 			rxcfg |= RXMAC_PROMISC;
2719 		if (ifp->if_flags & IFF_ALLMULTI)
2720 			rxcfg |= RXMAC_ALLMULTI;
2721 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2722 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2723 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2724 		return;
2725 	}
2726 
2727 	/*
2728 	 * Set up the multicast address filter by passing all multicast
2729 	 * addresses through a CRC generator, and then using the low-order
2730 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2731 	 * high-order bit of the index selects the register, while the
2732 	 * low-order 5 bits select the bit within the register.
2733 	 */
2734 	rxcfg |= RXMAC_MULTICAST;
2735 	bzero(mchash, sizeof(mchash));
2736 
2737 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2738 		if (ifma->ifma_addr->sa_family != AF_LINK)
2739 			continue;
2740 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2741 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2742 
2743 		/* Just want the 6 least significant bits. */
2744 		crc &= 0x3f;
2745 
2746 		/* Set the corresponding bit in the hash table. */
2747 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2748 	}
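	/*
	 * For example, a CRC whose low 6 bits are 0x25 sets bit 5
	 * (0x25 & 0x1f) of mchash[1] (0x25 >> 5).
	 */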
2749 
2750 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2751 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2752 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2753 }
2754 
2755 static int
2756 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2757 {
2758 	struct jme_softc *sc = arg1;
2759 	struct ifnet *ifp = &sc->arpcom.ac_if;
2760 	int error, v;
2761 
2762 	lwkt_serialize_enter(ifp->if_serializer);
2763 
2764 	v = sc->jme_tx_coal_to;
2765 	error = sysctl_handle_int(oidp, &v, 0, req);
2766 	if (error || req->newptr == NULL)
2767 		goto back;
2768 
2769 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2770 		error = EINVAL;
2771 		goto back;
2772 	}
2773 
2774 	if (v != sc->jme_tx_coal_to) {
2775 		sc->jme_tx_coal_to = v;
2776 		if (ifp->if_flags & IFF_RUNNING)
2777 			jme_set_tx_coal(sc);
2778 	}
2779 back:
2780 	lwkt_serialize_exit(ifp->if_serializer);
2781 	return error;
2782 }
2783 
2784 static int
2785 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2786 {
2787 	struct jme_softc *sc = arg1;
2788 	struct ifnet *ifp = &sc->arpcom.ac_if;
2789 	int error, v;
2790 
2791 	lwkt_serialize_enter(ifp->if_serializer);
2792 
2793 	v = sc->jme_tx_coal_pkt;
2794 	error = sysctl_handle_int(oidp, &v, 0, req);
2795 	if (error || req->newptr == NULL)
2796 		goto back;
2797 
2798 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2799 		error = EINVAL;
2800 		goto back;
2801 	}
2802 
2803 	if (v != sc->jme_tx_coal_pkt) {
2804 		sc->jme_tx_coal_pkt = v;
2805 		if (ifp->if_flags & IFF_RUNNING)
2806 			jme_set_tx_coal(sc);
2807 	}
2808 back:
2809 	lwkt_serialize_exit(ifp->if_serializer);
2810 	return error;
2811 }
2812 
2813 static int
2814 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2815 {
2816 	struct jme_softc *sc = arg1;
2817 	struct ifnet *ifp = &sc->arpcom.ac_if;
2818 	int error, v;
2819 
2820 	lwkt_serialize_enter(ifp->if_serializer);
2821 
2822 	v = sc->jme_rx_coal_to;
2823 	error = sysctl_handle_int(oidp, &v, 0, req);
2824 	if (error || req->newptr == NULL)
2825 		goto back;
2826 
2827 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2828 		error = EINVAL;
2829 		goto back;
2830 	}
2831 
2832 	if (v != sc->jme_rx_coal_to) {
2833 		sc->jme_rx_coal_to = v;
2834 		if (ifp->if_flags & IFF_RUNNING)
2835 			jme_set_rx_coal(sc);
2836 	}
2837 back:
2838 	lwkt_serialize_exit(ifp->if_serializer);
2839 	return error;
2840 }
2841 
2842 static int
2843 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2844 {
2845 	struct jme_softc *sc = arg1;
2846 	struct ifnet *ifp = &sc->arpcom.ac_if;
2847 	int error, v;
2848 
2849 	lwkt_serialize_enter(ifp->if_serializer);
2850 
2851 	v = sc->jme_rx_coal_pkt;
2852 	error = sysctl_handle_int(oidp, &v, 0, req);
2853 	if (error || req->newptr == NULL)
2854 		goto back;
2855 
2856 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2857 		error = EINVAL;
2858 		goto back;
2859 	}
2860 
2861 	if (v != sc->jme_rx_coal_pkt) {
2862 		sc->jme_rx_coal_pkt = v;
2863 		if (ifp->if_flags & IFF_RUNNING)
2864 			jme_set_rx_coal(sc);
2865 	}
2866 back:
2867 	lwkt_serialize_exit(ifp->if_serializer);
2868 	return error;
2869 }
2870 
2871 static void
2872 jme_set_tx_coal(struct jme_softc *sc)
2873 {
2874 	uint32_t reg;
2875 
2876 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2877 	    PCCTX_COAL_TO_MASK;
2878 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2879 	    PCCTX_COAL_PKT_MASK;
2880 	reg |= PCCTX_COAL_TXQ0;
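	/*
	 * This should make the PCC raise a Tx completion interrupt once
	 * either jme_tx_coal_pkt packets have completed or roughly
	 * jme_tx_coal_to microseconds (GPREG0_PCC_UNIT_US is selected
	 * in jme_init()) have passed, whichever comes first.
	 */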
2881 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2882 }
2883 
2884 static void
2885 jme_set_rx_coal(struct jme_softc *sc)
2886 {
2887 	uint32_t reg;
2888 	int r;
2889 
2890 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2891 	    PCCRX_COAL_TO_MASK;
2892 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2893 	    PCCRX_COAL_PKT_MASK;
2894 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
2895 		if (r < sc->jme_rx_ring_inuse)
2896 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
2897 		else
2898 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
2899 	}
2900 }
2901 
2902 #ifdef DEVICE_POLLING
2903 
2904 static void
2905 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2906 {
2907 	struct jme_softc *sc = ifp->if_softc;
2908 	struct mbuf_chain chain[MAXCPU];
2909 	uint32_t status;
2910 	int r, prog = 0;
2911 
2912 	ASSERT_SERIALIZED(ifp->if_serializer);
2913 
2914 	switch (cmd) {
2915 	case POLL_REGISTER:
2916 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2917 		break;
2918 
2919 	case POLL_DEREGISTER:
2920 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2921 		break;
2922 
2923 	case POLL_AND_CHECK_STATUS:
2924 	case POLL_ONLY:
2925 		status = CSR_READ_4(sc, JME_INTR_STATUS);
2926 
2927 		ether_input_chain_init(chain);
2928 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
2929 			prog += jme_rxeof_chain(sc, r, chain, count);
2930 		if (prog)
2931 			ether_input_dispatch(chain);
2932 
2933 		if (status & INTR_RXQ_DESC_EMPTY) {
2934 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2935 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2936 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2937 		}
2938 
2939 		jme_txeof(sc);
2940 		if (!ifq_is_empty(&ifp->if_snd))
2941 			if_devstart(ifp);
2942 		break;
2943 	}
2944 }
2945 
2946 #endif	/* DEVICE_POLLING */
2947 
2948 static int
2949 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
2950 {
2951 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2952 	bus_dmamem_t dmem;
2953 	int error;
2954 
2955 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
2956 			JME_RX_RING_ALIGN, 0,
2957 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2958 			JME_RX_RING_SIZE(sc),
2959 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
2960 	if (error) {
2961 		device_printf(sc->jme_dev,
2962 		    "could not allocate %dth Rx ring.\n", ring);
2963 		return error;
2964 	}
2965 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
2966 	rdata->jme_rx_ring_map = dmem.dmem_map;
2967 	rdata->jme_rx_ring = dmem.dmem_addr;
2968 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
2969 
2970 	return 0;
2971 }
2972 
2973 static int
2974 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
2975 {
2976 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2977 	int i, error;
2978 
2979 	/* Create tag for Rx buffers. */
2980 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
2981 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
2982 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2983 	    BUS_SPACE_MAXADDR,		/* highaddr */
2984 	    NULL, NULL,			/* filter, filterarg */
2985 	    MCLBYTES,			/* maxsize */
2986 	    1,				/* nsegments */
2987 	    MCLBYTES,			/* maxsegsize */
2988 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
2989 	    &rdata->jme_rx_tag);
2990 	if (error) {
2991 		device_printf(sc->jme_dev,
2992 		    "could not create %dth Rx DMA tag.\n", ring);
2993 		return error;
2994 	}
2995 
2996 	/* Create DMA maps for Rx buffers. */
2997 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
2998 				  &rdata->jme_rx_sparemap);
2999 	if (error) {
3000 		device_printf(sc->jme_dev,
3001 		    "could not create %dth spare Rx dmamap.\n", ring);
3002 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3003 		rdata->jme_rx_tag = NULL;
3004 		return error;
3005 	}
3006 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3007 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3008 
3009 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3010 					  &rxd->rx_dmamap);
3011 		if (error) {
3012 			int j;
3013 
3014 			device_printf(sc->jme_dev,
3015 			    "could not create %dth Rx dmamap "
3016 			    "for %dth RX ring.\n", i, ring);
3017 
3018 			for (j = 0; j < i; ++j) {
3019 				rxd = &rdata->jme_rxdesc[j];
3020 				bus_dmamap_destroy(rdata->jme_rx_tag,
3021 						   rxd->rx_dmamap);
3022 			}
3023 			bus_dmamap_destroy(rdata->jme_rx_tag,
3024 					   rdata->jme_rx_sparemap);
3025 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3026 			rdata->jme_rx_tag = NULL;
3027 			return error;
3028 		}
3029 	}
3030 	return 0;
3031 }
3032 
3033 static void
3034 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3035 {
3036 	struct mbuf_chain chain[MAXCPU];
3037 	int r, prog = 0;
3038 
3039 	ether_input_chain_init(chain);
3040 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3041 		if (status & jme_rx_status[r].jme_coal)
3042 			prog += jme_rxeof_chain(sc, r, chain, -1);
3043 	}
3044 	if (prog)
3045 		ether_input_dispatch(chain);
3046 }
3047 
3048 #ifdef RSS
3049 
3050 static void
3051 jme_enable_rss(struct jme_softc *sc)
3052 {
3053 	uint32_t rssc, ind;
3054 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3055 	int i;
3056 
3057 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3058 
3059 	KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3060 		sc->jme_rx_ring_inuse == JME_NRXRING_4,
3061 		("%s: invalid # of RX rings (%d)\n",
3062 		 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3063 
3064 	rssc = RSSC_HASH_64_ENTRY;
3065 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3066 	rssc |= sc->jme_rx_ring_inuse >> 1;
3067 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3068 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3069 
3070 	toeplitz_get_key(key, sizeof(key));
3071 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3072 		uint32_t keyreg;
3073 
3074 		keyreg = RSSKEY_REGVAL(key, i);
3075 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3076 
3077 		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3078 	}
3079 
3080 	/*
3081 	 * Create the redirect table in the following fashion:
3082 	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3083 	 */
3084 	ind = 0;
3085 	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3086 		int q;
3087 
3088 		q = i % sc->jme_rx_ring_inuse;
3089 		ind |= q << (i * 8);
3090 	}
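	/*
	 * Each 32bit RSSTBL register packs RSSTBL_REGSIZE one-byte queue
	 * indices; assuming RSSTBL_REGSIZE is 4, two rings in use yield
	 * the pattern 0x01000100 and four rings yield 0x03020100.
	 */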
3091 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3092 
3093 	for (i = 0; i < RSSTBL_NREGS; ++i)
3094 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3095 }
3096 
3097 #endif	/* RSS */
3098 
3099 static void
3100 jme_disable_rss(struct jme_softc *sc)
3101 {
3102 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3103 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3104 }
3105