/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

/* Define the following to enable printing of Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#define JME_RSS_DEBUG

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug > (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
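
/*
 * Illustrative use of the RSS debug macro above (the call shown here is
 * a sketch, not a call that exists in this section): with
 * sc->jme_rss_debug raised above the level argument (it is exported
 * read-only as the hw.jme0.rss_debug sysctl below), a receive path
 * could trace per-ring activity like
 *
 *	JME_RSS_DPRINTF(sc, 1, "ring%d: %d packets\n", ring, npkts);
 */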

static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif

static void	jme_intr(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *, int);
static int	jme_rxeof_chain(struct jme_softc *, int,
				struct mbuf_chain *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);

static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_softc *, int);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
static int	jme_rxring_dma_alloc(struct jme_softc *, int);
static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);

static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
};

static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = JME_NRXRING_DEF;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
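
/*
 * These tunables are evaluated once at boot; e.g. (illustrative values)
 * in /boot/loader.conf:
 *
 *	hw.jme.rx_desc_count="512"
 *	hw.jme.rx_ring_count="4"
 *
 * jme_attach() rounds the descriptor counts to JME_NDESC_ALIGN and
 * clamps the ring count to what the chip and ncpus2 allow.
 */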

/*
 *	Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
			      "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
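
/*
 * Illustrative SMI transaction (a sketch, not additional driver code):
 * the SMI register packs the opcode, PHY address, register address and
 * data into one 32-bit register, so fetching MII register 1 from the
 * PHY at address 1 amounts to
 *
 *	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
 *	    SMI_PHY_ADDR(1) | SMI_REG_ADDR(1));
 *
 * followed by polling until SMI_OP_EXECUTE clears and extracting the
 * result from the SMI_DATA_MASK field, exactly as done above.
 */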

/*
 *	Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 *	Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the base address of the
	 * Tx/Rx descriptor rings.  The driver therefore has to reset
	 * its internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note that just saving the value of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MACs
	 * and restoring them afterwards is not sufficient to guarantee
	 * a correct MAC state, because stopping a MAC can take a while
	 * and the hardware might update JME_TXNDA/JME_RXNDA during the
	 * stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(sc, r);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 *	Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try the next EEPROM descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
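
/*
 * Illustrative EEPROM contents (values hypothetical): after the two
 * signature bytes, each JME_EEPROM_DESC_BYTES-sized descriptor holds a
 * func/page byte, a register offset and a data byte, so a descriptor
 * such as
 *
 *	{ JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1),
 *	  JME_PAR0 + 2, 0x8c }
 *
 * contributes byte 2 of the station address; the loop above collects
 * ETHER_ADDR_LEN such bytes into eaddr[].
 */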

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
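
/*
 * Byte-order example (address hypothetical): for a station address of
 * 00:1b:8c:11:22:33 the chip would hold PAR0 = 0x118c1b00 and
 * PAR1 = 0x3322; the shifts above unpack those back into eaddr[0..5].
 */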

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

	sc->jme_rx_ring_cnt = jme_rx_ring_count;
	if (sc->jme_rx_ring_cnt <= 0)
		sc->jme_rx_ring_cnt = JME_NRXRING_1;
	if (sc->jme_rx_ring_cnt > ncpus2)
		sc->jme_rx_ring_cnt = ncpus2;

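	/*
	 * Clamp to a ring count the hardware supports: RSS spreads
	 * traffic over 4 or 2 RX queues only, so odd intermediate
	 * values are rounded down (e.g. 3 -> 2) and a count of 1
	 * leaves RSS disabled below.
	 */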
	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
		sc->jme_rx_ring_cnt = JME_NRXRING_4;
	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
		sc->jme_rx_ring_cnt = JME_NRXRING_2;

	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN) {
		sc->jme_caps |= JME_CAP_RSS;
		sc->jme_flags |= JME_FLAG_RSS;
	}
	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access would have to use
	 * different BARs to reach the registers, it's a waste of time
	 * to use I/O register space access.  JMC250 uses 16K to map
	 * the entire memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						 &sc->jme_irq_rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to find the correct PHY
	 * address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = JME_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
			       &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
				     sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	char rx_ring_pkt[32];
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
		       0, "RX ring in use");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rss_debug", CTLFLAG_RD, &sc->jme_rss_debug,
		       0, "RSS debug level");
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
				rx_ring_pkt, CTLFLAG_RD,
				&sc->jme_rx_ring_pkt[r],
				0, "RXed packets");
	}
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descriptors was set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (16 currently)
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
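
/*
 * Example (illustrative): for the first device the nodes above appear
 * under hw.jme0, so
 *
 *	sysctl hw.jme0.rx_coal_to=200
 *
 * would be dispatched to jme_sysctl_rx_coal_to().
 */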

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i;

	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_TX_RING_SIZE(sc),
			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, i);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return error;
	}
	return 0;
}
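
/*
 * Sketch of the resulting DMA tag hierarchy (descriptive only; the RX
 * ring and RX buffer details live in jme_rxring_dma_alloc() and
 * jme_rxbuf_dma_alloc(), which are not shown in this section):
 *
 *	jme_ring_tag ----> TX ring, per-ring RX rings (coherent memory)
 *	jme_buffer_tag --> shadow status block, jme_tx_tag (TX mbuf maps)
 */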

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}

/*
 *	Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link consumes
 * more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed, as we have no
 * control after powering off.  If the renegotiation fails, WOL may not
 * work.  Running at 1Gbps draws more than the 375mA at 3.3V specified
 * in the PCI specification, and that would result in power to the
 * ethernet controller being shut down completely.
 *
 * TODO
 *  Save current negotiated media speed/duplex/flow-control
 *  to softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in suspend method in phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	lwkt_serialize_enter(ifp->if_serializer);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use the 64-bit TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use the 32-bit TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
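
/*
 * Illustrative 64-bit descriptor chain for a two-segment mbuf
 * (addresses hypothetical): the symbol descriptor carries the checksum/
 * VLAN flags and the total packet length but no buffer, and ownership
 * of it is handed over last so the chip never sees a half-built chain:
 *
 *	desc[p+0]: flags = cflags|OWN|INTR, addr_hi = pktlen, buflen = 0
 *	desc[p+1]: flags = OWN|64BIT, buflen = seg0 len, addr = seg0
 *	desc[p+2]: flags = OWN|64BIT, buflen = seg1 len, addr = seg1
 */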

static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check the number of available TX descs, always
		 * leaving JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write it, ORed with the
		 * kick command, to TXCSR.  This saves one register access
		 * cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but availability of
			 * Tx checksum offload should be checked against
			 * the new MTU size, as the FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if (IFCAP_TXCSUM & ifp->if_capabilities) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) &&
		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
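
/*
 * Example (illustrative): `ifconfig jme0 -rxcsum' reaches the SIOCSIFCAP
 * case above with IFCAP_RXCSUM set in `mask', and is applied by clearing
 * the RXMAC_CSUM_ENB bit in the RXMAC register.
 */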
1759 
1760 static void
1761 jme_mac_config(struct jme_softc *sc)
1762 {
1763 	struct mii_data *mii;
1764 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1765 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1766 
1767 	mii = device_get_softc(sc->jme_miibus);
1768 
1769 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1770 	DELAY(10);
1771 	CSR_WRITE_4(sc, JME_GHC, 0);
1772 	ghc = 0;
1773 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1774 	rxmac &= ~RXMAC_FC_ENB;
1775 	txmac = CSR_READ_4(sc, JME_TXMAC);
1776 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1777 	txpause = CSR_READ_4(sc, JME_TXPFC);
1778 	txpause &= ~TXPFC_PAUSE_ENB;
1779 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1780 		ghc |= GHC_FULL_DUPLEX;
1781 		rxmac &= ~RXMAC_COLL_DET_ENB;
1782 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1783 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1784 		    TXMAC_FRAME_BURST);
1785 #ifdef notyet
1786 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1787 			txpause |= TXPFC_PAUSE_ENB;
1788 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1789 			rxmac |= RXMAC_FC_ENB;
1790 #endif
1791 		/* Disable retry transmit timer/retry limit. */
1792 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1793 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1794 	} else {
1795 		rxmac |= RXMAC_COLL_DET_ENB;
1796 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1797 		/* Enable retry transmit timer/retry limit. */
1798 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1799 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1800 	}
1801 
1802 	/*
1803 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1804 	 */
1805 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1806 	gp1 &= ~GPREG1_WA_HDX;
1807 
1808 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1809 		hdx = 1;
1810 
1811 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1812 	case IFM_10_T:
1813 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1814 		if (hdx)
1815 			gp1 |= GPREG1_WA_HDX;
1816 		break;
1817 
1818 	case IFM_100_TX:
1819 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1820 		if (hdx)
1821 			gp1 |= GPREG1_WA_HDX;
1822 
1823 		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips prior to the JMC250B.
1826 		 */
1827 		phyconf = JMPHY_CONF_EXTFIFO;
1828 		break;
1829 
1830 	case IFM_1000_T:
1831 		if (sc->jme_caps & JME_CAP_FASTETH)
1832 			break;
1833 
1834 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1835 		if (hdx)
1836 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1837 		break;
1838 
1839 	default:
1840 		break;
1841 	}
1842 	CSR_WRITE_4(sc, JME_GHC, ghc);
1843 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1844 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1845 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1846 
1847 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1848 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1849 				    JMPHY_CONF, phyconf);
1850 	}
1851 	if (sc->jme_workaround & JME_WA_HDX)
1852 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1853 }
1854 
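/*
 * Interrupt handler.  Mask all interrupts, acknowledge the coalescing
 * events about to be serviced (which also resets the PCC counters and
 * timers), process Rx and Tx completions while the interface is
 * running, and finally unmask the interrupts.
 */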
1855 static void
1856 jme_intr(void *xsc)
1857 {
1858 	struct jme_softc *sc = xsc;
1859 	struct ifnet *ifp = &sc->arpcom.ac_if;
1860 	uint32_t status;
1861 	int r;
1862 
1863 	ASSERT_SERIALIZED(ifp->if_serializer);
1864 
1865 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1866 	if (status == 0 || status == 0xFFFFFFFF)
1867 		return;
1868 
1869 	/* Disable interrupts. */
1870 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1871 
1872 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1873 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1874 		goto back;
1875 
1876 	/* Reset PCC counter/timer and Ack interrupts. */
1877 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1878 
1879 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1880 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1881 
1882 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1883 		if (status & jme_rx_status[r].jme_coal) {
1884 			status |= jme_rx_status[r].jme_coal |
1885 				  jme_rx_status[r].jme_comp;
1886 		}
1887 	}
1888 
1889 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1890 
1891 	if (ifp->if_flags & IFF_RUNNING) {
1892 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1893 			jme_rx_intr(sc, status);
1894 
1895 		if (status & INTR_RXQ_DESC_EMPTY) {
1896 			/*
			 * Notify the hardware that new Rx buffers are
			 * available.  Reading RXCSR takes a very long time
			 * under heavy load, so write the cached RXCSR value,
			 * ORed with the kick command, back to RXCSR.  This
			 * saves one register access cycle.
1902 			 */
1903 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1904 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1905 		}
1906 
1907 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1908 			jme_txeof(sc);
1909 			if (!ifq_is_empty(&ifp->if_snd))
1910 				if_devstart(ifp);
1911 		}
1912 	}
1913 back:
1914 	/* Reenable interrupts. */
1915 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1916 }
1917 
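/*
 * Tx completion: walk the Tx ring from the consumer index, free the
 * mbufs of frames the hardware has finished with, and reclaim their
 * descriptors.  Called with the serializer held.
 */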
1918 static void
1919 jme_txeof(struct jme_softc *sc)
1920 {
1921 	struct ifnet *ifp = &sc->arpcom.ac_if;
1922 	struct jme_txdesc *txd;
1923 	uint32_t status;
1924 	int cons, nsegs;
1925 
1926 	cons = sc->jme_cdata.jme_tx_cons;
1927 	if (cons == sc->jme_cdata.jme_tx_prod)
1928 		return;
1929 
1930 	/*
1931 	 * Go through our Tx list and free mbufs for those
1932 	 * frames which have been transmitted.
1933 	 */
1934 	while (cons != sc->jme_cdata.jme_tx_prod) {
1935 		txd = &sc->jme_cdata.jme_txdesc[cons];
1936 		KASSERT(txd->tx_m != NULL,
1937 			("%s: freeing NULL mbuf!\n", __func__));
1938 
1939 		status = le32toh(txd->tx_desc->flags);
1940 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1941 			break;
1942 
1943 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1944 			ifp->if_oerrors++;
1945 		} else {
1946 			ifp->if_opackets++;
1947 			if (status & JME_TD_COLLISION) {
1948 				ifp->if_collisions +=
1949 				    le32toh(txd->tx_desc->buflen) &
1950 				    JME_TD_BUF_LEN_MASK;
1951 			}
1952 		}
1953 
1954 		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire buffer chain for the transmitted frame.  In
		 * other words, the JME_TD_OWN bit is valid only in the
		 * first descriptor of a multi-descriptor transmission.
1960 		 */
1961 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1962 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
1963 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
1964 		}
1965 
1966 		/* Reclaim transferred mbufs. */
1967 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1968 		m_freem(txd->tx_m);
1969 		txd->tx_m = NULL;
1970 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1971 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
1972 			("%s: Active Tx desc counter was garbled\n", __func__));
1973 		txd->tx_ndesc = 0;
1974 	}
1975 	sc->jme_cdata.jme_tx_cons = cons;
1976 
1977 	if (sc->jme_cdata.jme_tx_cnt == 0)
1978 		ifp->if_timer = 0;
1979 
1980 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
1981 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
1982 		ifp->if_flags &= ~IFF_OACTIVE;
1983 }
1984 
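/*
 * Hand `count' descriptors starting at `cons' back to the hardware
 * unchanged, so their existing buffers are reused.  Used on Rx error
 * and buffer-allocation-failure paths.
 */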
1985 static __inline void
1986 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
1987 {
1988 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
1989 	int i;
1990 
1991 	for (i = 0; i < count; ++i) {
1992 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
1993 
1994 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
1995 		desc->buflen = htole32(MCLBYTES);
1996 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
1997 	}
1998 }
1999 
2000 /* Receive a frame. */
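/*
 * A frame larger than one cluster arrives in several consecutive
 * descriptors; the segments are chained into a single mbuf chain and
 * the per-mbuf lengths are fixed up at the last segment before the
 * frame is passed to ether_input_chain().
 */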
2001 static void
2002 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2003 {
2004 	struct ifnet *ifp = &sc->arpcom.ac_if;
2005 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2006 	struct jme_desc *desc;
2007 	struct jme_rxdesc *rxd;
2008 	struct mbuf *mp, *m;
2009 	uint32_t flags, status;
2010 	int cons, count, nsegs;
2011 
2012 	cons = rdata->jme_rx_cons;
2013 	desc = &rdata->jme_rx_ring[cons];
2014 	flags = le32toh(desc->flags);
2015 	status = le32toh(desc->buflen);
2016 	nsegs = JME_RX_NSEGS(status);
2017 
2018 	JME_RSS_DPRINTF(sc, 10, "ring%d, flags 0x%08x, "
2019 			"hash 0x%08x, hash type 0x%08x\n",
2020 			ring, flags, desc->addr_hi, desc->addr_lo);
2021 
2022 	if (status & JME_RX_ERR_STAT) {
2023 		ifp->if_ierrors++;
2024 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2025 #ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2027 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2028 #endif
2029 		rdata->jme_rx_cons += nsegs;
2030 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2031 		return;
2032 	}
2033 
2034 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2035 	for (count = 0; count < nsegs; count++,
2036 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2037 		rxd = &rdata->jme_rxdesc[cons];
2038 		mp = rxd->rx_m;
2039 
2040 		/* Add a new receive buffer to the ring. */
2041 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2042 			ifp->if_iqdrops++;
2043 			/* Reuse buffer. */
2044 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2045 			if (rdata->jme_rxhead != NULL) {
2046 				m_freem(rdata->jme_rxhead);
2047 				JME_RXCHAIN_RESET(sc, ring);
2048 			}
2049 			break;
2050 		}
2051 
2052 		/*
		 * Assume we've received a full-sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segment frame.
2056 		 */
2057 		mp->m_len = MCLBYTES;
2058 
2059 		/* Chain received mbufs. */
2060 		if (rdata->jme_rxhead == NULL) {
2061 			rdata->jme_rxhead = mp;
2062 			rdata->jme_rxtail = mp;
2063 		} else {
2064 			/*
			 * The receive processor can handle a maximum
			 * frame size of 65535 bytes.
2067 			 */
2068 			mp->m_flags &= ~M_PKTHDR;
2069 			rdata->jme_rxtail->m_next = mp;
2070 			rdata->jme_rxtail = mp;
2071 		}
2072 
2073 		if (count == nsegs - 1) {
2074 			/* Last desc. for this frame. */
2075 			m = rdata->jme_rxhead;
2076 			/* XXX assert PKTHDR? */
2077 			m->m_flags |= M_PKTHDR;
2078 			m->m_pkthdr.len = rdata->jme_rxlen;
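			/*
			 * Fix up the per-mbuf lengths: the first mbuf
			 * loses JME_RX_PAD_BYTES, the middle mbufs are
			 * full clusters, and the last mbuf takes the
			 * remainder.  For example (assuming 2KB clusters
			 * and 10 pad bytes), a 3000-byte frame spans two
			 * segments: 2038 bytes in the first mbuf and 962
			 * bytes in the last.
			 */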
2079 			if (nsegs > 1) {
2080 				/* Set first mbuf size. */
2081 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2082 				/* Set last mbuf size. */
2083 				mp->m_len = rdata->jme_rxlen -
2084 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2085 				    (MCLBYTES * (nsegs - 2)));
2086 			} else {
2087 				m->m_len = rdata->jme_rxlen;
2088 			}
2089 			m->m_pkthdr.rcvif = ifp;
2090 
2091 			/*
			 * Account for the 10 bytes of auto padding used
			 * to align the IP header on a 32-bit boundary.
			 * Also note that the CRC bytes are automatically
			 * stripped by the hardware.
2096 			 */
2097 			m->m_data += JME_RX_PAD_BYTES;
2098 
2099 			/* Set checksum information. */
2100 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2101 			    (flags & JME_RD_IPV4)) {
2102 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2103 				if (flags & JME_RD_IPCSUM)
2104 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2105 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2106 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2107 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2108 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2109 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2110 					m->m_pkthdr.csum_flags |=
2111 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2112 					m->m_pkthdr.csum_data = 0xffff;
2113 				}
2114 			}
2115 
2116 			/* Check for VLAN tagged packets. */
2117 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2118 			    (flags & JME_RD_VLAN_TAG)) {
2119 				m->m_pkthdr.ether_vlantag =
2120 				    flags & JME_RD_VLAN_MASK;
2121 				m->m_flags |= M_VLANTAG;
2122 			}
2123 
2124 			ifp->if_ipackets++;
2125 			/* Pass it on. */
2126 			ether_input_chain(ifp, m, chain);
2127 
2128 			/* Reset mbuf chains. */
2129 			JME_RXCHAIN_RESET(sc, ring);
2130 #ifdef JME_RSS_DEBUG
2131 			sc->jme_rx_ring_pkt[ring]++;
2132 #endif
2133 		}
2134 	}
2135 
2136 	rdata->jme_rx_cons += nsegs;
2137 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2138 }
2139 
2140 static int
2141 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2142 		int count)
2143 {
2144 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2145 	struct jme_desc *desc;
2146 	int nsegs, prog, pktlen;
2147 
2148 	prog = 0;
2149 	for (;;) {
2150 #ifdef DEVICE_POLLING
2151 		if (count >= 0 && count-- == 0)
2152 			break;
2153 #endif
2154 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2155 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2156 			break;
2157 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2158 			break;
2159 
2160 		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update the Rx descriptors.
		 * I'm not sure whether this check is needed.
2165 		 */
2166 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2167 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2168 		if (nsegs != howmany(pktlen, MCLBYTES)) {
		if_printf(&sc->arpcom.ac_if, "RX fragment count (%d) "
			  "and packet size (%d) mismatch\n",
2171 				  nsegs, pktlen);
2172 			break;
2173 		}
2174 
2175 		/* Received a frame. */
2176 		jme_rxpkt(sc, ring, chain);
2177 		prog++;
2178 	}
2179 	return prog;
2180 }
2181 
2182 static void
2183 jme_rxeof(struct jme_softc *sc, int ring)
2184 {
2185 	struct mbuf_chain chain[MAXCPU];
2186 
2187 	ether_input_chain_init(chain);
2188 	if (jme_rxeof_chain(sc, ring, chain, -1))
2189 		ether_input_dispatch(chain);
2190 }
2191 
2192 static void
2193 jme_tick(void *xsc)
2194 {
2195 	struct jme_softc *sc = xsc;
2196 	struct ifnet *ifp = &sc->arpcom.ac_if;
2197 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2198 
2199 	lwkt_serialize_enter(ifp->if_serializer);
2200 
2201 	mii_tick(mii);
2202 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2203 
2204 	lwkt_serialize_exit(ifp->if_serializer);
2205 }
2206 
2207 static void
2208 jme_reset(struct jme_softc *sc)
2209 {
2210 #ifdef foo
2211 	/* Stop receiver, transmitter. */
2212 	jme_stop_rx(sc);
2213 	jme_stop_tx(sc);
2214 #endif
2215 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2216 	DELAY(10);
2217 	CSR_WRITE_4(sc, JME_GHC, 0);
2218 }
2219 
2220 static void
2221 jme_init(void *xsc)
2222 {
2223 	struct jme_softc *sc = xsc;
2224 	struct ifnet *ifp = &sc->arpcom.ac_if;
2225 	struct mii_data *mii;
2226 	uint8_t eaddr[ETHER_ADDR_LEN];
2227 	bus_addr_t paddr;
2228 	uint32_t reg;
2229 	int error, r;
2230 
2231 	ASSERT_SERIALIZED(ifp->if_serializer);
2232 
2233 	/*
2234 	 * Cancel any pending I/O.
2235 	 */
2236 	jme_stop(sc);
2237 
2238 	/*
2239 	 * Reset the chip to a known state.
2240 	 */
2241 	jme_reset(sc);
2242 
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2245 	KKASSERT(sc->jme_txd_spare >= 1);
2246 
2247 	/*
2248 	 * If we use 64bit address mode for transmitting, each Tx request
2249 	 * needs one more symbol descriptor.
2250 	 */
2251 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2252 		sc->jme_txd_spare += 1;
2253 
2254 	if (sc->jme_flags & JME_FLAG_RSS)
2255 		jme_enable_rss(sc);
2256 	else
2257 		jme_disable_rss(sc);
2258 
2259 	/* Init RX descriptors */
2260 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2261 		error = jme_init_rx_ring(sc, r);
2262 		if (error) {
2263 			if_printf(ifp, "initialization failed: "
2264 				  "no memory for %dth RX ring.\n", r);
2265 			jme_stop(sc);
2266 			return;
2267 		}
2268 	}
2269 
2270 	/* Init TX descriptors */
2271 	jme_init_tx_ring(sc);
2272 
2273 	/* Initialize shadow status block. */
2274 	jme_init_ssb(sc);
2275 
2276 	/* Reprogram the station address. */
2277 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2278 	CSR_WRITE_4(sc, JME_PAR0,
2279 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2280 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2281 
2282 	/*
2283 	 * Configure Tx queue.
2284 	 *  Tx priority queue weight value : 0
2285 	 *  Tx FIFO threshold for processing next packet : 16QW
2286 	 *  Maximum Tx DMA length : 512
2287 	 *  Allow Tx DMA burst.
2288 	 */
2289 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2290 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2291 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2292 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2293 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2294 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2295 
2296 	/* Set Tx descriptor counter. */
2297 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2298 
2299 	/* Set Tx ring address to the hardware. */
2300 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2301 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2302 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2303 
2304 	/* Configure TxMAC parameters. */
2305 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2306 	reg |= TXMAC_THRESH_1_PKT;
2307 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2308 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2309 
2310 	/*
2311 	 * Configure Rx queue.
2312 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2313 	 *  FIFO threshold for processing next packet : 128QW
2314 	 *  Rx queue 0 select
2315 	 *  Max Rx DMA length : 128
2316 	 *  Rx descriptor retry : 32
2317 	 *  Rx descriptor retry time gap : 256ns
2318 	 *  Don't receive runt/bad frame.
2319 	 */
2320 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2321 #if 0
2322 	/*
2323 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2324 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2325 	 * decrease FIFO threshold to reduce the FIFO overruns for
2326 	 * frames larger than 4000 bytes.
2327 	 * For best performance of standard MTU sized frames use
2328 	 * maximum allowable FIFO threshold, 128QW.
2329 	 */
2330 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2331 	    JME_RX_FIFO_SIZE)
2332 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2333 	else
2334 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2335 #else
2336 	/* Improve PCI Express compatibility */
2337 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2338 #endif
2339 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2340 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2341 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2342 	/* XXX TODO DROP_BAD */
2343 
2344 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2345 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2346 
2347 		/* Set Rx descriptor counter. */
2348 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2349 
2350 		/* Set Rx ring address to the hardware. */
2351 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2352 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2353 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2354 	}
2355 
2356 	/* Clear receive filter. */
2357 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2358 
2359 	/* Set up the receive filter. */
2360 	jme_set_filter(sc);
2361 	jme_set_vlan(sc);
2362 
2363 	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear WOL detection status bits.
2366 	 */
2367 	reg = CSR_READ_4(sc, JME_PMCS);
2368 	reg &= ~PMCS_WOL_ENB_MASK;
2369 	CSR_WRITE_4(sc, JME_PMCS, reg);
2370 
2371 	/*
	 * Pad 10 bytes right before the received frame.  This greatly
	 * helps Rx performance on strict-alignment architectures, as the
	 * driver does not need to copy the frame to align the payload.
2375 	 */
2376 	reg = CSR_READ_4(sc, JME_RXMAC);
2377 	reg |= RXMAC_PAD_10BYTES;
2378 
2379 	if (ifp->if_capenable & IFCAP_RXCSUM)
2380 		reg |= RXMAC_CSUM_ENB;
2381 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2382 
2383 	/* Configure general purpose reg0 */
2384 	reg = CSR_READ_4(sc, JME_GPREG0);
2385 	reg &= ~GPREG0_PCC_UNIT_MASK;
2386 	/* Set PCC timer resolution to micro-seconds unit. */
2387 	reg |= GPREG0_PCC_UNIT_US;
2388 	/*
	 * Disable all shadow register posting as we have to read the
	 * JME_INTR_STATUS register in jme_intr.  Also, it seems hard to
	 * synchronize the interrupt status between hardware and software
	 * with shadow posting due to the requirements of
	 * bus_dmamap_sync(9).
2394 	 */
2395 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2396 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2397 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2398 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2399 	/* Disable posting of DW0. */
2400 	reg &= ~GPREG0_POST_DW0_ENB;
2401 	/* Clear PME message. */
2402 	reg &= ~GPREG0_PME_ENB;
2403 	/* Set PHY address. */
2404 	reg &= ~GPREG0_PHY_ADDR_MASK;
2405 	reg |= sc->jme_phyaddr;
2406 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2407 
2408 	/* Configure Tx queue 0 packet completion coalescing. */
2409 	jme_set_tx_coal(sc);
2410 
2411 	/* Configure Rx queue 0 packet completion coalescing. */
2412 	jme_set_rx_coal(sc);
2413 
2414 	/* Configure shadow status block but don't enable posting. */
2415 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2416 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2417 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2418 
2419 	/* Disable Timer 1 and Timer 2. */
2420 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2421 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2422 
2423 	/* Configure retry transmit period, retry limit value. */
2424 	CSR_WRITE_4(sc, JME_TXTRHD,
2425 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2426 	    TXTRHD_RT_PERIOD_MASK) |
2427 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));
2429 
	/* Initialize the interrupt mask, unless polling is active. */
#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2436 
2437 	/*
2438 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after a valid link is detected in jme_miibus_statchg.
2440 	 */
2441 	sc->jme_flags &= ~JME_FLAG_LINK;
2442 
2443 	/* Set the current media. */
2444 	mii = device_get_softc(sc->jme_miibus);
2445 	mii_mediachg(mii);
2446 
2447 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2448 
2449 	ifp->if_flags |= IFF_RUNNING;
2450 	ifp->if_flags &= ~IFF_OACTIVE;
2451 }
2452 
2453 static void
2454 jme_stop(struct jme_softc *sc)
2455 {
2456 	struct ifnet *ifp = &sc->arpcom.ac_if;
2457 	struct jme_txdesc *txd;
2458 	struct jme_rxdesc *rxd;
2459 	struct jme_rxdata *rdata;
2460 	int i, r;
2461 
2462 	ASSERT_SERIALIZED(ifp->if_serializer);
2463 
2464 	/*
2465 	 * Mark the interface down and cancel the watchdog timer.
2466 	 */
2467 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2468 	ifp->if_timer = 0;
2469 
2470 	callout_stop(&sc->jme_tick_ch);
2471 	sc->jme_flags &= ~JME_FLAG_LINK;
2472 
2473 	/*
2474 	 * Disable interrupts.
2475 	 */
2476 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2477 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2478 
2479 	/* Disable updating shadow status block. */
2480 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2481 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2482 
2483 	/* Stop receiver, transmitter. */
2484 	jme_stop_rx(sc);
2485 	jme_stop_tx(sc);
2486 
2487 	/*
	 * Free partially finished RX segments.
2489 	 */
2490 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2491 		rdata = &sc->jme_cdata.jme_rx_data[r];
2492 		if (rdata->jme_rxhead != NULL)
2493 			m_freem(rdata->jme_rxhead);
2494 		JME_RXCHAIN_RESET(sc, r);
2495 	}
2496 
2497 	/*
2498 	 * Free RX and TX mbufs still in the queues.
2499 	 */
2500 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2501 		rdata = &sc->jme_cdata.jme_rx_data[r];
2502 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2503 			rxd = &rdata->jme_rxdesc[i];
2504 			if (rxd->rx_m != NULL) {
2505 				bus_dmamap_unload(rdata->jme_rx_tag,
2506 						  rxd->rx_dmamap);
2507 				m_freem(rxd->rx_m);
2508 				rxd->rx_m = NULL;
2509 			}
2510 		}
2511 	}
2512 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2513 		txd = &sc->jme_cdata.jme_txdesc[i];
2514 		if (txd->tx_m != NULL) {
2515 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2516 			    txd->tx_dmamap);
2517 			m_freem(txd->tx_m);
2518 			txd->tx_m = NULL;
2519 			txd->tx_ndesc = 0;
2520 		}
	}
2522 }
2523 
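/*
 * Clear the Tx enable bit and busy-wait up to JME_TIMEOUT microseconds
 * for the transmitter to report idle.
 */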
2524 static void
2525 jme_stop_tx(struct jme_softc *sc)
2526 {
2527 	uint32_t reg;
2528 	int i;
2529 
2530 	reg = CSR_READ_4(sc, JME_TXCSR);
2531 	if ((reg & TXCSR_TX_ENB) == 0)
2532 		return;
2533 	reg &= ~TXCSR_TX_ENB;
2534 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2535 	for (i = JME_TIMEOUT; i > 0; i--) {
2536 		DELAY(1);
2537 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2538 			break;
2539 	}
2540 	if (i == 0)
2541 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2542 }
2543 
2544 static void
2545 jme_stop_rx(struct jme_softc *sc)
2546 {
2547 	uint32_t reg;
2548 	int i;
2549 
2550 	reg = CSR_READ_4(sc, JME_RXCSR);
2551 	if ((reg & RXCSR_RX_ENB) == 0)
2552 		return;
2553 	reg &= ~RXCSR_RX_ENB;
2554 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2555 	for (i = JME_TIMEOUT; i > 0; i--) {
2556 		DELAY(1);
2557 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2558 			break;
2559 	}
2560 	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2562 }
2563 
2564 static void
2565 jme_init_tx_ring(struct jme_softc *sc)
2566 {
2567 	struct jme_chain_data *cd;
2568 	struct jme_txdesc *txd;
2569 	int i;
2570 
2571 	sc->jme_cdata.jme_tx_prod = 0;
2572 	sc->jme_cdata.jme_tx_cons = 0;
2573 	sc->jme_cdata.jme_tx_cnt = 0;
2574 
2575 	cd = &sc->jme_cdata;
2576 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2577 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2578 		txd = &sc->jme_cdata.jme_txdesc[i];
2579 		txd->tx_m = NULL;
2580 		txd->tx_desc = &cd->jme_tx_ring[i];
2581 		txd->tx_ndesc = 0;
2582 	}
2583 }
2584 
2585 static void
2586 jme_init_ssb(struct jme_softc *sc)
2587 {
2588 	struct jme_chain_data *cd;
2589 
2590 	cd = &sc->jme_cdata;
2591 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2592 }
2593 
2594 static int
2595 jme_init_rx_ring(struct jme_softc *sc, int ring)
2596 {
2597 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2598 	struct jme_rxdesc *rxd;
2599 	int i;
2600 
2601 	KKASSERT(rdata->jme_rxhead == NULL &&
2602 		 rdata->jme_rxtail == NULL &&
2603 		 rdata->jme_rxlen == 0);
2604 	rdata->jme_rx_cons = 0;
2605 
2606 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2607 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2608 		int error;
2609 
2610 		rxd = &rdata->jme_rxdesc[i];
2611 		rxd->rx_m = NULL;
2612 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2613 		error = jme_newbuf(sc, ring, rxd, 1);
2614 		if (error)
2615 			return error;
2616 	}
2617 	return 0;
2618 }
2619 
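/*
 * Attach a fresh cluster mbuf to an Rx descriptor.  The new mbuf is
 * loaded into the ring's spare DMA map first, so the descriptor's
 * current buffer is left intact if the load fails; on success the
 * spare map and the descriptor's map are swapped.
 */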
2620 static int
2621 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2622 {
2623 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2624 	struct jme_desc *desc;
2625 	struct mbuf *m;
2626 	bus_dma_segment_t segs;
2627 	bus_dmamap_t map;
2628 	int error, nsegs;
2629 
2630 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2631 	if (m == NULL)
2632 		return ENOBUFS;
2633 	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature instead of copying the entire frame just to align
	 * the IP header on a 32-bit boundary.
2638 	 */
2639 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2640 
2641 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2642 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2643 			BUS_DMA_NOWAIT);
2644 	if (error) {
2645 		m_freem(m);
2646 		if (init)
2647 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2648 		return error;
2649 	}
2650 
2651 	if (rxd->rx_m != NULL) {
2652 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2653 				BUS_DMASYNC_POSTREAD);
2654 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2655 	}
2656 	map = rxd->rx_dmamap;
2657 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2658 	rdata->jme_rx_sparemap = map;
2659 	rxd->rx_m = m;
2660 
2661 	desc = rxd->rx_desc;
2662 	desc->buflen = htole32(segs.ds_len);
2663 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2664 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2665 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2666 
2667 	return 0;
2668 }
2669 
2670 static void
2671 jme_set_vlan(struct jme_softc *sc)
2672 {
2673 	struct ifnet *ifp = &sc->arpcom.ac_if;
2674 	uint32_t reg;
2675 
2676 	ASSERT_SERIALIZED(ifp->if_serializer);
2677 
2678 	reg = CSR_READ_4(sc, JME_RXMAC);
2679 	reg &= ~RXMAC_VLAN_ENB;
2680 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2681 		reg |= RXMAC_VLAN_ENB;
2682 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2683 }
2684 
2685 static void
2686 jme_set_filter(struct jme_softc *sc)
2687 {
2688 	struct ifnet *ifp = &sc->arpcom.ac_if;
2689 	struct ifmultiaddr *ifma;
2690 	uint32_t crc;
2691 	uint32_t mchash[2];
2692 	uint32_t rxcfg;
2693 
2694 	ASSERT_SERIALIZED(ifp->if_serializer);
2695 
2696 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2697 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2698 	    RXMAC_ALLMULTI);
2699 
2700 	/*
	 * Always accept frames destined for our station address.
2702 	 * Always accept broadcast frames.
2703 	 */
2704 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2705 
2706 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2707 		if (ifp->if_flags & IFF_PROMISC)
2708 			rxcfg |= RXMAC_PROMISC;
2709 		if (ifp->if_flags & IFF_ALLMULTI)
2710 			rxcfg |= RXMAC_ALLMULTI;
2711 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2712 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2713 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2714 		return;
2715 	}
2716 
2717 	/*
2718 	 * Set up the multicast address filter by passing all multicast
2719 	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64-bit multicast hash table.  The
	 * high-order bit of the index selects the register, while the
	 * low 5 bits select the bit within the register.
2723 	 */
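	/*
	 * For example, a CRC of 0x2d (0b101101) sets bit 13 of MAR1:
	 * 0x2d >> 5 = 1 selects mchash[1], and 0x2d & 0x1f = 13 is the
	 * bit within that register.
	 */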
2724 	rxcfg |= RXMAC_MULTICAST;
2725 	bzero(mchash, sizeof(mchash));
2726 
2727 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2728 		if (ifma->ifma_addr->sa_family != AF_LINK)
2729 			continue;
2730 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2731 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2732 
2733 		/* Just want the 6 least significant bits. */
2734 		crc &= 0x3f;
2735 
2736 		/* Set the corresponding bit in the hash table. */
2737 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2738 	}
2739 
2740 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2741 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2742 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2743 }
2744 
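/*
 * The four coalescing sysctl handlers below share one pattern: snapshot
 * the current value, let sysctl_handle_int() apply the user's input,
 * range-check it, and reprogram the PCC registers only if the value
 * changed while the interface is running.  The serializer is held for
 * the whole exchange.
 */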
2745 static int
2746 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2747 {
2748 	struct jme_softc *sc = arg1;
2749 	struct ifnet *ifp = &sc->arpcom.ac_if;
2750 	int error, v;
2751 
2752 	lwkt_serialize_enter(ifp->if_serializer);
2753 
2754 	v = sc->jme_tx_coal_to;
2755 	error = sysctl_handle_int(oidp, &v, 0, req);
2756 	if (error || req->newptr == NULL)
2757 		goto back;
2758 
2759 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2760 		error = EINVAL;
2761 		goto back;
2762 	}
2763 
2764 	if (v != sc->jme_tx_coal_to) {
2765 		sc->jme_tx_coal_to = v;
2766 		if (ifp->if_flags & IFF_RUNNING)
2767 			jme_set_tx_coal(sc);
2768 	}
2769 back:
2770 	lwkt_serialize_exit(ifp->if_serializer);
2771 	return error;
2772 }
2773 
2774 static int
2775 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2776 {
2777 	struct jme_softc *sc = arg1;
2778 	struct ifnet *ifp = &sc->arpcom.ac_if;
2779 	int error, v;
2780 
2781 	lwkt_serialize_enter(ifp->if_serializer);
2782 
2783 	v = sc->jme_tx_coal_pkt;
2784 	error = sysctl_handle_int(oidp, &v, 0, req);
2785 	if (error || req->newptr == NULL)
2786 		goto back;
2787 
2788 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2789 		error = EINVAL;
2790 		goto back;
2791 	}
2792 
2793 	if (v != sc->jme_tx_coal_pkt) {
2794 		sc->jme_tx_coal_pkt = v;
2795 		if (ifp->if_flags & IFF_RUNNING)
2796 			jme_set_tx_coal(sc);
2797 	}
2798 back:
2799 	lwkt_serialize_exit(ifp->if_serializer);
2800 	return error;
2801 }
2802 
2803 static int
2804 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2805 {
2806 	struct jme_softc *sc = arg1;
2807 	struct ifnet *ifp = &sc->arpcom.ac_if;
2808 	int error, v;
2809 
2810 	lwkt_serialize_enter(ifp->if_serializer);
2811 
2812 	v = sc->jme_rx_coal_to;
2813 	error = sysctl_handle_int(oidp, &v, 0, req);
2814 	if (error || req->newptr == NULL)
2815 		goto back;
2816 
2817 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2818 		error = EINVAL;
2819 		goto back;
2820 	}
2821 
2822 	if (v != sc->jme_rx_coal_to) {
2823 		sc->jme_rx_coal_to = v;
2824 		if (ifp->if_flags & IFF_RUNNING)
2825 			jme_set_rx_coal(sc);
2826 	}
2827 back:
2828 	lwkt_serialize_exit(ifp->if_serializer);
2829 	return error;
2830 }
2831 
2832 static int
2833 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2834 {
2835 	struct jme_softc *sc = arg1;
2836 	struct ifnet *ifp = &sc->arpcom.ac_if;
2837 	int error, v;
2838 
2839 	lwkt_serialize_enter(ifp->if_serializer);
2840 
2841 	v = sc->jme_rx_coal_pkt;
2842 	error = sysctl_handle_int(oidp, &v, 0, req);
2843 	if (error || req->newptr == NULL)
2844 		goto back;
2845 
2846 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2847 		error = EINVAL;
2848 		goto back;
2849 	}
2850 
2851 	if (v != sc->jme_rx_coal_pkt) {
2852 		sc->jme_rx_coal_pkt = v;
2853 		if (ifp->if_flags & IFF_RUNNING)
2854 			jme_set_rx_coal(sc);
2855 	}
2856 back:
2857 	lwkt_serialize_exit(ifp->if_serializer);
2858 	return error;
2859 }
2860 
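/*
 * Program Tx packet-completion coalescing: an interrupt fires when
 * either jme_tx_coal_pkt completions have accumulated or jme_tx_coal_to
 * time units have elapsed (jme_init selects microsecond PCC units via
 * GPREG0).
 */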
2861 static void
2862 jme_set_tx_coal(struct jme_softc *sc)
2863 {
2864 	uint32_t reg;
2865 
2866 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2867 	    PCCTX_COAL_TO_MASK;
2868 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2869 	    PCCTX_COAL_PKT_MASK;
2870 	reg |= PCCTX_COAL_TXQ0;
2871 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2872 }
2873 
2874 static void
2875 jme_set_rx_coal(struct jme_softc *sc)
2876 {
2877 	uint32_t reg;
2878 	int r;
2879 
2880 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2881 	    PCCRX_COAL_TO_MASK;
2882 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2883 	    PCCRX_COAL_PKT_MASK;
2884 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
2885 		if (r < sc->jme_rx_ring_inuse)
2886 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
2887 		else
2888 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
2889 	}
2890 }
2891 
2892 #ifdef DEVICE_POLLING
2893 
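/*
 * polling(4) entry point.  POLL_REGISTER masks the device interrupts
 * and POLL_DEREGISTER restores them; the poll commands then run the
 * same Rx/Tx completion processing as jme_intr, bounded by `count'
 * frames per Rx ring.
 */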
2894 static void
2895 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2896 {
2897 	struct jme_softc *sc = ifp->if_softc;
2898 	struct mbuf_chain chain[MAXCPU];
2899 	uint32_t status;
2900 	int r, prog = 0;
2901 
2902 	ASSERT_SERIALIZED(ifp->if_serializer);
2903 
2904 	switch (cmd) {
2905 	case POLL_REGISTER:
2906 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2907 		break;
2908 
2909 	case POLL_DEREGISTER:
2910 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2911 		break;
2912 
2913 	case POLL_AND_CHECK_STATUS:
2914 	case POLL_ONLY:
2915 		status = CSR_READ_4(sc, JME_INTR_STATUS);
2916 
2917 		ether_input_chain_init(chain);
2918 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
2919 			prog += jme_rxeof_chain(sc, r, chain, count);
2920 		if (prog)
2921 			ether_input_dispatch(chain);
2922 
2923 		if (status & INTR_RXQ_DESC_EMPTY) {
2924 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2925 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2926 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2927 		}
2928 
2929 		jme_txeof(sc);
2930 		if (!ifq_is_empty(&ifp->if_snd))
2931 			if_devstart(ifp);
2932 		break;
2933 	}
2934 }
2935 
2936 #endif	/* DEVICE_POLLING */
2937 
2938 static int
2939 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
2940 {
2941 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2942 	bus_dmamem_t dmem;
2943 	int error;
2944 
2945 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
2946 			JME_RX_RING_ALIGN, 0,
2947 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2948 			JME_RX_RING_SIZE(sc),
2949 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
2950 	if (error) {
2951 		device_printf(sc->jme_dev,
2952 		    "could not allocate %dth Rx ring.\n", ring);
2953 		return error;
2954 	}
2955 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
2956 	rdata->jme_rx_ring_map = dmem.dmem_map;
2957 	rdata->jme_rx_ring = dmem.dmem_addr;
2958 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
2959 
2960 	return 0;
2961 }
2962 
2963 static int
2964 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
2965 {
2966 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2967 	int i, error;
2968 
2969 	/* Create tag for Rx buffers. */
2970 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
2971 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
2972 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2973 	    BUS_SPACE_MAXADDR,		/* highaddr */
2974 	    NULL, NULL,			/* filter, filterarg */
2975 	    MCLBYTES,			/* maxsize */
2976 	    1,				/* nsegments */
2977 	    MCLBYTES,			/* maxsegsize */
2978 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
2979 	    &rdata->jme_rx_tag);
2980 	if (error) {
2981 		device_printf(sc->jme_dev,
2982 		    "could not create %dth Rx DMA tag.\n", ring);
2983 		return error;
2984 	}
2985 
2986 	/* Create DMA maps for Rx buffers. */
2987 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
2988 				  &rdata->jme_rx_sparemap);
2989 	if (error) {
2990 		device_printf(sc->jme_dev,
2991 		    "could not create %dth spare Rx dmamap.\n", ring);
2992 		bus_dma_tag_destroy(rdata->jme_rx_tag);
2993 		rdata->jme_rx_tag = NULL;
2994 		return error;
2995 	}
2996 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2997 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
2998 
2999 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3000 					  &rxd->rx_dmamap);
3001 		if (error) {
3002 			int j;
3003 
3004 			device_printf(sc->jme_dev,
3005 			    "could not create %dth Rx dmamap "
3006 			    "for %dth RX ring.\n", i, ring);
3007 
3008 			for (j = 0; j < i; ++j) {
3009 				rxd = &rdata->jme_rxdesc[j];
3010 				bus_dmamap_destroy(rdata->jme_rx_tag,
3011 						   rxd->rx_dmamap);
3012 			}
3013 			bus_dmamap_destroy(rdata->jme_rx_tag,
3014 					   rdata->jme_rx_sparemap);
3015 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3016 			rdata->jme_rx_tag = NULL;
3017 			return error;
3018 		}
3019 	}
3020 	return 0;
3021 }
3022 
3023 static void
3024 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3025 {
3026 	struct mbuf_chain chain[MAXCPU];
3027 	int r, prog = 0;
3028 
3029 	ether_input_chain_init(chain);
3030 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3031 		if (status & jme_rx_status[r].jme_coal)
3032 			prog += jme_rxeof_chain(sc, r, chain, -1);
3033 	}
3034 	if (prog)
3035 		ether_input_dispatch(chain);
3036 }
3037 
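/*
 * Enable receive-side scaling across all Rx rings.  Each byte of the
 * RSS indirection table registers names the Rx ring for one of the 64
 * hash buckets: 0x01000100 alternates between rings 0 and 1, while
 * 0x03020100 cycles through rings 0-3.
 */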
3038 static void
3039 jme_enable_rss(struct jme_softc *sc)
3040 {
3041 	uint32_t rssc, key, ind;
3042 	int i;
3043 
3044 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3045 
3046 	rssc = RSSC_HASH_64_ENTRY;
3047 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3048 	rssc |= sc->jme_rx_ring_inuse >> 1;
3049 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3050 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3051 
3052 	key = 0x6d5a6d5a; /* XXX */
3053 	for (i = 0; i < RSSKEY_NREGS; ++i)
3054 		CSR_WRITE_4(sc, RSSKEY_REG(i), key);
3055 
3056 	ind = 0;
3057 	if (sc->jme_rx_ring_inuse == JME_NRXRING_2) {
3058 		ind = 0x01000100;
3059 	} else if (sc->jme_rx_ring_inuse == JME_NRXRING_4) {
3060 		ind = 0x03020100;
3061 	} else {
3062 		panic("%s: invalid # of RX rings (%d)\n",
3063 		      sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse);
3064 	}
3065 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3066 	for (i = 0; i < RSSTBL_NREGS; ++i)
3067 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3068 }
3069 
3070 static void
3071 jme_disable_rss(struct jme_softc *sc)
3072 {
3073 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3074 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3075 }
3076