xref: /dflybsd-src/sys/dev/netif/jme/if_jme.c (revision 3cf8dfbcc5e851c3571a9caa420ccd50eb89a824)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30 
31 #include "opt_polling.h"
32 
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/bpf.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/ifq_var.h>
53 #include <net/vlan/if_vlan_var.h>
54 #include <net/vlan/if_vlan_ether.h>
55 
56 #include <dev/netif/mii_layer/miivar.h>
57 #include <dev/netif/mii_layer/jmphyreg.h>
58 
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pcidevs.h>
62 
63 #include <dev/netif/jme/if_jmereg.h>
64 #include <dev/netif/jme/if_jmevar.h>
65 
66 #include "miibus_if.h"
67 
68 /* Define the following to enable printing of Rx errors. */
69 #undef	JME_SHOW_ERRORS
70 
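/*
 * Checksum offloads advertised through if_hwassist at attach time;
 * Tx checksumming may later be toggled via SIOCSIFCAP in jme_ioctl().
 */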
71 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
72 
73 static int	jme_probe(device_t);
74 static int	jme_attach(device_t);
75 static int	jme_detach(device_t);
76 static int	jme_shutdown(device_t);
77 static int	jme_suspend(device_t);
78 static int	jme_resume(device_t);
79 
80 static int	jme_miibus_readreg(device_t, int, int);
81 static int	jme_miibus_writereg(device_t, int, int, int);
82 static void	jme_miibus_statchg(device_t);
83 
84 static void	jme_init(void *);
85 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
86 static void	jme_start(struct ifnet *);
87 static void	jme_watchdog(struct ifnet *);
88 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
89 static int	jme_mediachange(struct ifnet *);
90 #ifdef DEVICE_POLLING
91 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
92 #endif
93 
94 static void	jme_intr(void *);
95 static void	jme_txeof(struct jme_softc *);
96 static void	jme_rxeof(struct jme_softc *);
97 
98 static int	jme_dma_alloc(struct jme_softc *);
99 static void	jme_dma_free(struct jme_softc *, int);
100 static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
101 static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
102 				  bus_size_t, int);
103 static int	jme_init_rx_ring(struct jme_softc *);
104 static void	jme_init_tx_ring(struct jme_softc *);
105 static void	jme_init_ssb(struct jme_softc *);
106 static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
107 static int	jme_encap(struct jme_softc *, struct mbuf **);
108 static void	jme_rxpkt(struct jme_softc *);
109 
110 static void	jme_tick(void *);
111 static void	jme_stop(struct jme_softc *);
112 static void	jme_reset(struct jme_softc *);
113 static void	jme_set_vlan(struct jme_softc *);
114 static void	jme_set_filter(struct jme_softc *);
115 static void	jme_stop_tx(struct jme_softc *);
116 static void	jme_stop_rx(struct jme_softc *);
117 static void	jme_mac_config(struct jme_softc *);
118 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
119 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
120 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
121 #ifdef notyet
122 static void	jme_setwol(struct jme_softc *);
123 static void	jme_setlinkspeed(struct jme_softc *);
124 #endif
125 static void	jme_set_tx_coal(struct jme_softc *);
126 static void	jme_set_rx_coal(struct jme_softc *);
127 
128 static void	jme_sysctl_node(struct jme_softc *);
129 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
130 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
131 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
132 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
133 
134 /*
135  * Devices supported by this driver.
136  */
137 static const struct jme_dev {
138 	uint16_t	jme_vendorid;
139 	uint16_t	jme_deviceid;
140 	uint32_t	jme_caps;
141 	const char	*jme_name;
142 } jme_devs[] = {
143 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
144 	    JME_CAP_JUMBO,
145 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
146 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
147 	    JME_CAP_FASTETH,
148 	    "JMicron Inc, JMC260 Fast Ethernet" },
149 	{ 0, 0, 0, NULL }
150 };
151 
152 static device_method_t jme_methods[] = {
153 	/* Device interface. */
154 	DEVMETHOD(device_probe,		jme_probe),
155 	DEVMETHOD(device_attach,	jme_attach),
156 	DEVMETHOD(device_detach,	jme_detach),
157 	DEVMETHOD(device_shutdown,	jme_shutdown),
158 	DEVMETHOD(device_suspend,	jme_suspend),
159 	DEVMETHOD(device_resume,	jme_resume),
160 
161 	/* Bus interface. */
162 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
163 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
164 
165 	/* MII interface. */
166 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
167 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
168 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
169 
170 	{ NULL, NULL }
171 };
172 
173 static driver_t jme_driver = {
174 	"jme",
175 	jme_methods,
176 	sizeof(struct jme_softc)
177 };
178 
179 static devclass_t jme_devclass;
180 
181 DECLARE_DUMMY_MODULE(if_jme);
182 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
183 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
184 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
185 
186 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
187 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
188 
189 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
190 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
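
/*
 * Illustrative loader.conf usage (the values below are assumptions,
 * not recommendations):
 *   hw.jme.rx_desc_count="512"
 *   hw.jme.tx_desc_count="512"
 * jme_attach() rounds the requested count up to a multiple of
 * JME_NDESC_ALIGN and clamps it to JME_NDESC_MAX.
 */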
191 
192 /*
193  *	Read a PHY register on the MII of the JMC250.
194  */
195 static int
196 jme_miibus_readreg(device_t dev, int phy, int reg)
197 {
198 	struct jme_softc *sc = device_get_softc(dev);
199 	uint32_t val;
200 	int i;
201 
202 	/* For FPGA version, PHY address 0 should be ignored. */
203 	if (sc->jme_caps & JME_CAP_FPGA) {
204 		if (phy == 0)
205 			return (0);
206 	} else {
207 		if (sc->jme_phyaddr != phy)
208 			return (0);
209 	}
210 
211 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
212 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
213 
214 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
215 		DELAY(1);
216 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
217 			break;
218 	}
219 	if (i == 0) {
220 		device_printf(sc->jme_dev, "phy read timeout: "
221 			      "phy %d, reg %d\n", phy, reg);
222 		return (0);
223 	}
224 
225 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
226 }
227 
228 /*
229  *	Write a PHY register on the MII of the JMC250.
230  */
231 static int
232 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
233 {
234 	struct jme_softc *sc = device_get_softc(dev);
235 	int i;
236 
237 	/* For FPGA version, PHY address 0 should be ignored. */
238 	if (sc->jme_caps & JME_CAP_FPGA) {
239 		if (phy == 0)
240 			return (0);
241 	} else {
242 		if (sc->jme_phyaddr != phy)
243 			return (0);
244 	}
245 
246 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
247 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
248 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
249 
250 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
251 		DELAY(1);
252 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
253 			break;
254 	}
255 	if (i == 0) {
256 		device_printf(sc->jme_dev, "phy write timeout: "
257 			      "phy %d, reg %d\n", phy, reg);
258 	}
259 
260 	return (0);
261 }
262 
263 /*
264  *	Callback from MII layer when media changes.
265  */
266 static void
267 jme_miibus_statchg(device_t dev)
268 {
269 	struct jme_softc *sc = device_get_softc(dev);
270 	struct ifnet *ifp = &sc->arpcom.ac_if;
271 	struct mii_data *mii;
272 	struct jme_txdesc *txd;
273 	bus_addr_t paddr;
274 	int i;
275 
276 	ASSERT_SERIALIZED(ifp->if_serializer);
277 
278 	if ((ifp->if_flags & IFF_RUNNING) == 0)
279 		return;
280 
281 	mii = device_get_softc(sc->jme_miibus);
282 
283 	sc->jme_flags &= ~JME_FLAG_LINK;
284 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
285 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
286 		case IFM_10_T:
287 		case IFM_100_TX:
288 			sc->jme_flags |= JME_FLAG_LINK;
289 			break;
290 		case IFM_1000_T:
291 			if (sc->jme_caps & JME_CAP_FASTETH)
292 				break;
293 			sc->jme_flags |= JME_FLAG_LINK;
294 			break;
295 		default:
296 			break;
297 		}
298 	}
299 
300 	/*
301 	 * Disabling the Rx/Tx MACs has the side effect of resetting
302 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
303 	 * the Tx/Rx descriptor rings, so the driver should reset its
304 	 * internal producer/consumer pointers and reclaim any
305 	 * allocated resources.  Note that just saving the values of
306 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
307 	 * MACs and restoring them afterwards is not sufficient to
308 	 * guarantee a correct MAC state, because stopping MAC
309 	 * operation can take a while and the hardware might have
310 	 * updated the JME_TXNDA/JME_RXNDA registers during the stop
311 	 * operation.
312 	 */
313 
314 	/* Disable interrupts */
315 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
316 
317 	/* Stop driver */
318 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
319 	ifp->if_timer = 0;
320 	callout_stop(&sc->jme_tick_ch);
321 
322 	/* Stop receiver/transmitter. */
323 	jme_stop_rx(sc);
324 	jme_stop_tx(sc);
325 
326 	jme_rxeof(sc);
327 	if (sc->jme_cdata.jme_rxhead != NULL)
328 		m_freem(sc->jme_cdata.jme_rxhead);
329 	JME_RXCHAIN_RESET(sc);
330 
331 	jme_txeof(sc);
332 	if (sc->jme_cdata.jme_tx_cnt != 0) {
333 		/* Remove queued packets for transmit. */
334 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
335 			txd = &sc->jme_cdata.jme_txdesc[i];
336 			if (txd->tx_m != NULL) {
337 				bus_dmamap_unload(
338 				    sc->jme_cdata.jme_tx_tag,
339 				    txd->tx_dmamap);
340 				m_freem(txd->tx_m);
341 				txd->tx_m = NULL;
342 				txd->tx_ndesc = 0;
343 				ifp->if_oerrors++;
344 			}
345 		}
346 	}
347 
348 	/*
349 	 * Reuse configured Rx descriptors and reset
350 	 * producer/consumer index.
351 	 */
352 	sc->jme_cdata.jme_rx_cons = 0;
353 
354 	jme_init_tx_ring(sc);
355 
356 	/* Initialize shadow status block. */
357 	jme_init_ssb(sc);
358 
359 	/* Program MAC with resolved speed/duplex/flow-control. */
360 	if (sc->jme_flags & JME_FLAG_LINK) {
361 		jme_mac_config(sc);
362 
363 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
364 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
365 
366 		/* Set Tx ring address to the hardware. */
367 		paddr = JME_TX_RING_ADDR(sc, 0);
368 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
369 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
370 
371 		/* Set Rx ring address to the hardware. */
372 		paddr = JME_RX_RING_ADDR(sc, 0);
373 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
374 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
375 
376 		/* Restart receiver/transmitter. */
377 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
378 		    RXCSR_RXQ_START);
379 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
380 	}
381 
382 	ifp->if_flags |= IFF_RUNNING;
383 	ifp->if_flags &= ~IFF_OACTIVE;
384 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
385 
386 #ifdef DEVICE_POLLING
387 	if (!(ifp->if_flags & IFF_POLLING))
388 #endif
389 	/* Reenable interrupts. */
390 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
391 }
392 
393 /*
394  *	Get the current interface media status.
395  */
396 static void
397 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
398 {
399 	struct jme_softc *sc = ifp->if_softc;
400 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
401 
402 	ASSERT_SERIALIZED(ifp->if_serializer);
403 
404 	mii_pollstat(mii);
405 	ifmr->ifm_status = mii->mii_media_status;
406 	ifmr->ifm_active = mii->mii_media_active;
407 }
408 
409 /*
410  *	Set hardware to newly-selected media.
411  */
412 static int
413 jme_mediachange(struct ifnet *ifp)
414 {
415 	struct jme_softc *sc = ifp->if_softc;
416 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
417 	int error;
418 
419 	ASSERT_SERIALIZED(ifp->if_serializer);
420 
421 	if (mii->mii_instance != 0) {
422 		struct mii_softc *miisc;
423 
424 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
425 			mii_phy_reset(miisc);
426 	}
427 	error = mii_mediachg(mii);
428 
429 	return (error);
430 }
431 
432 static int
433 jme_probe(device_t dev)
434 {
435 	const struct jme_dev *sp;
436 	uint16_t vid, did;
437 
438 	vid = pci_get_vendor(dev);
439 	did = pci_get_device(dev);
440 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
441 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
442 			struct jme_softc *sc = device_get_softc(dev);
443 
444 			sc->jme_caps = sp->jme_caps;
445 			device_set_desc(dev, sp->jme_name);
446 			return (0);
447 		}
448 	}
449 	return (ENXIO);
450 }
451 
452 static int
453 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
454 {
455 	uint32_t reg;
456 	int i;
457 
458 	*val = 0;
459 	for (i = JME_TIMEOUT; i > 0; i--) {
460 		reg = CSR_READ_4(sc, JME_SMBCSR);
461 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
462 			break;
463 		DELAY(1);
464 	}
465 
466 	if (i == 0) {
467 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
468 		return (ETIMEDOUT);
469 	}
470 
471 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
472 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
473 	for (i = JME_TIMEOUT; i > 0; i--) {
474 		DELAY(1);
475 		reg = CSR_READ_4(sc, JME_SMBINTF);
476 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
477 			break;
478 	}
479 
480 	if (i == 0) {
481 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
482 		return (ETIMEDOUT);
483 	}
484 
485 	reg = CSR_READ_4(sc, JME_SMBINTF);
486 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
487 
488 	return (0);
489 }
490 
491 static int
492 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
493 {
494 	uint8_t fup, reg, val;
495 	uint32_t offset;
496 	int match;
497 
498 	offset = 0;
499 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
500 	    fup != JME_EEPROM_SIG0)
501 		return (ENOENT);
502 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
503 	    fup != JME_EEPROM_SIG1)
504 		return (ENOENT);
505 	match = 0;
506 	do {
507 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
508 			break;
509 		/* Check for the end of EEPROM descriptor. */
510 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
511 			break;
512 		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
513 		    JME_EEPROM_PAGE_BAR1) == fup) {
514 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
515 				break;
516 			if (reg >= JME_PAR0 &&
517 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
518 				if (jme_eeprom_read_byte(sc, offset + 2,
519 				    &val) != 0)
520 					break;
521 				eaddr[reg - JME_PAR0] = val;
522 				match++;
523 			}
524 		}
525 		/* Try next eeprom descriptor. */
526 		offset += JME_EEPROM_DESC_BYTES;
527 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
528 
529 	if (match == ETHER_ADDR_LEN)
530 		return (0);
531 
532 	return (ENOENT);
533 }
534 
535 static void
536 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
537 {
538 	uint32_t par0, par1;
539 
540 	/* Read station address. */
541 	par0 = CSR_READ_4(sc, JME_PAR0);
542 	par1 = CSR_READ_4(sc, JME_PAR1);
543 	par1 &= 0xFFFF;
544 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
545 		device_printf(sc->jme_dev,
546 		    "generating fake ethernet address.\n");
547 		par0 = karc4random();
548 		/* Set OUI to JMicron. */
549 		eaddr[0] = 0x00;
550 		eaddr[1] = 0x1B;
551 		eaddr[2] = 0x8C;
552 		eaddr[3] = (par0 >> 16) & 0xff;
553 		eaddr[4] = (par0 >> 8) & 0xff;
554 		eaddr[5] = par0 & 0xff;
555 	} else {
556 		eaddr[0] = (par0 >> 0) & 0xFF;
557 		eaddr[1] = (par0 >> 8) & 0xFF;
558 		eaddr[2] = (par0 >> 16) & 0xFF;
559 		eaddr[3] = (par0 >> 24) & 0xFF;
560 		eaddr[4] = (par1 >> 0) & 0xFF;
561 		eaddr[5] = (par1 >> 8) & 0xFF;
562 	}
563 }
564 
565 static int
566 jme_attach(device_t dev)
567 {
568 	struct jme_softc *sc = device_get_softc(dev);
569 	struct ifnet *ifp = &sc->arpcom.ac_if;
570 	uint32_t reg;
571 	uint16_t did;
572 	uint8_t pcie_ptr, rev;
573 	int error = 0;
574 	uint8_t eaddr[ETHER_ADDR_LEN];
575 
576 	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
577 	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
578 		sc->jme_rx_desc_cnt = JME_NDESC_MAX;
579 
580 	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
581 	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
582 		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
583 
584 	sc->jme_dev = dev;
585 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
586 
587 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
588 
589 	callout_init(&sc->jme_tick_ch);
590 
591 #ifndef BURN_BRIDGES
592 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
593 		uint32_t irq, mem;
594 
595 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
596 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
597 
598 		device_printf(dev, "chip is in D%d power mode "
599 		    "-- setting to D0\n", pci_get_powerstate(dev));
600 
601 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
602 
603 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
604 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
605 	}
606 #endif	/* !BURN_BRIDGES */
607 
608 	/* Enable bus mastering */
609 	pci_enable_busmaster(dev);
610 
611 	/*
612 	 * Allocate IO memory
613 	 *
614 	 * The JMC250 supports both memory-mapped and I/O register
615 	 * space access.  Because I/O access would have to use a
616 	 * different BAR to reach the registers, it's a waste of time
617 	 * to use I/O register space access.  The JMC250 uses 16K to
618 	 * map the entire memory space.
619 	 */
620 	sc->jme_mem_rid = JME_PCIR_BAR;
621 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
622 						 &sc->jme_mem_rid, RF_ACTIVE);
623 	if (sc->jme_mem_res == NULL) {
624 		device_printf(dev, "can't allocate IO memory\n");
625 		return ENXIO;
626 	}
627 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
628 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
629 
630 	/*
631 	 * Allocate IRQ
632 	 */
633 	sc->jme_irq_rid = 0;
634 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
635 						 &sc->jme_irq_rid,
636 						 RF_SHAREABLE | RF_ACTIVE);
637 	if (sc->jme_irq_res == NULL) {
638 		device_printf(dev, "can't allocate irq\n");
639 		error = ENXIO;
640 		goto fail;
641 	}
642 
643 	/*
644 	 * Extract revisions
645 	 */
646 	reg = CSR_READ_4(sc, JME_CHIPMODE);
647 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
648 	    CHIPMODE_NOT_FPGA) {
649 		sc->jme_caps |= JME_CAP_FPGA;
650 		if (bootverbose) {
651 			device_printf(dev, "FPGA revision: 0x%04x\n",
652 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
653 				      CHIPMODE_FPGA_REV_SHIFT);
654 		}
655 	}
656 
657 	/* NOTE: FM revision is put in the upper 4 bits */
658 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
659 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
660 	if (bootverbose)
661 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
662 
663 	did = pci_get_device(dev);
664 	switch (did) {
665 	case PCI_PRODUCT_JMICRON_JMC250:
666 		if (rev == JME_REV1_A2)
667 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
668 		break;
669 
670 	case PCI_PRODUCT_JMICRON_JMC260:
671 		if (rev == JME_REV2)
672 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
673 		break;
674 
675 	default:
676 		panic("unknown device id 0x%04x\n", did);
677 	}
678 	if (rev >= JME_REV2) {
679 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
680 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
681 				      GHC_TXMAC_CLKSRC_1000;
682 	}
683 
684 	/* Reset the ethernet controller. */
685 	jme_reset(sc);
686 
687 	/* Get station address. */
688 	reg = CSR_READ_4(sc, JME_SMBCSR);
689 	if (reg & SMBCSR_EEPROM_PRESENT)
690 		error = jme_eeprom_macaddr(sc, eaddr);
691 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
692 		if (error != 0 && (bootverbose)) {
693 			device_printf(dev, "ethernet hardware address "
694 				      "not found in EEPROM.\n");
695 		}
696 		jme_reg_macaddr(sc, eaddr);
697 	}
698 
699 	/*
700 	 * Save PHY address.
701 	 * The integrated JR0211 has a fixed PHY address, whereas the
702 	 * FPGA version requires PHY probing to get the correct address.
703 	 */
704 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
705 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
706 		    GPREG0_PHY_ADDR_MASK;
707 		if (bootverbose) {
708 			device_printf(dev, "PHY is at address %d.\n",
709 			    sc->jme_phyaddr);
710 		}
711 	} else {
712 		sc->jme_phyaddr = 0;
713 	}
714 
715 	/* Set max allowable DMA size. */
716 	pcie_ptr = pci_get_pciecap_ptr(dev);
717 	if (pcie_ptr != 0) {
718 		uint16_t ctrl;
719 
720 		sc->jme_caps |= JME_CAP_PCIE;
721 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
722 		if (bootverbose) {
723 			device_printf(dev, "Read request size : %d bytes.\n",
724 			    128 << ((ctrl >> 12) & 0x07));
725 			device_printf(dev, "TLP payload size : %d bytes.\n",
726 			    128 << ((ctrl >> 5) & 0x07));
727 		}
728 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
729 		case PCIEM_DEVCTL_MAX_READRQ_128:
730 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
731 			break;
732 		case PCIEM_DEVCTL_MAX_READRQ_256:
733 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
734 			break;
735 		default:
736 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
737 			break;
738 		}
739 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
740 	} else {
741 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
742 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
743 	}
744 
745 #ifdef notyet
746 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
747 		sc->jme_caps |= JME_CAP_PMCAP;
748 #endif
749 
750 	/*
751 	 * Create sysctl tree
752 	 */
753 	jme_sysctl_node(sc);
754 
755 	/* Allocate DMA resources */
756 	error = jme_dma_alloc(sc);
757 	if (error)
758 		goto fail;
759 
760 	ifp->if_softc = sc;
761 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
762 	ifp->if_init = jme_init;
763 	ifp->if_ioctl = jme_ioctl;
764 	ifp->if_start = jme_start;
765 #ifdef DEVICE_POLLING
766 	ifp->if_poll = jme_poll;
767 #endif
768 	ifp->if_watchdog = jme_watchdog;
769 	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
770 	ifq_set_ready(&ifp->if_snd);
771 
772 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
773 	ifp->if_capabilities = IFCAP_HWCSUM |
774 			       IFCAP_VLAN_MTU |
775 			       IFCAP_VLAN_HWTAGGING;
776 	ifp->if_hwassist = JME_CSUM_FEATURES;
777 	ifp->if_capenable = ifp->if_capabilities;
778 
779 	/* Set up MII bus. */
780 	error = mii_phy_probe(dev, &sc->jme_miibus,
781 			      jme_mediachange, jme_mediastatus);
782 	if (error) {
783 		device_printf(dev, "no PHY found!\n");
784 		goto fail;
785 	}
786 
787 	/*
788 	 * Save PHYADDR for FPGA mode PHY.
789 	 */
790 	if (sc->jme_caps & JME_CAP_FPGA) {
791 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
792 
793 		if (mii->mii_instance != 0) {
794 			struct mii_softc *miisc;
795 
796 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
797 				if (miisc->mii_phy != 0) {
798 					sc->jme_phyaddr = miisc->mii_phy;
799 					break;
800 				}
801 			}
802 			if (sc->jme_phyaddr != 0) {
803 				device_printf(sc->jme_dev,
804 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
805 				/* vendor magic. */
806 				jme_miibus_writereg(dev, sc->jme_phyaddr,
807 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
808 
809 				/* XXX should we clear JME_WA_EXTFIFO */
810 			}
811 		}
812 	}
813 
814 	ether_ifattach(ifp, eaddr, NULL);
815 
816 	/* Tell the upper layer(s) we support long frames. */
817 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
818 
819 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
820 			       &sc->jme_irq_handle, ifp->if_serializer);
821 	if (error) {
822 		device_printf(dev, "could not set up interrupt handler.\n");
823 		ether_ifdetach(ifp);
824 		goto fail;
825 	}
826 
827 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
828 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
829 	return 0;
830 fail:
831 	jme_detach(dev);
832 	return (error);
833 }
834 
835 static int
836 jme_detach(device_t dev)
837 {
838 	struct jme_softc *sc = device_get_softc(dev);
839 
840 	if (device_is_attached(dev)) {
841 		struct ifnet *ifp = &sc->arpcom.ac_if;
842 
843 		lwkt_serialize_enter(ifp->if_serializer);
844 		jme_stop(sc);
845 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
846 		lwkt_serialize_exit(ifp->if_serializer);
847 
848 		ether_ifdetach(ifp);
849 	}
850 
851 	if (sc->jme_sysctl_tree != NULL)
852 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
853 
854 	if (sc->jme_miibus != NULL)
855 		device_delete_child(dev, sc->jme_miibus);
856 	bus_generic_detach(dev);
857 
858 	if (sc->jme_irq_res != NULL) {
859 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
860 				     sc->jme_irq_res);
861 	}
862 
863 	if (sc->jme_mem_res != NULL) {
864 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
865 				     sc->jme_mem_res);
866 	}
867 
868 	jme_dma_free(sc, 1);
869 
870 	return (0);
871 }
872 
873 static void
874 jme_sysctl_node(struct jme_softc *sc)
875 {
876 	int coal_max;
877 
878 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
879 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
880 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
881 				device_get_nameunit(sc->jme_dev),
882 				CTLFLAG_RD, 0, "");
883 	if (sc->jme_sysctl_tree == NULL) {
884 		device_printf(sc->jme_dev, "can't add sysctl node\n");
885 		return;
886 	}
887 
888 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
889 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
890 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
891 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
892 
893 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
894 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
895 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
896 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
897 
898 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
899 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
900 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
901 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
902 
903 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
904 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
905 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
906 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
907 
908 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
909 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
910 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
911 		       0, "RX desc count");
912 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
913 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
914 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
915 		       0, "TX desc count");
916 
917 	/*
918 	 * Set default coalescing values
919 	 */
920 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
921 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
922 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
923 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
924 
925 	/*
926 	 * Adjust coalescing values, in case the number of TX/RX
927 	 * descriptors is set to a small value by the user.
928 	 *
929 	 * NOTE: coal_max will not be zero, since the number of descs
930 	 * must be aligned to JME_NDESC_ALIGN (16 currently).
931 	 */
932 	coal_max = sc->jme_tx_desc_cnt / 6;
933 	if (coal_max < sc->jme_tx_coal_pkt)
934 		sc->jme_tx_coal_pkt = coal_max;
935 
936 	coal_max = sc->jme_rx_desc_cnt / 4;
937 	if (coal_max < sc->jme_rx_coal_pkt)
938 		sc->jme_rx_coal_pkt = coal_max;
939 }
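
/*
 * The tree above lives under hw.<nameunit>, so the coalescing knobs
 * registered above can be read or set at runtime through their proc
 * handlers, e.g. (hypothetical unit and values):
 *   sysctl hw.jme0.rx_coal_to=200
 *   sysctl hw.jme0.tx_coal_pkt=32
 */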
940 
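/*
 * bus_dma callback used when loading the descriptor rings and the
 * shadow status block: each is created with exactly one segment, so
 * simply hand the segment's bus address back through 'arg'.
 */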
941 static void
942 jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
943 {
944 	if (error)
945 		return;
946 
947 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
948 	*((bus_addr_t *)arg) = segs->ds_addr;
949 }
950 
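/*
 * bus_dma callback used when loading Tx mbuf chains: copy up to
 * ctx->nsegs segments into the caller-supplied array.  If the chain
 * maps to more segments than the caller allowed, report ctx->nsegs
 * as 0, which jme_encap() treats like EFBIG and retries after
 * m_defrag().
 */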
951 static void
952 jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
953 		  bus_size_t mapsz __unused, int error)
954 {
955 	struct jme_dmamap_ctx *ctx = xctx;
956 	int i;
957 
958 	if (error)
959 		return;
960 
961 	if (nsegs > ctx->nsegs) {
962 		ctx->nsegs = 0;
963 		return;
964 	}
965 
966 	ctx->nsegs = nsegs;
967 	for (i = 0; i < nsegs; ++i)
968 		ctx->segs[i] = segs[i];
969 }
970 
971 static int
972 jme_dma_alloc(struct jme_softc *sc)
973 {
974 	struct jme_txdesc *txd;
975 	struct jme_rxdesc *rxd;
976 	bus_addr_t busaddr, lowaddr;
977 	int error, i;
978 
979 	sc->jme_cdata.jme_txdesc =
980 	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
981 		M_DEVBUF, M_WAITOK | M_ZERO);
982 	sc->jme_cdata.jme_rxdesc =
983 	kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
984 		M_DEVBUF, M_WAITOK | M_ZERO);
985 
986 	lowaddr = sc->jme_lowaddr;
987 again:
988 	/* Create parent ring tag. */
989 	error = bus_dma_tag_create(NULL,/* parent */
990 	    1, 0,			/* algnmnt, boundary */
991 	    lowaddr,			/* lowaddr */
992 	    BUS_SPACE_MAXADDR,		/* highaddr */
993 	    NULL, NULL,			/* filter, filterarg */
994 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
995 	    0,				/* nsegments */
996 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
997 	    0,				/* flags */
998 	    &sc->jme_cdata.jme_ring_tag);
999 	if (error) {
1000 		device_printf(sc->jme_dev,
1001 		    "could not create parent ring DMA tag.\n");
1002 		return error;
1003 	}
1004 
1005 	/*
1006 	 * Create DMA resources for the TX ring
1007 	 */
1008 
1009 	/* Create tag for Tx ring. */
1010 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1011 	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
1012 	    lowaddr,			/* lowaddr */
1013 	    BUS_SPACE_MAXADDR,		/* highaddr */
1014 	    NULL, NULL,			/* filter, filterarg */
1015 	    JME_TX_RING_SIZE(sc),	/* maxsize */
1016 	    1,				/* nsegments */
1017 	    JME_TX_RING_SIZE(sc),	/* maxsegsize */
1018 	    0,				/* flags */
1019 	    &sc->jme_cdata.jme_tx_ring_tag);
1020 	if (error) {
1021 		device_printf(sc->jme_dev,
1022 		    "could not allocate Tx ring DMA tag.\n");
1023 		return error;
1024 	}
1025 
1026 	/* Allocate DMA'able memory for TX ring */
1027 	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1028 	    (void **)&sc->jme_rdata.jme_tx_ring,
1029 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1030 	    &sc->jme_cdata.jme_tx_ring_map);
1031 	if (error) {
1032 		device_printf(sc->jme_dev,
1033 		    "could not allocate DMA'able memory for Tx ring.\n");
1034 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1035 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1036 		return error;
1037 	}
1038 
1039 	/*  Load the DMA map for Tx ring. */
1040 	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1041 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1042 	    JME_TX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1043 	if (error) {
1044 		device_printf(sc->jme_dev,
1045 		    "could not load DMA'able memory for Tx ring.\n");
1046 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1047 				sc->jme_rdata.jme_tx_ring,
1048 				sc->jme_cdata.jme_tx_ring_map);
1049 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1050 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1051 		return error;
1052 	}
1053 	sc->jme_rdata.jme_tx_ring_paddr = busaddr;
1054 
1055 	/*
1056 	 * Create DMA resources for the RX ring
1057 	 */
1058 
1059 	/* Create tag for Rx ring. */
1060 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1061 	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
1062 	    lowaddr,			/* lowaddr */
1063 	    BUS_SPACE_MAXADDR,		/* highaddr */
1064 	    NULL, NULL,			/* filter, filterarg */
1065 	    JME_RX_RING_SIZE(sc),	/* maxsize */
1066 	    1,				/* nsegments */
1067 	    JME_RX_RING_SIZE(sc),	/* maxsegsize */
1068 	    0,				/* flags */
1069 	    &sc->jme_cdata.jme_rx_ring_tag);
1070 	if (error) {
1071 		device_printf(sc->jme_dev,
1072 		    "could not allocate Rx ring DMA tag.\n");
1073 		return error;
1074 	}
1075 
1076 	/* Allocate DMA'able memory for RX ring */
1077 	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1078 	    (void **)&sc->jme_rdata.jme_rx_ring,
1079 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1080 	    &sc->jme_cdata.jme_rx_ring_map);
1081 	if (error) {
1082 		device_printf(sc->jme_dev,
1083 		    "could not allocate DMA'able memory for Rx ring.\n");
1084 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1085 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1086 		return error;
1087 	}
1088 
1089 	/* Load the DMA map for Rx ring. */
1090 	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1091 	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1092 	    JME_RX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1093 	if (error) {
1094 		device_printf(sc->jme_dev,
1095 		    "could not load DMA'able memory for Rx ring.\n");
1096 		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1097 				sc->jme_rdata.jme_rx_ring,
1098 				sc->jme_cdata.jme_rx_ring_map);
1099 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1100 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1101 		return error;
1102 	}
1103 	sc->jme_rdata.jme_rx_ring_paddr = busaddr;
1104 
1105 	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1106 		bus_addr_t rx_ring_end, tx_ring_end;
1107 
1108 		/* Tx/Rx descriptor rings must not cross a 4GB boundary. */
1109 		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1110 			      JME_TX_RING_SIZE(sc);
1111 		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1112 			      JME_RX_RING_SIZE(sc);
1113 		if ((JME_ADDR_HI(tx_ring_end) !=
1114 		     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1115 		    (JME_ADDR_HI(rx_ring_end) !=
1116 		     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1117 			device_printf(sc->jme_dev, "4GB boundary crossed, "
1118 			    "switching to 32bit DMA address mode.\n");
1119 			jme_dma_free(sc, 0);
1120 			/* Limit DMA address space to 32bit and try again. */
1121 			lowaddr = BUS_SPACE_MAXADDR_32BIT;
1122 			goto again;
1123 		}
1124 	}
1125 
1126 	/* Create parent buffer tag. */
1127 	error = bus_dma_tag_create(NULL,/* parent */
1128 	    1, 0,			/* algnmnt, boundary */
1129 	    sc->jme_lowaddr,		/* lowaddr */
1130 	    BUS_SPACE_MAXADDR,		/* highaddr */
1131 	    NULL, NULL,			/* filter, filterarg */
1132 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1133 	    0,				/* nsegments */
1134 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1135 	    0,				/* flags */
1136 	    &sc->jme_cdata.jme_buffer_tag);
1137 	if (error) {
1138 		device_printf(sc->jme_dev,
1139 		    "could not create parent buffer DMA tag.\n");
1140 		return error;
1141 	}
1142 
1143 	/*
1144 	 * Create DMA resources for the shadow status block
1145 	 */
1146 
1147 	/* Create shadow status block tag. */
1148 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1149 	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
1150 	    sc->jme_lowaddr,		/* lowaddr */
1151 	    BUS_SPACE_MAXADDR,		/* highaddr */
1152 	    NULL, NULL,			/* filter, filterarg */
1153 	    JME_SSB_SIZE,		/* maxsize */
1154 	    1,				/* nsegments */
1155 	    JME_SSB_SIZE,		/* maxsegsize */
1156 	    0,				/* flags */
1157 	    &sc->jme_cdata.jme_ssb_tag);
1158 	if (error) {
1159 		device_printf(sc->jme_dev,
1160 		    "could not create shared status block DMA tag.\n");
1161 		return error;
1162 	}
1163 
1164 	/* Allocate DMA'able memory for shared status block. */
1165 	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1166 	    (void **)&sc->jme_rdata.jme_ssb_block,
1167 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1168 	    &sc->jme_cdata.jme_ssb_map);
1169 	if (error) {
1170 		device_printf(sc->jme_dev, "could not allocate DMA'able "
1171 		    "memory for shared status block.\n");
1172 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1173 		sc->jme_cdata.jme_ssb_tag = NULL;
1174 		return error;
1175 	}
1176 
1177 	/* Load the DMA map for shared status block */
1178 	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1179 	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1180 	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1181 	if (error) {
1182 		device_printf(sc->jme_dev, "could not load DMA'able memory "
1183 		    "for shared status block.\n");
1184 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1185 				sc->jme_rdata.jme_ssb_block,
1186 				sc->jme_cdata.jme_ssb_map);
1187 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1188 		sc->jme_cdata.jme_ssb_tag = NULL;
1189 		return error;
1190 	}
1191 	sc->jme_rdata.jme_ssb_block_paddr = busaddr;
1192 
1193 	/*
1194 	 * Create DMA resources for the TX buffers
1195 	 */
1196 
1197 	/* Create tag for Tx buffers. */
1198 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1199 	    1, 0,			/* algnmnt, boundary */
1200 	    sc->jme_lowaddr,		/* lowaddr */
1201 	    BUS_SPACE_MAXADDR,		/* highaddr */
1202 	    NULL, NULL,			/* filter, filterarg */
1203 	    JME_TSO_MAXSIZE,		/* maxsize */
1204 	    JME_MAXTXSEGS,		/* nsegments */
1205 	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
1206 	    0,				/* flags */
1207 	    &sc->jme_cdata.jme_tx_tag);
1208 	if (error != 0) {
1209 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1210 		return error;
1211 	}
1212 
1213 	/* Create DMA maps for Tx buffers. */
1214 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1215 		txd = &sc->jme_cdata.jme_txdesc[i];
1216 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1217 		    &txd->tx_dmamap);
1218 		if (error) {
1219 			int j;
1220 
1221 			device_printf(sc->jme_dev,
1222 			    "could not create %dth Tx dmamap.\n", i);
1223 
1224 			for (j = 0; j < i; ++j) {
1225 				txd = &sc->jme_cdata.jme_txdesc[j];
1226 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1227 						   txd->tx_dmamap);
1228 			}
1229 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1230 			sc->jme_cdata.jme_tx_tag = NULL;
1231 			return error;
1232 		}
1233 	}
1234 
1235 	/*
1236 	 * Create DMA resources for the RX buffers
1237 	 */
1238 
1239 	/* Create tag for Rx buffers. */
1240 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1241 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
1242 	    sc->jme_lowaddr,		/* lowaddr */
1243 	    BUS_SPACE_MAXADDR,		/* highaddr */
1244 	    NULL, NULL,			/* filter, filterarg */
1245 	    MCLBYTES,			/* maxsize */
1246 	    1,				/* nsegments */
1247 	    MCLBYTES,			/* maxsegsize */
1248 	    0,				/* flags */
1249 	    &sc->jme_cdata.jme_rx_tag);
1250 	if (error) {
1251 		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1252 		return error;
1253 	}
1254 
1255 	/* Create DMA maps for Rx buffers. */
1256 	error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1257 				  &sc->jme_cdata.jme_rx_sparemap);
1258 	if (error) {
1259 		device_printf(sc->jme_dev,
1260 		    "could not create spare Rx dmamap.\n");
1261 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1262 		sc->jme_cdata.jme_rx_tag = NULL;
1263 		return error;
1264 	}
1265 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1266 		rxd = &sc->jme_cdata.jme_rxdesc[i];
1267 		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1268 		    &rxd->rx_dmamap);
1269 		if (error) {
1270 			int j;
1271 
1272 			device_printf(sc->jme_dev,
1273 			    "could not create %dth Rx dmamap.\n", i);
1274 
1275 			for (j = 0; j < i; ++j) {
1276 				rxd = &sc->jme_cdata.jme_rxdesc[j];
1277 				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1278 						   rxd->rx_dmamap);
1279 			}
1280 			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1281 			    sc->jme_cdata.jme_rx_sparemap);
1282 			bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1283 			sc->jme_cdata.jme_rx_tag = NULL;
1284 			return error;
1285 		}
1286 	}
1287 	return 0;
1288 }
1289 
1290 static void
1291 jme_dma_free(struct jme_softc *sc, int detach)
1292 {
1293 	struct jme_txdesc *txd;
1294 	struct jme_rxdesc *rxd;
1295 	int i;
1296 
1297 	/* Tx ring */
1298 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1299 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1300 		    sc->jme_cdata.jme_tx_ring_map);
1301 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1302 		    sc->jme_rdata.jme_tx_ring,
1303 		    sc->jme_cdata.jme_tx_ring_map);
1304 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1305 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1306 	}
1307 
1308 	/* Rx ring */
1309 	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1310 		bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1311 		    sc->jme_cdata.jme_rx_ring_map);
1312 		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1313 		    sc->jme_rdata.jme_rx_ring,
1314 		    sc->jme_cdata.jme_rx_ring_map);
1315 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1316 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1317 	}
1318 
1319 	/* Tx buffers */
1320 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1321 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1322 			txd = &sc->jme_cdata.jme_txdesc[i];
1323 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1324 			    txd->tx_dmamap);
1325 		}
1326 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1327 		sc->jme_cdata.jme_tx_tag = NULL;
1328 	}
1329 
1330 	/* Rx buffers */
1331 	if (sc->jme_cdata.jme_rx_tag != NULL) {
1332 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1333 			rxd = &sc->jme_cdata.jme_rxdesc[i];
1334 			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1335 			    rxd->rx_dmamap);
1336 		}
1337 		bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1338 		    sc->jme_cdata.jme_rx_sparemap);
1339 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1340 		sc->jme_cdata.jme_rx_tag = NULL;
1341 	}
1342 
1343 	/* Shadow status block. */
1344 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1345 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1346 		    sc->jme_cdata.jme_ssb_map);
1347 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1348 		    sc->jme_rdata.jme_ssb_block,
1349 		    sc->jme_cdata.jme_ssb_map);
1350 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1351 		sc->jme_cdata.jme_ssb_tag = NULL;
1352 	}
1353 
1354 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1355 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1356 		sc->jme_cdata.jme_buffer_tag = NULL;
1357 	}
1358 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1359 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1360 		sc->jme_cdata.jme_ring_tag = NULL;
1361 	}
1362 
1363 	if (detach) {
1364 		if (sc->jme_cdata.jme_txdesc != NULL) {
1365 			kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1366 			sc->jme_cdata.jme_txdesc = NULL;
1367 		}
1368 		if (sc->jme_cdata.jme_rxdesc != NULL) {
1369 			kfree(sc->jme_cdata.jme_rxdesc, M_DEVBUF);
1370 			sc->jme_cdata.jme_rxdesc = NULL;
1371 		}
1372 	}
1373 }
1374 
1375 /*
1376  *	Make sure the interface is stopped at reboot time.
1377  */
1378 static int
1379 jme_shutdown(device_t dev)
1380 {
1381 	return jme_suspend(dev);
1382 }
1383 
1384 #ifdef notyet
1385 /*
1386  * Unlike other ethernet controllers, the JMC250 requires
1387  * explicitly resetting the link speed to 10/100Mbps, as a
1388  * gigabit link will consume more power than 375mA.
1389  * Note, we reset the link speed to 10/100Mbps with
1390  * auto-negotiation, but we don't know whether that operation
1391  * will succeed as we have no control after powering off.  If
1392  * the renegotiation fails, WOL may not work.  Running at
1393  * 1Gbps draws more power than the 375mA at 3.3V specified in
1394  * the PCI specification, and that would result in power to
1395  * the ethernet controller being shut down completely.
1396  *
1397  * TODO
1398  *  Save current negotiated media speed/duplex/flow-control
1399  *  to softc and restore the same link again after resuming.
1400  *  PHY handling such as power down/resetting to 100Mbps
1401  *  may be better handled in suspend method in phy driver.
1402  */
1403 static void
1404 jme_setlinkspeed(struct jme_softc *sc)
1405 {
1406 	struct mii_data *mii;
1407 	int aneg, i;
1408 
1409 	JME_LOCK_ASSERT(sc);
1410 
1411 	mii = device_get_softc(sc->jme_miibus);
1412 	mii_pollstat(mii);
1413 	aneg = 0;
1414 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1415 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1416 		case IFM_10_T:
1417 		case IFM_100_TX:
1418 			return;
1419 		case IFM_1000_T:
1420 			aneg++;
1421 		default:
1422 			break;
1423 		}
1424 	}
1425 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1426 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1427 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1428 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1429 	    BMCR_AUTOEN | BMCR_STARTNEG);
1430 	DELAY(1000);
1431 	if (aneg != 0) {
1432 		/* Poll link state until jme(4) gets a 10/100 link. */
1433 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1434 			mii_pollstat(mii);
1435 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1436 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1437 				case IFM_10_T:
1438 				case IFM_100_TX:
1439 					jme_mac_config(sc);
1440 					return;
1441 				default:
1442 					break;
1443 				}
1444 			}
1445 			JME_UNLOCK(sc);
1446 			pause("jmelnk", hz);
1447 			JME_LOCK(sc);
1448 		}
1449 		if (i == MII_ANEGTICKS_GIGE)
1450 			device_printf(sc->jme_dev, "establishing link failed, "
1451 			    "WOL may not work!");
1452 	}
1453 	/*
1454 	 * No link; force the MAC to a 100Mbps, full-duplex link.
1455 	 * This is a last resort and may or may not work.
1456 	 */
1457 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1458 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1459 	jme_mac_config(sc);
1460 }
1461 
1462 static void
1463 jme_setwol(struct jme_softc *sc)
1464 {
1465 	struct ifnet *ifp = &sc->arpcom.ac_if;
1466 	uint32_t gpr, pmcs;
1467 	uint16_t pmstat;
1468 	int pmc;
1469 
1470 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1471 		/* No PME capability, PHY power down. */
1472 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1473 		    MII_BMCR, BMCR_PDOWN);
1474 		return;
1475 	}
1476 
1477 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1478 	pmcs = CSR_READ_4(sc, JME_PMCS);
1479 	pmcs &= ~PMCS_WOL_ENB_MASK;
1480 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1481 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1482 		/* Enable PME message. */
1483 		gpr |= GPREG0_PME_ENB;
1484 		/* For gigabit controllers, reset link speed to 10/100. */
1485 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1486 			jme_setlinkspeed(sc);
1487 	}
1488 
1489 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1490 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1491 
1492 	/* Request PME. */
1493 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1494 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1495 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1496 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1497 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1498 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1499 		/* No WOL, PHY power down. */
1500 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1501 		    MII_BMCR, BMCR_PDOWN);
1502 	}
1503 }
1504 #endif
1505 
1506 static int
1507 jme_suspend(device_t dev)
1508 {
1509 	struct jme_softc *sc = device_get_softc(dev);
1510 	struct ifnet *ifp = &sc->arpcom.ac_if;
1511 
1512 	lwkt_serialize_enter(ifp->if_serializer);
1513 	jme_stop(sc);
1514 #ifdef notyet
1515 	jme_setwol(sc);
1516 #endif
1517 	lwkt_serialize_exit(ifp->if_serializer);
1518 
1519 	return (0);
1520 }
1521 
1522 static int
1523 jme_resume(device_t dev)
1524 {
1525 	struct jme_softc *sc = device_get_softc(dev);
1526 	struct ifnet *ifp = &sc->arpcom.ac_if;
1527 #ifdef notyet
1528 	int pmc;
1529 #endif
1530 
1531 	lwkt_serialize_enter(ifp->if_serializer);
1532 
1533 #ifdef notyet
1534 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1535 		uint16_t pmstat;
1536 
1537 		pmstat = pci_read_config(sc->jme_dev,
1538 		    pmc + PCIR_POWER_STATUS, 2);
1539 		/* Disable PME and clear PME status. */
1540 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1541 		pci_write_config(sc->jme_dev,
1542 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1543 	}
1544 #endif
1545 
1546 	if (ifp->if_flags & IFF_UP)
1547 		jme_init(sc);
1548 
1549 	lwkt_serialize_exit(ifp->if_serializer);
1550 
1551 	return (0);
1552 }
1553 
1554 static int
1555 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1556 {
1557 	struct jme_txdesc *txd;
1558 	struct jme_desc *desc;
1559 	struct mbuf *m;
1560 	struct jme_dmamap_ctx ctx;
1561 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1562 	int maxsegs;
1563 	int error, i, prod, symbol_desc;
1564 	uint32_t cflags, flag64;
1565 
1566 	M_ASSERTPKTHDR((*m_head));
1567 
1568 	prod = sc->jme_cdata.jme_tx_prod;
1569 	txd = &sc->jme_cdata.jme_txdesc[prod];
1570 
1571 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1572 		symbol_desc = 1;
1573 	else
1574 		symbol_desc = 0;
1575 
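	/*
	 * Keep JME_TXD_RSVD descriptors free and, in 64-bit DMA mode,
	 * reserve one more for the leading symbol descriptor that is
	 * prepended below.
	 */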
1576 	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1577 		  (JME_TXD_RSVD + symbol_desc);
1578 	if (maxsegs > JME_MAXTXSEGS)
1579 		maxsegs = JME_MAXTXSEGS;
1580 	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1581 		("not enough segments %d\n", maxsegs));
1582 
1583 	ctx.nsegs = maxsegs;
1584 	ctx.segs = txsegs;
1585 	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1586 				     *m_head, jme_dmamap_buf_cb, &ctx,
1587 				     BUS_DMA_NOWAIT);
1588 	if (!error && ctx.nsegs == 0) {
1589 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1590 		error = EFBIG;
1591 	}
1592 	if (error == EFBIG) {
1593 		m = m_defrag(*m_head, MB_DONTWAIT);
1594 		if (m == NULL) {
1595 			if_printf(&sc->arpcom.ac_if,
1596 				  "could not defrag TX mbuf\n");
1597 			error = ENOBUFS;
1598 			goto fail;
1599 		}
1600 		*m_head = m;
1601 
1602 		ctx.nsegs = maxsegs;
1603 		ctx.segs = txsegs;
1604 		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
1605 					     txd->tx_dmamap, *m_head,
1606 					     jme_dmamap_buf_cb, &ctx,
1607 					     BUS_DMA_NOWAIT);
1608 		if (error || ctx.nsegs == 0) {
1609 			if_printf(&sc->arpcom.ac_if,
1610 				  "could not load defragged TX mbuf\n");
1611 			if (!error) {
1612 				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
1613 						  txd->tx_dmamap);
1614 				error = EFBIG;
1615 			}
1616 			goto fail;
1617 		}
1618 	} else if (error) {
1619 		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
1620 		goto fail;
1621 	}
1622 
1623 	m = *m_head;
1624 	cflags = 0;
1625 
1626 	/* Configure checksum offload. */
1627 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1628 		cflags |= JME_TD_IPCSUM;
1629 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1630 		cflags |= JME_TD_TCPCSUM;
1631 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1632 		cflags |= JME_TD_UDPCSUM;
1633 
1634 	/* Configure VLAN. */
1635 	if (m->m_flags & M_VLANTAG) {
1636 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1637 		cflags |= JME_TD_VLAN_TAG;
1638 	}
1639 
1640 	desc = &sc->jme_rdata.jme_tx_ring[prod];
1641 	desc->flags = htole32(cflags);
1642 	desc->addr_hi = htole32(m->m_pkthdr.len);
1643 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1644 		/*
1645 		 * Use 64bits TX desc chain format.
1646 		 *
1647 		 * The first TX desc of the chain, which is setup here,
1648 		 * is just a symbol TX desc carrying no payload.
1649 		 */
1650 		flag64 = JME_TD_64BIT;
1651 		desc->buflen = 0;
1652 		desc->addr_lo = 0;
1653 
1654 		/* No effective TX desc is consumed */
1655 		i = 0;
1656 	} else {
1657 		/*
1658 		 * Use 32bits TX desc chain format.
1659 		 *
1660 		 * The first TX desc of the chain, which is setup here,
1661 		 * is an effective TX desc carrying the first segment of
1662 		 * the mbuf chain.
1663 		 */
1664 		flag64 = 0;
1665 		desc->buflen = htole32(txsegs[0].ds_len);
1666 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1667 
1668 		/* One effective TX desc is consumed */
1669 		i = 1;
1670 	}
1671 	sc->jme_cdata.jme_tx_cnt++;
1672 	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1673 		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1674 	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1675 
1676 	txd->tx_ndesc = 1 - i;
1677 	for (; i < ctx.nsegs; i++) {
1678 		desc = &sc->jme_rdata.jme_tx_ring[prod];
1679 		desc->flags = htole32(JME_TD_OWN | flag64);
1680 		desc->buflen = htole32(txsegs[i].ds_len);
1681 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1682 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1683 
1684 		sc->jme_cdata.jme_tx_cnt++;
1685 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1686 			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1687 		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1688 	}
1689 
1690 	/* Update producer index. */
1691 	sc->jme_cdata.jme_tx_prod = prod;
1692 	/*
1693 	 * Finally request interrupt and give the first descriptor
1694 	 * ownership to the hardware.
1695 	 */
1696 	desc = txd->tx_desc;
1697 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1698 
1699 	txd->tx_m = m;
1700 	txd->tx_ndesc += ctx.nsegs;
1701 
1702 	/* Sync descriptors. */
1703 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1704 			BUS_DMASYNC_PREWRITE);
1705 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1706 			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
1707 	return 0;
1708 fail:
1709 	m_freem(*m_head);
1710 	*m_head = NULL;
1711 	return error;
1712 }
1713 
1714 static void
1715 jme_start(struct ifnet *ifp)
1716 {
1717 	struct jme_softc *sc = ifp->if_softc;
1718 	struct mbuf *m_head;
1719 	int enq = 0;
1720 
1721 	ASSERT_SERIALIZED(ifp->if_serializer);
1722 
1723 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1724 		ifq_purge(&ifp->if_snd);
1725 		return;
1726 	}
1727 
1728 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1729 		return;
1730 
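	/*
	 * Opportunistically reclaim completed Tx descriptors once ring
	 * usage crosses the high-water mark.
	 */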
1731 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1732 		jme_txeof(sc);
1733 
1734 	while (!ifq_is_empty(&ifp->if_snd)) {
1735 		/*
1736 		 * Check number of available TX descs, always
1737 		 * leave JME_TXD_RSVD free TX descs.
1738 		 */
1739 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1740 		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1741 			ifp->if_flags |= IFF_OACTIVE;
1742 			break;
1743 		}
1744 
1745 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1746 		if (m_head == NULL)
1747 			break;
1748 
1749 		/*
1750 		 * Pack the data into the transmit ring. If we
1751 		 * don't have room, set the OACTIVE flag and wait
1752 		 * for the NIC to drain the ring.
1753 		 */
1754 		if (jme_encap(sc, &m_head)) {
1755 			KKASSERT(m_head == NULL);
1756 			ifp->if_oerrors++;
1757 			ifp->if_flags |= IFF_OACTIVE;
1758 			break;
1759 		}
1760 		enq++;
1761 
1762 		/*
1763 		 * If there's a BPF listener, bounce a copy of this frame
1764 		 * to him.
1765 		 */
1766 		ETHER_BPF_MTAP(ifp, m_head);
1767 	}
1768 
1769 	if (enq > 0) {
1770 		/*
1771 		 * Reading TXCSR takes a very long time under heavy load,
1772 		 * so cache the TXCSR value and write the ORed value with
1773 		 * the kick command to TXCSR.  This saves one register
1774 		 * access cycle.
1775 		 */
1776 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1777 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1778 		/* Set a timeout in case the chip goes out to lunch. */
1779 		ifp->if_timer = JME_TX_TIMEOUT;
1780 	}
1781 }
1782 
1783 static void
1784 jme_watchdog(struct ifnet *ifp)
1785 {
1786 	struct jme_softc *sc = ifp->if_softc;
1787 
1788 	ASSERT_SERIALIZED(ifp->if_serializer);
1789 
1790 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1791 		if_printf(ifp, "watchdog timeout (missed link)\n");
1792 		ifp->if_oerrors++;
1793 		jme_init(sc);
1794 		return;
1795 	}
1796 
1797 	jme_txeof(sc);
1798 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1799 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1800 			  "-- recovering\n");
1801 		if (!ifq_is_empty(&ifp->if_snd))
1802 			if_devstart(ifp);
1803 		return;
1804 	}
1805 
1806 	if_printf(ifp, "watchdog timeout\n");
1807 	ifp->if_oerrors++;
1808 	jme_init(sc);
1809 	if (!ifq_is_empty(&ifp->if_snd))
1810 		if_devstart(ifp);
1811 }
1812 
1813 static int
1814 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1815 {
1816 	struct jme_softc *sc = ifp->if_softc;
1817 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1818 	struct ifreq *ifr = (struct ifreq *)data;
1819 	int error = 0, mask;
1820 
1821 	ASSERT_SERIALIZED(ifp->if_serializer);
1822 
1823 	switch (cmd) {
1824 	case SIOCSIFMTU:
1825 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1826 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1827 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1828 			error = EINVAL;
1829 			break;
1830 		}
1831 
1832 		if (ifp->if_mtu != ifr->ifr_mtu) {
1833 			/*
1834 			 * No special configuration is required when the
1835 			 * interface MTU is changed, but the availability of
1836 			 * Tx checksum offload must be checked against the new
1837 			 * MTU size since the Tx FIFO is only 2K deep.
1838 			 */
1839 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1840 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1841 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1842 			}
1843 			ifp->if_mtu = ifr->ifr_mtu;
1844 			if (ifp->if_flags & IFF_RUNNING)
1845 				jme_init(sc);
1846 		}
1847 		break;
1848 
1849 	case SIOCSIFFLAGS:
1850 		if (ifp->if_flags & IFF_UP) {
1851 			if (ifp->if_flags & IFF_RUNNING) {
1852 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1853 				    (IFF_PROMISC | IFF_ALLMULTI))
1854 					jme_set_filter(sc);
1855 			} else {
1856 				jme_init(sc);
1857 			}
1858 		} else {
1859 			if (ifp->if_flags & IFF_RUNNING)
1860 				jme_stop(sc);
1861 		}
1862 		sc->jme_if_flags = ifp->if_flags;
1863 		break;
1864 
1865 	case SIOCADDMULTI:
1866 	case SIOCDELMULTI:
1867 		if (ifp->if_flags & IFF_RUNNING)
1868 			jme_set_filter(sc);
1869 		break;
1870 
1871 	case SIOCSIFMEDIA:
1872 	case SIOCGIFMEDIA:
1873 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1874 		break;
1875 
1876 	case SIOCSIFCAP:
1877 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1878 
1879 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1880 			if (IFCAP_TXCSUM & ifp->if_capabilities) {
1881 				ifp->if_capenable ^= IFCAP_TXCSUM;
1882 				if (IFCAP_TXCSUM & ifp->if_capenable)
1883 					ifp->if_hwassist |= JME_CSUM_FEATURES;
1884 				else
1885 					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1886 			}
1887 		}
1888 		if ((mask & IFCAP_RXCSUM) &&
1889 		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
1890 			uint32_t reg;
1891 
1892 			ifp->if_capenable ^= IFCAP_RXCSUM;
1893 			reg = CSR_READ_4(sc, JME_RXMAC);
1894 			reg &= ~RXMAC_CSUM_ENB;
1895 			if (ifp->if_capenable & IFCAP_RXCSUM)
1896 				reg |= RXMAC_CSUM_ENB;
1897 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1898 		}
1899 
1900 		if ((mask & IFCAP_VLAN_HWTAGGING) &&
1901 		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
1902 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1903 			jme_set_vlan(sc);
1904 		}
1905 		break;
1906 
1907 	default:
1908 		error = ether_ioctl(ifp, cmd, data);
1909 		break;
1910 	}
1911 	return (error);
1912 }
1913 
1914 static void
1915 jme_mac_config(struct jme_softc *sc)
1916 {
1917 	struct mii_data *mii;
1918 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1919 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1920 
1921 	mii = device_get_softc(sc->jme_miibus);
1922 
1923 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1924 	DELAY(10);
1925 	CSR_WRITE_4(sc, JME_GHC, 0);
1926 	ghc = 0;
1927 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1928 	rxmac &= ~RXMAC_FC_ENB;
1929 	txmac = CSR_READ_4(sc, JME_TXMAC);
1930 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1931 	txpause = CSR_READ_4(sc, JME_TXPFC);
1932 	txpause &= ~TXPFC_PAUSE_ENB;
1933 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1934 		ghc |= GHC_FULL_DUPLEX;
1935 		rxmac &= ~RXMAC_COLL_DET_ENB;
1936 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1937 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1938 		    TXMAC_FRAME_BURST);
1939 #ifdef notyet
1940 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1941 			txpause |= TXPFC_PAUSE_ENB;
1942 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1943 			rxmac |= RXMAC_FC_ENB;
1944 #endif
1945 		/* Disable retry transmit timer/retry limit. */
1946 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1947 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1948 	} else {
1949 		rxmac |= RXMAC_COLL_DET_ENB;
1950 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1951 		/* Enable retry transmit timer/retry limit. */
1952 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1953 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1954 	}
1955 
1956 	/*
1957 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1958 	 */
1959 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1960 	gp1 &= ~GPREG1_WA_HDX;
1961 
1962 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1963 		hdx = 1;
1964 
1965 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1966 	case IFM_10_T:
1967 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1968 		if (hdx)
1969 			gp1 |= GPREG1_WA_HDX;
1970 		break;
1971 
1972 	case IFM_100_TX:
1973 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1974 		if (hdx)
1975 			gp1 |= GPREG1_WA_HDX;
1976 
1977 		/*
1978 		 * Use extended FIFO depth to workaround CRC errors
1979 		 * emitted by chips before JMC250B
1980 		 */
1981 		phyconf = JMPHY_CONF_EXTFIFO;
1982 		break;
1983 
1984 	case IFM_1000_T:
1985 		if (sc->jme_caps & JME_CAP_FASTETH)
1986 			break;
1987 
1988 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1989 		if (hdx)
1990 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1991 		break;
1992 
1993 	default:
1994 		break;
1995 	}
1996 	CSR_WRITE_4(sc, JME_GHC, ghc);
1997 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1998 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1999 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2000 
2001 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
2002 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2003 				    JMPHY_CONF, phyconf);
2004 	}
2005 	if (sc->jme_workaround & JME_WA_HDX)
2006 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
2007 }
2008 
2009 static void
2010 jme_intr(void *xsc)
2011 {
2012 	struct jme_softc *sc = xsc;
2013 	struct ifnet *ifp = &sc->arpcom.ac_if;
2014 	uint32_t status;
2015 
2016 	ASSERT_SERIALIZED(ifp->if_serializer);
2017 
2018 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2019 	if (status == 0 || status == 0xFFFFFFFF)
2020 		return;
2021 
2022 	/* Disable interrupts. */
2023 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2024 
2025 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2026 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2027 		goto back;
2028 
2029 	/* Reset PCC counter/timer and Ack interrupts. */
2030 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2031 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2032 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2033 	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2034 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2035 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2036 
2037 	if (ifp->if_flags & IFF_RUNNING) {
2038 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2039 			jme_rxeof(sc);
2040 
2041 		if (status & INTR_RXQ_DESC_EMPTY) {
2042 			/*
2043 			 * Notify the hardware that new Rx buffers are
2044 			 * available. Reading RXCSR takes a very long time
2045 			 * under heavy load, so cache the RXCSR value and
2046 			 * write it ORed with the kick command to RXCSR.
2047 			 * This saves one register access cycle.
2048 			 */
2049 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2050 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2051 		}
2052 
2053 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
2054 			jme_txeof(sc);
2055 			if (!ifq_is_empty(&ifp->if_snd))
2056 				if_devstart(ifp);
2057 		}
2058 	}
2059 back:
2060 	/* Reenable interrupts. */
2061 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2062 }
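/*
 * Illustrative sketch (not compiled, hypothetical name): the
 * interrupt-acknowledge widening performed by jme_intr() above.
 * Whenever a coalescing event (threshold or timeout) is acked, the
 * matching completion bit is acked along with it so the PCC
 * counter/timer restarts cleanly.
 */
#if 0
static uint32_t
jme_widen_ack_sketch(uint32_t status)
{
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	return (status);
}
#endif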
2063 
2064 static void
2065 jme_txeof(struct jme_softc *sc)
2066 {
2067 	struct ifnet *ifp = &sc->arpcom.ac_if;
2068 	struct jme_txdesc *txd;
2069 	uint32_t status;
2070 	int cons, nsegs;
2071 
2072 	cons = sc->jme_cdata.jme_tx_cons;
2073 	if (cons == sc->jme_cdata.jme_tx_prod)
2074 		return;
2075 
2076 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2077 			sc->jme_cdata.jme_tx_ring_map,
2078 			BUS_DMASYNC_POSTREAD);
2079 
2080 	/*
2081 	 * Go through our Tx list and free mbufs for those
2082 	 * frames which have been transmitted.
2083 	 */
2084 	while (cons != sc->jme_cdata.jme_tx_prod) {
2085 		txd = &sc->jme_cdata.jme_txdesc[cons];
2086 		KASSERT(txd->tx_m != NULL,
2087 			("%s: freeing NULL mbuf!\n", __func__));
2088 
2089 		status = le32toh(txd->tx_desc->flags);
2090 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2091 			break;
2092 
2093 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2094 			ifp->if_oerrors++;
2095 		} else {
2096 			ifp->if_opackets++;
2097 			if (status & JME_TD_COLLISION) {
2098 				ifp->if_collisions +=
2099 				    le32toh(txd->tx_desc->buflen) &
2100 				    JME_TD_BUF_LEN_MASK;
2101 			}
2102 		}
2103 
2104 		/*
2105 		 * Only the first descriptor of a multi-descriptor
2106 		 * transmission is updated, so the driver has to skip the
2107 		 * entire chain of buffers for the transmitted frame. In
2108 		 * other words, the JME_TD_OWN bit is valid only in the
2109 		 * first descriptor of a multi-descriptor transmission.
2110 		 */
2111 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2112 			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2113 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
2114 		}
2115 
2116 		/* Reclaim transferred mbufs. */
2117 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2118 		m_freem(txd->tx_m);
2119 		txd->tx_m = NULL;
2120 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2121 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2122 			("%s: Active Tx desc counter was garbled\n", __func__));
2123 		txd->tx_ndesc = 0;
2124 	}
2125 	sc->jme_cdata.jme_tx_cons = cons;
2126 
2127 	if (sc->jme_cdata.jme_tx_cnt == 0)
2128 		ifp->if_timer = 0;
2129 
2130 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2131 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
2132 		ifp->if_flags &= ~IFF_OACTIVE;
2133 
2134 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2135 			sc->jme_cdata.jme_tx_ring_map,
2136 			BUS_DMASYNC_PREWRITE);
2137 }
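/*
 * Illustrative sketch (not compiled, hypothetical name): jme_txeof()
 * above reclaims a whole frame at a time.  Since only the first
 * descriptor's OWN bit is meaningful, the consumer index advances
 * tx_ndesc slots per frame, wrapping just like JME_DESC_INC.
 */
#if 0
static int
jme_reclaim_cons_sketch(int cons, int tx_ndesc, int ring_cnt)
{
	int n;

	for (n = 0; n < tx_ndesc; n++)
		cons = (cons + 1) % ring_cnt;
	return (cons);
}
#endif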
2138 
2139 static __inline void
2140 jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
2141 {
2142 	int i;
2143 
2144 	for (i = 0; i < count; ++i) {
2145 		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];
2146 
2147 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2148 		desc->buflen = htole32(MCLBYTES);
2149 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2150 	}
2151 }
2152 
2153 /* Receive a frame. */
2154 static void
2155 jme_rxpkt(struct jme_softc *sc)
2156 {
2157 	struct ifnet *ifp = &sc->arpcom.ac_if;
2158 	struct jme_desc *desc;
2159 	struct jme_rxdesc *rxd;
2160 	struct mbuf *mp, *m;
2161 	uint32_t flags, status;
2162 	int cons, count, nsegs;
2163 
2164 	cons = sc->jme_cdata.jme_rx_cons;
2165 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2166 	flags = le32toh(desc->flags);
2167 	status = le32toh(desc->buflen);
2168 	nsegs = JME_RX_NSEGS(status);
2169 
2170 	if (status & JME_RX_ERR_STAT) {
2171 		ifp->if_ierrors++;
2172 		jme_discard_rxbufs(sc, cons, nsegs);
2173 #ifdef JME_SHOW_ERRORS
2174 		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2175 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2176 #endif
2177 		sc->jme_cdata.jme_rx_cons += nsegs;
2178 		sc->jme_cdata.jme_rx_cons %= sc->jme_rx_desc_cnt;
2179 		return;
2180 	}
2181 
2182 	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2183 	for (count = 0; count < nsegs; count++,
2184 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2185 		rxd = &sc->jme_cdata.jme_rxdesc[cons];
2186 		mp = rxd->rx_m;
2187 
2188 		/* Add a new receive buffer to the ring. */
2189 		if (jme_newbuf(sc, rxd, 0) != 0) {
2190 			ifp->if_iqdrops++;
2191 			/* Reuse buffer. */
2192 			jme_discard_rxbufs(sc, cons, nsegs - count);
2193 			if (sc->jme_cdata.jme_rxhead != NULL) {
2194 				m_freem(sc->jme_cdata.jme_rxhead);
2195 				JME_RXCHAIN_RESET(sc);
2196 			}
2197 			break;
2198 		}
2199 
2200 		/*
2201 		 * Assume we've received a full-sized frame.
2202 		 * The actual size is fixed up when we encounter the
2203 		 * end of a multi-segment frame.
2204 		 */
2205 		mp->m_len = MCLBYTES;
2206 
2207 		/* Chain received mbufs. */
2208 		if (sc->jme_cdata.jme_rxhead == NULL) {
2209 			sc->jme_cdata.jme_rxhead = mp;
2210 			sc->jme_cdata.jme_rxtail = mp;
2211 		} else {
2212 			/*
2213 			 * The receive processor can handle a maximum
2214 			 * frame size of 65535 bytes.
2215 			 */
2216 			mp->m_flags &= ~M_PKTHDR;
2217 			sc->jme_cdata.jme_rxtail->m_next = mp;
2218 			sc->jme_cdata.jme_rxtail = mp;
2219 		}
2220 
2221 		if (count == nsegs - 1) {
2222 			/* Last desc. for this frame. */
2223 			m = sc->jme_cdata.jme_rxhead;
2224 			/* XXX assert PKTHDR? */
2225 			m->m_flags |= M_PKTHDR;
2226 			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
2227 			if (nsegs > 1) {
2228 				/* Set first mbuf size. */
2229 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2230 				/* Set last mbuf size. */
2231 				mp->m_len = sc->jme_cdata.jme_rxlen -
2232 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2233 				    (MCLBYTES * (nsegs - 2)));
2234 			} else {
2235 				m->m_len = sc->jme_cdata.jme_rxlen;
2236 			}
2237 			m->m_pkthdr.rcvif = ifp;
2238 
2239 			/*
2240 			 * Account for the 10 bytes of auto padding used
2241 			 * to align the IP header on a 32-bit boundary.
2242 			 * Also note that the CRC bytes are automatically
2243 			 * removed by the hardware.
2244 			 */
2245 			m->m_data += JME_RX_PAD_BYTES;
2246 
2247 			/* Set checksum information. */
2248 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2249 			    (flags & JME_RD_IPV4)) {
2250 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2251 				if (flags & JME_RD_IPCSUM)
2252 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2253 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2254 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2255 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2256 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2257 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2258 					m->m_pkthdr.csum_flags |=
2259 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2260 					m->m_pkthdr.csum_data = 0xffff;
2261 				}
2262 			}
2263 
2264 			/* Check for VLAN tagged packets. */
2265 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2266 			    (flags & JME_RD_VLAN_TAG)) {
2267 				m->m_pkthdr.ether_vlantag =
2268 				    flags & JME_RD_VLAN_MASK;
2269 				m->m_flags |= M_VLANTAG;
2270 			}
2271 
2272 			ifp->if_ipackets++;
2273 			/* Pass it on. */
2274 			ifp->if_input(ifp, m);
2275 
2276 			/* Reset mbuf chains. */
2277 			JME_RXCHAIN_RESET(sc);
2278 		}
2279 	}
2280 
2281 	sc->jme_cdata.jme_rx_cons += nsegs;
2282 	sc->jme_cdata.jme_rx_cons %= sc->jme_rx_desc_cnt;
2283 }
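/*
 * Illustrative sketch (not compiled, hypothetical name): the mbuf
 * length fix-up at the end of jme_rxpkt() above.  For an nsegs-cluster
 * frame the first cluster holds MCLBYTES - JME_RX_PAD_BYTES of
 * payload, the middle ones MCLBYTES each, and the last takes whatever
 * remains of the frame length.
 */
#if 0
static int
jme_last_seg_len_sketch(int rxlen, int nsegs)
{
	return (rxlen - ((MCLBYTES - JME_RX_PAD_BYTES) +
	    MCLBYTES * (nsegs - 2)));
}
#endif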
2284 
2285 static void
2286 jme_rxeof(struct jme_softc *sc)
2287 {
2288 	struct jme_desc *desc;
2289 	int nsegs, prog, pktlen;
2290 
2291 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2292 			sc->jme_cdata.jme_rx_ring_map,
2293 			BUS_DMASYNC_POSTREAD);
2294 
2295 	prog = 0;
2296 	for (;;) {
2297 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2298 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2299 			break;
2300 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2301 			break;
2302 
2303 		/*
2304 		 * Check the number of segments against the received
2305 		 * byte count. A mismatch indicates that the hardware
2306 		 * is still updating the Rx descriptors. I'm not sure
2307 		 * whether this check is needed.
2308 		 */
2309 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2310 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2311 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2312 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2313 				  "and packet size(%d) mismatch\n",
2314 				  nsegs, pktlen);
2315 			break;
2316 		}
2317 
2318 		/* Received a frame. */
2319 		jme_rxpkt(sc);
2320 		prog++;
2321 	}
2322 
2323 	if (prog > 0) {
2324 		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2325 				sc->jme_cdata.jme_rx_ring_map,
2326 				BUS_DMASYNC_PREWRITE);
2327 	}
2328 }
2329 
2330 static void
2331 jme_tick(void *xsc)
2332 {
2333 	struct jme_softc *sc = xsc;
2334 	struct ifnet *ifp = &sc->arpcom.ac_if;
2335 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2336 
2337 	lwkt_serialize_enter(ifp->if_serializer);
2338 
2339 	mii_tick(mii);
2340 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2341 
2342 	lwkt_serialize_exit(ifp->if_serializer);
2343 }
2344 
2345 static void
2346 jme_reset(struct jme_softc *sc)
2347 {
2348 #ifdef foo
2349 	/* Stop receiver, transmitter. */
2350 	jme_stop_rx(sc);
2351 	jme_stop_tx(sc);
2352 #endif
2353 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2354 	DELAY(10);
2355 	CSR_WRITE_4(sc, JME_GHC, 0);
2356 }
2357 
2358 static void
2359 jme_init(void *xsc)
2360 {
2361 	struct jme_softc *sc = xsc;
2362 	struct ifnet *ifp = &sc->arpcom.ac_if;
2363 	struct mii_data *mii;
2364 	uint8_t eaddr[ETHER_ADDR_LEN];
2365 	bus_addr_t paddr;
2366 	uint32_t reg;
2367 	int error;
2368 
2369 	ASSERT_SERIALIZED(ifp->if_serializer);
2370 
2371 	/*
2372 	 * Cancel any pending I/O.
2373 	 */
2374 	jme_stop(sc);
2375 
2376 	/*
2377 	 * Reset the chip to a known state.
2378 	 */
2379 	jme_reset(sc);
2380 
2381 	sc->jme_txd_spare =
2382 	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2383 	KKASSERT(sc->jme_txd_spare >= 1);
2384 
2385 	/*
2386 	 * If we use 64bit address mode for transmitting, each Tx request
2387 	 * needs one more symbol descriptor.
2388 	 */
2389 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2390 		sc->jme_txd_spare += 1;
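	/*
	 * Worked example (illustrative, not driver code): howmany()
	 * rounds up, so a 1500-byte MTU plus the 18-byte
	 * ether_vlan_header fits one 2K cluster (howmany(1518, 2048)
	 * == 1), while a 9000-byte jumbo MTU needs howmany(9018, 2048)
	 * == 5 clusters, plus the extra symbol descriptor added just
	 * above in 64-bit mode.
	 */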
2391 
2392 	/* Init descriptors. */
2393 	error = jme_init_rx_ring(sc);
2394 	if (error != 0) {
2395 		device_printf(sc->jme_dev,
2396 		    "%s: initialization failed: no memory for Rx buffers.\n",
2397 		    __func__);
2398 		jme_stop(sc);
2399 		return;
2400 	}
2401 	jme_init_tx_ring(sc);
2402 
2403 	/* Initialize shadow status block. */
2404 	jme_init_ssb(sc);
2405 
2406 	/* Reprogram the station address. */
2407 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2408 	CSR_WRITE_4(sc, JME_PAR0,
2409 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2410 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2411 
2412 	/*
2413 	 * Configure Tx queue.
2414 	 *  Tx priority queue weight value : 0
2415 	 *  Tx FIFO threshold for processing next packet : 16QW
2416 	 *  Maximum Tx DMA length : 512
2417 	 *  Allow Tx DMA burst.
2418 	 */
2419 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2420 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2421 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2422 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2423 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2424 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2425 
2426 	/* Set Tx descriptor counter. */
2427 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2428 
2429 	/* Set Tx ring address to the hardware. */
2430 	paddr = JME_TX_RING_ADDR(sc, 0);
2431 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2432 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2433 
2434 	/* Configure TxMAC parameters. */
2435 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2436 	reg |= TXMAC_THRESH_1_PKT;
2437 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2438 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2439 
2440 	/*
2441 	 * Configure Rx queue.
2442 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2443 	 *  FIFO threshold for processing next packet : 128QW
2444 	 *  Rx queue 0 select
2445 	 *  Max Rx DMA length : 128
2446 	 *  Rx descriptor retry : 32
2447 	 *  Rx descriptor retry time gap : 256ns
2448 	 *  Don't receive runt/bad frame.
2449 	 */
2450 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2451 	/*
2452 	 * Since the Rx FIFO is 4K bytes deep, receiving frames larger
2453 	 * than 4K bytes will cause Rx FIFO overruns, so decrease the
2454 	 * FIFO threshold to reduce overruns for frames larger than
2455 	 * 4000 bytes.
2456 	 * For best performance with standard MTU sized frames, use the
2457 	 * maximum allowable FIFO threshold, 128QW.
2458 	 */
2459 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2460 	    JME_RX_FIFO_SIZE)
2461 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2462 	else
2463 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2464 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2465 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2466 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2467 	/* XXX TODO DROP_BAD */
2468 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2469 
2470 	/* Set Rx descriptor counter. */
2471 	CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2472 
2473 	/* Set Rx ring address to the hardware. */
2474 	paddr = JME_RX_RING_ADDR(sc, 0);
2475 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2476 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2477 
2478 	/* Clear receive filter. */
2479 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2480 
2481 	/* Set up the receive filter. */
2482 	jme_set_filter(sc);
2483 	jme_set_vlan(sc);
2484 
2485 	/*
2486 	 * Disable all WOL bits, as WOL can interfere with normal Rx
2487 	 * operation. Also clear the WOL detection status bits.
2488 	 */
2489 	reg = CSR_READ_4(sc, JME_PMCS);
2490 	reg &= ~PMCS_WOL_ENB_MASK;
2491 	CSR_WRITE_4(sc, JME_PMCS, reg);
2492 
2493 	/*
2494 	 * Pad 10 bytes right before the received frame. This greatly
2495 	 * helps Rx performance on strict-alignment architectures, as
2496 	 * the frame need not be copied to align the payload.
2497 	 */
2498 	reg = CSR_READ_4(sc, JME_RXMAC);
2499 	reg |= RXMAC_PAD_10BYTES;
2500 
2501 	if (ifp->if_capenable & IFCAP_RXCSUM)
2502 		reg |= RXMAC_CSUM_ENB;
2503 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2504 
2505 	/* Configure general purpose reg0 */
2506 	reg = CSR_READ_4(sc, JME_GPREG0);
2507 	reg &= ~GPREG0_PCC_UNIT_MASK;
2508 	/* Set PCC timer resolution to micro-seconds unit. */
2509 	reg |= GPREG0_PCC_UNIT_US;
2510 	/*
2511 	 * Disable all shadow register posting, as we have to read the
2512 	 * JME_INTR_STATUS register in jme_intr. It also seems hard to
2513 	 * synchronize interrupt status between hardware and software
2514 	 * with shadow posting, due to the requirements of
2515 	 * bus_dmamap_sync(9).
2516 	 */
2517 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2518 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2519 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2520 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2521 	/* Disable posting of DW0. */
2522 	reg &= ~GPREG0_POST_DW0_ENB;
2523 	/* Clear PME message. */
2524 	reg &= ~GPREG0_PME_ENB;
2525 	/* Set PHY address. */
2526 	reg &= ~GPREG0_PHY_ADDR_MASK;
2527 	reg |= sc->jme_phyaddr;
2528 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2529 
2530 	/* Configure Tx queue 0 packet completion coalescing. */
2531 	jme_set_tx_coal(sc);
2532 
2533 	/* Configure Rx queue 0 packet completion coalescing. */
2534 	jme_set_rx_coal(sc);
2535 
2536 	/* Configure shadow status block but don't enable posting. */
2537 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2538 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2539 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2540 
2541 	/* Disable Timer 1 and Timer 2. */
2542 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2543 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2544 
2545 	/* Configure retry transmit period, retry limit value. */
2546 	CSR_WRITE_4(sc, JME_TXTRHD,
2547 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2548 	    TXTRHD_RT_PERIOD_MASK) |
2549 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2550 	    TXTRHD_RT_LIMIT_MASK));
2551 
2552 	/* Disable RSS. */
2553 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2554 
2555 	/* Initialize the interrupt mask. */
2556 #ifdef DEVICE_POLLING
2557 	if (!(ifp->if_flags & IFF_POLLING))
2558 #endif
2559 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2560 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2561 
2562 	/*
2563 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2564 	 * done after detection of valid link in jme_miibus_statchg.
2565 	 */
2566 	sc->jme_flags &= ~JME_FLAG_LINK;
2567 
2568 	/* Set the current media. */
2569 	mii = device_get_softc(sc->jme_miibus);
2570 	mii_mediachg(mii);
2571 
2572 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2573 
2574 	ifp->if_flags |= IFF_RUNNING;
2575 	ifp->if_flags &= ~IFF_OACTIVE;
2576 }
2577 
2578 static void
2579 jme_stop(struct jme_softc *sc)
2580 {
2581 	struct ifnet *ifp = &sc->arpcom.ac_if;
2582 	struct jme_txdesc *txd;
2583 	struct jme_rxdesc *rxd;
2584 	int i;
2585 
2586 	ASSERT_SERIALIZED(ifp->if_serializer);
2587 
2588 	/*
2589 	 * Mark the interface down and cancel the watchdog timer.
2590 	 */
2591 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2592 	ifp->if_timer = 0;
2593 
2594 	callout_stop(&sc->jme_tick_ch);
2595 	sc->jme_flags &= ~JME_FLAG_LINK;
2596 
2597 	/*
2598 	 * Disable interrupts.
2599 	 */
2600 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2601 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2602 
2603 	/* Disable updating shadow status block. */
2604 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2605 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2606 
2607 	/* Stop receiver, transmitter. */
2608 	jme_stop_rx(sc);
2609 	jme_stop_tx(sc);
2610 
2611 #ifdef foo
2612 	/* Reclaim Rx/Tx buffers that have been completed. */
2613 	jme_rxeof(sc);
2614 	if (sc->jme_cdata.jme_rxhead != NULL)
2615 		m_freem(sc->jme_cdata.jme_rxhead);
2616 	JME_RXCHAIN_RESET(sc);
2617 	jme_txeof(sc);
2618 #endif
2619 
2620 	/*
2621 	 * Free partial finished RX segments
2622 	 * Free any partially assembled Rx segments.
2623 	if (sc->jme_cdata.jme_rxhead != NULL)
2624 		m_freem(sc->jme_cdata.jme_rxhead);
2625 	JME_RXCHAIN_RESET(sc);
2626 
2627 	/*
2628 	 * Free RX and TX mbufs still in the queues.
2629 	 */
2630 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2631 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2632 		if (rxd->rx_m != NULL) {
2633 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2634 			    rxd->rx_dmamap);
2635 			m_freem(rxd->rx_m);
2636 			rxd->rx_m = NULL;
2637 		}
2638 	}
2639 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2640 		txd = &sc->jme_cdata.jme_txdesc[i];
2641 		if (txd->tx_m != NULL) {
2642 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2643 			    txd->tx_dmamap);
2644 			m_freem(txd->tx_m);
2645 			txd->tx_m = NULL;
2646 			txd->tx_ndesc = 0;
2647 		}
2648 	}
2649 }
2650 
2651 static void
2652 jme_stop_tx(struct jme_softc *sc)
2653 {
2654 	uint32_t reg;
2655 	int i;
2656 
2657 	reg = CSR_READ_4(sc, JME_TXCSR);
2658 	if ((reg & TXCSR_TX_ENB) == 0)
2659 		return;
2660 	reg &= ~TXCSR_TX_ENB;
2661 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2662 	for (i = JME_TIMEOUT; i > 0; i--) {
2663 		DELAY(1);
2664 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2665 			break;
2666 	}
2667 	if (i == 0)
2668 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2669 }
2670 
2671 static void
2672 jme_stop_rx(struct jme_softc *sc)
2673 {
2674 	uint32_t reg;
2675 	int i;
2676 
2677 	reg = CSR_READ_4(sc, JME_RXCSR);
2678 	if ((reg & RXCSR_RX_ENB) == 0)
2679 		return;
2680 	reg &= ~RXCSR_RX_ENB;
2681 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2682 	for (i = JME_TIMEOUT; i > 0; i--) {
2683 		DELAY(1);
2684 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2685 			break;
2686 	}
2687 	if (i == 0)
2688 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2689 }
2690 
2691 static void
2692 jme_init_tx_ring(struct jme_softc *sc)
2693 {
2694 	struct jme_ring_data *rd;
2695 	struct jme_txdesc *txd;
2696 	int i;
2697 
2698 	sc->jme_cdata.jme_tx_prod = 0;
2699 	sc->jme_cdata.jme_tx_cons = 0;
2700 	sc->jme_cdata.jme_tx_cnt = 0;
2701 
2702 	rd = &sc->jme_rdata;
2703 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2704 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2705 		txd = &sc->jme_cdata.jme_txdesc[i];
2706 		txd->tx_m = NULL;
2707 		txd->tx_desc = &rd->jme_tx_ring[i];
2708 		txd->tx_ndesc = 0;
2709 	}
2710 
2711 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2712 			sc->jme_cdata.jme_tx_ring_map,
2713 			BUS_DMASYNC_PREWRITE);
2714 }
2715 
2716 static void
2717 jme_init_ssb(struct jme_softc *sc)
2718 {
2719 	struct jme_ring_data *rd;
2720 
2721 	rd = &sc->jme_rdata;
2722 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2723 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2724 			BUS_DMASYNC_PREWRITE);
2725 }
2726 
2727 static int
2728 jme_init_rx_ring(struct jme_softc *sc)
2729 {
2730 	struct jme_ring_data *rd;
2731 	struct jme_rxdesc *rxd;
2732 	int i;
2733 
2734 	KKASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2735 		 sc->jme_cdata.jme_rxtail == NULL &&
2736 		 sc->jme_cdata.jme_rxlen == 0);
2737 	sc->jme_cdata.jme_rx_cons = 0;
2738 
2739 	rd = &sc->jme_rdata;
2740 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE(sc));
2741 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2742 		int error;
2743 
2744 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2745 		rxd->rx_m = NULL;
2746 		rxd->rx_desc = &rd->jme_rx_ring[i];
2747 		error = jme_newbuf(sc, rxd, 1);
2748 		if (error)
2749 			return (error);
2750 	}
2751 
2752 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2753 			sc->jme_cdata.jme_rx_ring_map,
2754 			BUS_DMASYNC_PREWRITE);
2755 	return (0);
2756 }
2757 
2758 static int
2759 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
2760 {
2761 	struct jme_desc *desc;
2762 	struct mbuf *m;
2763 	struct jme_dmamap_ctx ctx;
2764 	bus_dma_segment_t segs;
2765 	bus_dmamap_t map;
2766 	int error;
2767 
2768 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2769 	if (m == NULL)
2770 		return (ENOBUFS);
2771 	/*
2772 	 * The JMC250 has a 64-bit boundary alignment limitation, so
2773 	 * jme(4) takes advantage of the hardware's 10-byte padding
2774 	 * feature to avoid copying the entire frame just to align the
2775 	 * IP header on a 32-bit boundary.
2776 	 */
2777 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2778 
2779 	ctx.nsegs = 1;
2780 	ctx.segs = &segs;
2781 	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_rx_tag,
2782 				     sc->jme_cdata.jme_rx_sparemap,
2783 				     m, jme_dmamap_buf_cb, &ctx,
2784 				     BUS_DMA_NOWAIT);
2785 	if (error || ctx.nsegs == 0) {
2786 		if (!error) {
2787 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2788 					  sc->jme_cdata.jme_rx_sparemap);
2789 			error = EFBIG;
2790 			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
2791 		}
2792 		m_freem(m);
2793 
2794 		if (init)
2795 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2796 		return (error);
2797 	}
2798 
2799 	if (rxd->rx_m != NULL) {
2800 		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
2801 				BUS_DMASYNC_POSTREAD);
2802 		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
2803 	}
2804 	map = rxd->rx_dmamap;
2805 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2806 	sc->jme_cdata.jme_rx_sparemap = map;
2807 	rxd->rx_m = m;
2808 
2809 	desc = rxd->rx_desc;
2810 	desc->buflen = htole32(segs.ds_len);
2811 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2812 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2813 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2814 
2815 	return (0);
2816 }
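/*
 * Illustrative sketch (not compiled, hypothetical name): the
 * spare-map dance in jme_newbuf() above.  The new mbuf is loaded into
 * the spare DMA map first; only after a successful load are the
 * descriptor's map and the spare exchanged, so a load failure leaves
 * the old buffer and its mapping intact.
 */
#if 0
static void
jme_swap_maps_sketch(bus_dmamap_t *cur, bus_dmamap_t *spare)
{
	bus_dmamap_t tmp = *cur;

	*cur = *spare;
	*spare = tmp;
}
#endif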
2817 
2818 static void
2819 jme_set_vlan(struct jme_softc *sc)
2820 {
2821 	struct ifnet *ifp = &sc->arpcom.ac_if;
2822 	uint32_t reg;
2823 
2824 	ASSERT_SERIALIZED(ifp->if_serializer);
2825 
2826 	reg = CSR_READ_4(sc, JME_RXMAC);
2827 	reg &= ~RXMAC_VLAN_ENB;
2828 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2829 		reg |= RXMAC_VLAN_ENB;
2830 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2831 }
2832 
2833 static void
2834 jme_set_filter(struct jme_softc *sc)
2835 {
2836 	struct ifnet *ifp = &sc->arpcom.ac_if;
2837 	struct ifmultiaddr *ifma;
2838 	uint32_t crc;
2839 	uint32_t mchash[2];
2840 	uint32_t rxcfg;
2841 
2842 	ASSERT_SERIALIZED(ifp->if_serializer);
2843 
2844 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2845 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2846 	    RXMAC_ALLMULTI);
2847 
2848 	/*
2849 	 * Always accept frames destined for our station address.
2850 	 * Always accept broadcast frames.
2851 	 */
2852 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2853 
2854 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2855 		if (ifp->if_flags & IFF_PROMISC)
2856 			rxcfg |= RXMAC_PROMISC;
2857 		if (ifp->if_flags & IFF_ALLMULTI)
2858 			rxcfg |= RXMAC_ALLMULTI;
2859 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2860 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2861 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2862 		return;
2863 	}
2864 
2865 	/*
2866 	 * Set up the multicast address filter by passing all multicast
2867 	 * addresses through a CRC generator, and then using the low-order
2868 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2869 	 * high-order bit of the index selects the register, while the
2870 	 * remaining bits select the bit within the register.
2871 	 */
2872 	rxcfg |= RXMAC_MULTICAST;
2873 	bzero(mchash, sizeof(mchash));
2874 
2875 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2876 		if (ifma->ifma_addr->sa_family != AF_LINK)
2877 			continue;
2878 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2879 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2880 
2881 		/* Just want the 6 least significant bits. */
2882 		crc &= 0x3f;
2883 
2884 		/* Set the corresponding bit in the hash table. */
2885 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2886 	}
2887 
2888 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2889 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2890 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2891 }
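/*
 * Illustrative sketch (not compiled, hypothetical name): mapping one
 * multicast address to its bit in the 64-bit hash, as jme_set_filter()
 * does above.  Of the big-endian CRC32's six low-order bits, the top
 * bit picks MAR0 vs. MAR1 and the low five bits pick the bit inside
 * the chosen register.
 */
#if 0
static void
jme_mchash_sketch(uint32_t crc, uint32_t mchash[2])
{
	crc &= 0x3f;				/* keep 6 bits of the CRC */
	mchash[crc >> 5] |= 1 << (crc & 0x1f);	/* word 0/1, bit 0..31 */
}
#endif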
2892 
2893 static int
2894 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2895 {
2896 	struct jme_softc *sc = arg1;
2897 	struct ifnet *ifp = &sc->arpcom.ac_if;
2898 	int error, v;
2899 
2900 	lwkt_serialize_enter(ifp->if_serializer);
2901 
2902 	v = sc->jme_tx_coal_to;
2903 	error = sysctl_handle_int(oidp, &v, 0, req);
2904 	if (error || req->newptr == NULL)
2905 		goto back;
2906 
2907 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2908 		error = EINVAL;
2909 		goto back;
2910 	}
2911 
2912 	if (v != sc->jme_tx_coal_to) {
2913 		sc->jme_tx_coal_to = v;
2914 		if (ifp->if_flags & IFF_RUNNING)
2915 			jme_set_tx_coal(sc);
2916 	}
2917 back:
2918 	lwkt_serialize_exit(ifp->if_serializer);
2919 	return error;
2920 }
2921 
2922 static int
2923 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2924 {
2925 	struct jme_softc *sc = arg1;
2926 	struct ifnet *ifp = &sc->arpcom.ac_if;
2927 	int error, v;
2928 
2929 	lwkt_serialize_enter(ifp->if_serializer);
2930 
2931 	v = sc->jme_tx_coal_pkt;
2932 	error = sysctl_handle_int(oidp, &v, 0, req);
2933 	if (error || req->newptr == NULL)
2934 		goto back;
2935 
2936 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2937 		error = EINVAL;
2938 		goto back;
2939 	}
2940 
2941 	if (v != sc->jme_tx_coal_pkt) {
2942 		sc->jme_tx_coal_pkt = v;
2943 		if (ifp->if_flags & IFF_RUNNING)
2944 			jme_set_tx_coal(sc);
2945 	}
2946 back:
2947 	lwkt_serialize_exit(ifp->if_serializer);
2948 	return error;
2949 }
2950 
2951 static int
2952 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2953 {
2954 	struct jme_softc *sc = arg1;
2955 	struct ifnet *ifp = &sc->arpcom.ac_if;
2956 	int error, v;
2957 
2958 	lwkt_serialize_enter(ifp->if_serializer);
2959 
2960 	v = sc->jme_rx_coal_to;
2961 	error = sysctl_handle_int(oidp, &v, 0, req);
2962 	if (error || req->newptr == NULL)
2963 		goto back;
2964 
2965 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2966 		error = EINVAL;
2967 		goto back;
2968 	}
2969 
2970 	if (v != sc->jme_rx_coal_to) {
2971 		sc->jme_rx_coal_to = v;
2972 		if (ifp->if_flags & IFF_RUNNING)
2973 			jme_set_rx_coal(sc);
2974 	}
2975 back:
2976 	lwkt_serialize_exit(ifp->if_serializer);
2977 	return error;
2978 }
2979 
2980 static int
2981 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2982 {
2983 	struct jme_softc *sc = arg1;
2984 	struct ifnet *ifp = &sc->arpcom.ac_if;
2985 	int error, v;
2986 
2987 	lwkt_serialize_enter(ifp->if_serializer);
2988 
2989 	v = sc->jme_rx_coal_pkt;
2990 	error = sysctl_handle_int(oidp, &v, 0, req);
2991 	if (error || req->newptr == NULL)
2992 		goto back;
2993 
2994 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2995 		error = EINVAL;
2996 		goto back;
2997 	}
2998 
2999 	if (v != sc->jme_rx_coal_pkt) {
3000 		sc->jme_rx_coal_pkt = v;
3001 		if (ifp->if_flags & IFF_RUNNING)
3002 			jme_set_rx_coal(sc);
3003 	}
3004 back:
3005 	lwkt_serialize_exit(ifp->if_serializer);
3006 	return error;
3007 }
3008 
3009 static void
3010 jme_set_tx_coal(struct jme_softc *sc)
3011 {
3012 	uint32_t reg;
3013 
3014 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3015 	    PCCTX_COAL_TO_MASK;
3016 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3017 	    PCCTX_COAL_PKT_MASK;
3018 	reg |= PCCTX_COAL_TXQ0;
3019 	CSR_WRITE_4(sc, JME_PCCTX, reg);
3020 }
3021 
3022 static void
3023 jme_set_rx_coal(struct jme_softc *sc)
3024 {
3025 	uint32_t reg;
3026 
3027 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3028 	    PCCRX_COAL_TO_MASK;
3029 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3030 	    PCCRX_COAL_PKT_MASK;
3031 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
3032 }
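/*
 * Illustrative sketch (not compiled, hypothetical name): both
 * coalescing setters above pack a timeout and a packet-count field
 * into a single register with the same shift-and-mask pattern.
 */
#if 0
static uint32_t
jme_pack_field_sketch(uint32_t val, int shift, uint32_t mask)
{
	return ((val << shift) & mask);
}
#endif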
3033 
3034 #ifdef DEVICE_POLLING
3035 
3036 static void
3037 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3038 {
3039 	struct jme_softc *sc = ifp->if_softc;
3040 	uint32_t status;
3041 
3042 	ASSERT_SERIALIZED(ifp->if_serializer);
3043 
3044 	switch (cmd) {
3045 	case POLL_REGISTER:
3046 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3047 		break;
3048 
3049 	case POLL_DEREGISTER:
3050 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3051 		break;
3052 
3053 	case POLL_AND_CHECK_STATUS:
3054 	case POLL_ONLY:
3055 		status = CSR_READ_4(sc, JME_INTR_STATUS);
3056 		jme_rxeof(sc);
3057 
3058 		if (status & INTR_RXQ_DESC_EMPTY) {
3059 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3060 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3061 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
3062 		}
3063 
3064 		jme_txeof(sc);
3065 		if (!ifq_is_empty(&ifp->if_snd))
3066 			if_devstart(ifp);
3067 		break;
3068 	}
3069 }
3070 
3071 #endif	/* DEVICE_POLLING */
3072