xref: /dflybsd-src/sys/dev/netif/jme/if_jme.c (revision cb8d752c91e50d49493e6841a71a3add51e2f2da)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.2 2008/08/03 11:00:32 sephe Exp $
29  */
30 
31 #include "opt_ethernet.h"
32 
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/bpf.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/ifq_var.h>
53 #include <net/vlan/if_vlan_var.h>
54 #include <net/vlan/if_vlan_ether.h>
55 
56 #include <dev/netif/mii_layer/miivar.h>
57 
58 #include <bus/pci/pcireg.h>
59 #include <bus/pci/pcivar.h>
60 #include <bus/pci/pcidevs.h>
61 
62 #include "if_jmereg.h"
63 #include "if_jmevar.h"
64 
65 #include "miibus_if.h"
66 
67 /* Define the following to disable printing Rx errors. */
68 #undef	JME_SHOW_ERRORS
69 
70 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
71 
72 static int	jme_probe(device_t);
73 static int	jme_attach(device_t);
74 static int	jme_detach(device_t);
75 static int	jme_shutdown(device_t);
76 static int	jme_suspend(device_t);
77 static int	jme_resume(device_t);
78 
79 static int	jme_miibus_readreg(device_t, int, int);
80 static int	jme_miibus_writereg(device_t, int, int, int);
81 static void	jme_miibus_statchg(device_t);
82 
83 static void	jme_init(void *);
84 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
85 static void	jme_start(struct ifnet *);
86 static void	jme_watchdog(struct ifnet *);
87 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
88 static int	jme_mediachange(struct ifnet *);
89 
90 static void	jme_intr(void *);
91 static void	jme_txeof(struct jme_softc *);
92 static void	jme_rxeof(struct jme_softc *);
93 
94 static int	jme_dma_alloc(struct jme_softc *);
95 static void	jme_dma_free(struct jme_softc *);
96 static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
97 static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
98 				  bus_size_t, int);
99 static int	jme_init_rx_ring(struct jme_softc *);
100 static void	jme_init_tx_ring(struct jme_softc *);
101 static void	jme_init_ssb(struct jme_softc *);
102 static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
103 static int	jme_encap(struct jme_softc *, struct mbuf **);
104 static void	jme_rxpkt(struct jme_softc *);
105 
106 static void	jme_tick(void *);
107 static void	jme_stop(struct jme_softc *);
108 static void	jme_reset(struct jme_softc *);
109 static void	jme_set_vlan(struct jme_softc *);
110 static void	jme_set_filter(struct jme_softc *);
111 static void	jme_stop_tx(struct jme_softc *);
112 static void	jme_stop_rx(struct jme_softc *);
113 static void	jme_mac_config(struct jme_softc *);
114 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
115 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
116 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
117 #ifdef notyet
118 static void	jme_setwol(struct jme_softc *);
119 static void	jme_setlinkspeed(struct jme_softc *);
120 #endif
121 
122 static void	jme_sysctl_node(struct jme_softc *);
123 static int	sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
124 static int	sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
125 static int	sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
126 static int	sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
127 
128 /*
129  * Devices supported by this driver.
130  */
131 static const struct jme_dev {
132 	uint16_t	jme_vendorid;
133 	uint16_t	jme_deviceid;
134 	const char	*jme_name;
135 } jme_devs[] = {
136 	{ VENDORID_JMICRON, DEVICEID_JMC250,
137 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
138 	{ VENDORID_JMICRON, DEVICEID_JMC260,
139 	    "JMicron Inc, JMC260 Fast Ethernet" },
140 	{ 0, 0, NULL }
141 };
142 
143 static device_method_t jme_methods[] = {
144 	/* Device interface. */
145 	DEVMETHOD(device_probe,		jme_probe),
146 	DEVMETHOD(device_attach,	jme_attach),
147 	DEVMETHOD(device_detach,	jme_detach),
148 	DEVMETHOD(device_shutdown,	jme_shutdown),
149 	DEVMETHOD(device_suspend,	jme_suspend),
150 	DEVMETHOD(device_resume,	jme_resume),
151 
152 	/* Bus interface. */
153 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
154 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
155 
156 	/* MII interface. */
157 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
158 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
159 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
160 
161 	{ NULL, NULL }
162 };
163 
164 static driver_t jme_driver = {
165 	"jme",
166 	jme_methods,
167 	sizeof(struct jme_softc)
168 };
169 
170 static devclass_t jme_devclass;
171 
172 DECLARE_DUMMY_MODULE(if_jme);
173 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
174 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
175 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
176 
177 /*
178  *	Read a PHY register on the MII of the JMC250.
179  */
180 static int
181 jme_miibus_readreg(device_t dev, int phy, int reg)
182 {
183 	struct jme_softc *sc = device_get_softc(dev);
184 	uint32_t val;
185 	int i;
186 
187 	/* For FPGA version, PHY address 0 should be ignored. */
188 	if (sc->jme_flags & JME_FLAG_FPGA) {
189 		if (phy == 0)
190 			return (0);
191 	} else {
192 		if (sc->jme_phyaddr != phy)
193 			return (0);
194 	}
195 
196 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
197 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
198 
199 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
200 		DELAY(1);
201 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
202 			break;
203 	}
204 	if (i == 0) {
205 		device_printf(sc->jme_dev, "phy read timeout: "
206 			      "phy %d, reg %d\n", phy, reg);
207 		return (0);
208 	}
209 
210 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
211 }
212 
213 /*
214  *	Write a PHY register on the MII of the JMC250.
215  */
216 static int
217 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
218 {
219 	struct jme_softc *sc = device_get_softc(dev);
220 	int i;
221 
222 	/* For FPGA version, PHY address 0 should be ignored. */
223 	if (sc->jme_flags & JME_FLAG_FPGA) {
224 		if (phy == 0)
225 			return (0);
226 	} else {
227 		if (sc->jme_phyaddr != phy)
228 			return (0);
229 	}
230 
231 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
232 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
233 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
234 
235 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
236 		DELAY(1);
237 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
238 			break;
239 	}
240 	if (i == 0) {
241 		device_printf(sc->jme_dev, "phy write timeout: "
242 			      "phy %d, reg %d\n", phy, reg);
243 	}
244 
245 	return (0);
246 }
247 
248 /*
249  *	Callback from MII layer when media changes.
250  */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Nothing to do until the interface has been brought up. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	/*
	 * Re-derive the link state from the resolved media.  1000baseT
	 * is only counted as link on parts without the FASTETH flag
	 * (JMC260 is Fast Ethernet only).
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_flags & JME_FLAG_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources.  Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain received frames and discard any partial Rx chain. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/* Reap completed transmits, then drop anything still queued. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
374 
375 /*
376  *	Get the current interface media status.
377  */
378 static void
379 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
380 {
381 	struct jme_softc *sc = ifp->if_softc;
382 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
383 
384 	ASSERT_SERIALIZED(ifp->if_serializer);
385 
386 	mii_pollstat(mii);
387 	ifmr->ifm_status = mii->mii_media_status;
388 	ifmr->ifm_active = mii->mii_media_active;
389 }
390 
391 /*
392  *	Set hardware to newly-selected media.
393  */
394 static int
395 jme_mediachange(struct ifnet *ifp)
396 {
397 	struct jme_softc *sc = ifp->if_softc;
398 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
399 	int error;
400 
401 	ASSERT_SERIALIZED(ifp->if_serializer);
402 
403 	if (mii->mii_instance != 0) {
404 		struct mii_softc *miisc;
405 
406 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
407 			mii_phy_reset(miisc);
408 	}
409 	error = mii_mediachg(mii);
410 
411 	return (error);
412 }
413 
414 static int
415 jme_probe(device_t dev)
416 {
417 	const struct jme_dev *sp;
418 	uint16_t vid, did;
419 
420 	vid = pci_get_vendor(dev);
421 	did = pci_get_device(dev);
422 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
423 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
424 			device_set_desc(dev, sp->jme_name);
425 			if (vid == DEVICEID_JMC260) {
426 				struct jme_softc *sc = device_get_softc(dev);
427 				sc->jme_flags = JME_FLAG_FASTETH |
428 						JME_FLAG_NOJUMBO;
429 			}
430 			return (0);
431 		}
432 	}
433 	return (ENXIO);
434 }
435 
436 static int
437 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
438 {
439 	uint32_t reg;
440 	int i;
441 
442 	*val = 0;
443 	for (i = JME_TIMEOUT; i > 0; i--) {
444 		reg = CSR_READ_4(sc, JME_SMBCSR);
445 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
446 			break;
447 		DELAY(1);
448 	}
449 
450 	if (i == 0) {
451 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
452 		return (ETIMEDOUT);
453 	}
454 
455 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
456 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
457 	for (i = JME_TIMEOUT; i > 0; i--) {
458 		DELAY(1);
459 		reg = CSR_READ_4(sc, JME_SMBINTF);
460 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
461 			break;
462 	}
463 
464 	if (i == 0) {
465 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
466 		return (ETIMEDOUT);
467 	}
468 
469 	reg = CSR_READ_4(sc, JME_SMBINTF);
470 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
471 
472 	return (0);
473 }
474 
/*
 * Walk the EEPROM descriptor list looking for writes into the PAR0
 * station-address registers, collecting the six MAC address bytes.
 * Returns 0 once all ETHER_ADDR_LEN bytes are found, ENOENT otherwise.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	/* The EEPROM must begin with the two signature bytes. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		/* First byte of each descriptor: function/page selector. */
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Only function 0 / BAR1 page descriptors are of interest. */
		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
		    JME_EEPROM_PAGE_BAR1) == fup) {
			/* Second byte: target register offset. */
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				/* Third byte: the MAC address byte itself. */
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
518 
519 static void
520 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
521 {
522 	uint32_t par0, par1;
523 
524 	/* Read station address. */
525 	par0 = CSR_READ_4(sc, JME_PAR0);
526 	par1 = CSR_READ_4(sc, JME_PAR1);
527 	par1 &= 0xFFFF;
528 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
529 		device_printf(sc->jme_dev,
530 		    "generating fake ethernet address.\n");
531 		par0 = karc4random();
532 		/* Set OUI to JMicron. */
533 		eaddr[0] = 0x00;
534 		eaddr[1] = 0x1B;
535 		eaddr[2] = 0x8C;
536 		eaddr[3] = (par0 >> 16) & 0xff;
537 		eaddr[4] = (par0 >> 8) & 0xff;
538 		eaddr[5] = par0 & 0xff;
539 	} else {
540 		eaddr[0] = (par0 >> 0) & 0xFF;
541 		eaddr[1] = (par0 >> 8) & 0xFF;
542 		eaddr[2] = (par0 >> 16) & 0xFF;
543 		eaddr[3] = (par0 >> 24) & 0xFF;
544 		eaddr[4] = (par1 >> 0) & 0xFF;
545 		eaddr[5] = (par1 >> 8) & 0xFF;
546 	}
547 }
548 
/*
 * Attach routine: map the register BAR, allocate the IRQ, reset the
 * chip, determine the station address and PHY address, create the DMA
 * rings and hook the interface into the network stack.  On failure
 * jme_detach() is invoked to unwind any partially acquired resources.
 */
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint8_t pcie_ptr;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc->jme_dev = dev;
	/* NOTE(review): redundant -- ifp already points here from its initializer. */
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		/*
		 * The D-state transition may clear the interrupt line
		 * and BAR registers; save them and restore afterwards.
		 */
		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map entire memory
	 * space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						 &sc->jme_irq_rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_flags |= JME_FLAG_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision : 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/*
	 * Get station address: prefer the EEPROM when present, fall
	 * back to the PAR registers (or a generated address) otherwise.
	 */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_flags |= JME_FLAG_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		/* Match the Tx DMA burst to the PCIe max read request size. */
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_flags |= JME_FLAG_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	/* Fill in the interface's methods and capabilities. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, JME_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = JME_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_flags & JME_FLAG_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			/* Use the first PHY found at a non-zero address. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
				    0x0004);
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
			       &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Bind the interface to the CPU servicing its interrupt. */
	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	jme_detach(dev);
	return (error);
}
776 
/*
 * Detach routine.  Also used as the error-unwind path from
 * jme_attach(), so every resource is checked before being released.
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Quiesce the chip and remove the interrupt handler. */
		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	/* Tear down the sysctl tree created in jme_sysctl_node(). */
	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	/* Detach the miibus child before generic bus teardown. */
	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
				     sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	/* Release DMA rings, maps and tags. */
	jme_dma_free(sc);

	return (0);
}
814 
815 static void
816 jme_sysctl_node(struct jme_softc *sc)
817 {
818 	int error;
819 
820 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
821 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
822 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
823 				device_get_nameunit(sc->jme_dev),
824 				CTLFLAG_RD, 0, "");
825 	if (sc->jme_sysctl_tree == NULL) {
826 		device_printf(sc->jme_dev, "can't add sysctl node\n");
827 		return;
828 	}
829 
830 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
831 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
832 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to,
833 	    0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
834 
835 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
836 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
837 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt,
838 	    0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
839 
840 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
841 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
842 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to,
843 	    0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
844 
845 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
846 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
847 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt,
848 	    0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
849 
850 	/* Pull in device tunables. */
851 	sc->jme_process_limit = JME_PROC_DEFAULT;
852 
853 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
854 	error = resource_int_value(device_get_name(sc->jme_dev),
855 	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
856 	if (error == 0) {
857 		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
858 		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
859 			device_printf(sc->jme_dev,
860 			    "tx_coal_to value out of range; "
861 			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
862 			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
863 		}
864 	}
865 
866 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
867 	error = resource_int_value(device_get_name(sc->jme_dev),
868 	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to);
869 	if (error == 0) {
870 		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
871 		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
872 			device_printf(sc->jme_dev,
873 			    "tx_coal_pkt value out of range; "
874 			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
875 			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
876 		}
877 	}
878 
879 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
880 	error = resource_int_value(device_get_name(sc->jme_dev),
881 	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
882 	if (error == 0) {
883 		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
884 		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
885 			device_printf(sc->jme_dev,
886 			    "rx_coal_to value out of range; "
887 			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
888 			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
889 		}
890 	}
891 
892 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
893 	error = resource_int_value(device_get_name(sc->jme_dev),
894 	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to);
895 	if (error == 0) {
896 		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
897 		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
898 			device_printf(sc->jme_dev,
899 			    "tx_coal_pkt value out of range; "
900 			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
901 			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
902 		}
903 	}
904 }
905 
906 static void
907 jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
908 {
909 	if (error)
910 		return;
911 
912 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
913 	*((bus_addr_t *)arg) = segs->ds_addr;
914 }
915 
916 static void
917 jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
918 		  bus_size_t mapsz __unused, int error)
919 {
920 	struct jme_dmamap_ctx *ctx = xctx;
921 	int i;
922 
923 	if (error)
924 		return;
925 
926 	if (nsegs > ctx->nsegs) {
927 		ctx->nsegs = 0;
928 		return;
929 	}
930 
931 	ctx->nsegs = nsegs;
932 	for (i = 0; i < nsegs; ++i)
933 		ctx->segs[i] = segs[i];
934 }
935 
936 static int
937 jme_dma_alloc(struct jme_softc *sc)
938 {
939 	struct jme_txdesc *txd;
940 	struct jme_rxdesc *rxd;
941 	bus_addr_t busaddr, lowaddr, rx_ring_end, tx_ring_end;
942 	int error, i;
943 
944 	lowaddr = BUS_SPACE_MAXADDR;
945 
946 again:
947 	/* Create parent ring tag. */
948 	error = bus_dma_tag_create(NULL,/* parent */
949 	    1, 0,			/* algnmnt, boundary */
950 	    lowaddr,			/* lowaddr */
951 	    BUS_SPACE_MAXADDR,		/* highaddr */
952 	    NULL, NULL,			/* filter, filterarg */
953 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
954 	    0,				/* nsegments */
955 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
956 	    0,				/* flags */
957 	    &sc->jme_cdata.jme_ring_tag);
958 	if (error) {
959 		device_printf(sc->jme_dev,
960 		    "could not create parent ring DMA tag.\n");
961 		return error;
962 	}
963 
964 	/*
965 	 * Create DMA stuffs for TX ring
966 	 */
967 
968 	/* Create tag for Tx ring. */
969 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
970 	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
971 	    BUS_SPACE_MAXADDR,		/* lowaddr */
972 	    BUS_SPACE_MAXADDR,		/* highaddr */
973 	    NULL, NULL,			/* filter, filterarg */
974 	    JME_TX_RING_SIZE,		/* maxsize */
975 	    1,				/* nsegments */
976 	    JME_TX_RING_SIZE,		/* maxsegsize */
977 	    0,				/* flags */
978 	    &sc->jme_cdata.jme_tx_ring_tag);
979 	if (error) {
980 		device_printf(sc->jme_dev,
981 		    "could not allocate Tx ring DMA tag.\n");
982 		return error;
983 	}
984 
985 	/* Allocate DMA'able memory for TX ring */
986 	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
987 	    (void **)&sc->jme_rdata.jme_tx_ring,
988 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
989 	    &sc->jme_cdata.jme_tx_ring_map);
990 	if (error) {
991 		device_printf(sc->jme_dev,
992 		    "could not allocate DMA'able memory for Tx ring.\n");
993 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
994 		sc->jme_cdata.jme_tx_ring_tag = NULL;
995 		return error;
996 	}
997 
998 	/*  Load the DMA map for Tx ring. */
999 	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1000 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1001 	    JME_TX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1002 	if (error) {
1003 		device_printf(sc->jme_dev,
1004 		    "could not load DMA'able memory for Tx ring.\n");
1005 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1006 				sc->jme_rdata.jme_tx_ring,
1007 				sc->jme_cdata.jme_tx_ring_map);
1008 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1009 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1010 		return error;
1011 	}
1012 	sc->jme_rdata.jme_tx_ring_paddr = busaddr;
1013 
1014 	/*
1015 	 * Create DMA stuffs for RX ring
1016 	 */
1017 
1018 	/* Create tag for Rx ring. */
1019 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1020 	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
1021 	    lowaddr,			/* lowaddr */
1022 	    BUS_SPACE_MAXADDR,		/* highaddr */
1023 	    NULL, NULL,			/* filter, filterarg */
1024 	    JME_RX_RING_SIZE,		/* maxsize */
1025 	    1,				/* nsegments */
1026 	    JME_RX_RING_SIZE,		/* maxsegsize */
1027 	    0,				/* flags */
1028 	    &sc->jme_cdata.jme_rx_ring_tag);
1029 	if (error) {
1030 		device_printf(sc->jme_dev,
1031 		    "could not allocate Rx ring DMA tag.\n");
1032 		return error;
1033 	}
1034 
1035 	/* Allocate DMA'able memory for RX ring */
1036 	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1037 	    (void **)&sc->jme_rdata.jme_rx_ring,
1038 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1039 	    &sc->jme_cdata.jme_rx_ring_map);
1040 	if (error) {
1041 		device_printf(sc->jme_dev,
1042 		    "could not allocate DMA'able memory for Rx ring.\n");
1043 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1044 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1045 		return error;
1046 	}
1047 
1048 	/* Load the DMA map for Rx ring. */
1049 	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1050 	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1051 	    JME_RX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1052 	if (error) {
1053 		device_printf(sc->jme_dev,
1054 		    "could not load DMA'able memory for Rx ring.\n");
1055 		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1056 				sc->jme_rdata.jme_rx_ring,
1057 				sc->jme_cdata.jme_rx_ring_map);
1058 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1059 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1060 		return error;
1061 	}
1062 	sc->jme_rdata.jme_rx_ring_paddr = busaddr;
1063 
1064 	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
1065 	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
1066 	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
1067 	if ((JME_ADDR_HI(tx_ring_end) !=
1068 	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1069 	    (JME_ADDR_HI(rx_ring_end) !=
1070 	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1071 		device_printf(sc->jme_dev, "4GB boundary crossed, "
1072 		    "switching to 32bit DMA address mode.\n");
1073 		jme_dma_free(sc);
1074 		/* Limit DMA address space to 32bit and try again. */
1075 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1076 		goto again;
1077 	}
1078 
1079 	/* Create parent buffer tag. */
1080 	error = bus_dma_tag_create(NULL,/* parent */
1081 	    1, 0,			/* algnmnt, boundary */
1082 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1083 	    BUS_SPACE_MAXADDR,		/* highaddr */
1084 	    NULL, NULL,			/* filter, filterarg */
1085 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1086 	    0,				/* nsegments */
1087 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1088 	    0,				/* flags */
1089 	    &sc->jme_cdata.jme_buffer_tag);
1090 	if (error) {
1091 		device_printf(sc->jme_dev,
1092 		    "could not create parent buffer DMA tag.\n");
1093 		return error;
1094 	}
1095 
1096 	/*
1097 	 * Create DMA stuffs for shadow status block
1098 	 */
1099 
1100 	/* Create shadow status block tag. */
1101 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1102 	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
1103 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1104 	    BUS_SPACE_MAXADDR,		/* highaddr */
1105 	    NULL, NULL,			/* filter, filterarg */
1106 	    JME_SSB_SIZE,		/* maxsize */
1107 	    1,				/* nsegments */
1108 	    JME_SSB_SIZE,		/* maxsegsize */
1109 	    0,				/* flags */
1110 	    &sc->jme_cdata.jme_ssb_tag);
1111 	if (error) {
1112 		device_printf(sc->jme_dev,
1113 		    "could not create shared status block DMA tag.\n");
1114 		return error;
1115 	}
1116 
1117 	/* Allocate DMA'able memory for shared status block. */
1118 	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1119 	    (void **)&sc->jme_rdata.jme_ssb_block,
1120 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1121 	    &sc->jme_cdata.jme_ssb_map);
1122 	if (error) {
1123 		device_printf(sc->jme_dev, "could not allocate DMA'able "
1124 		    "memory for shared status block.\n");
1125 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1126 		sc->jme_cdata.jme_ssb_tag = NULL;
1127 		return error;
1128 	}
1129 
1130 	/* Load the DMA map for shared status block */
1131 	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1132 	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1133 	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1134 	if (error) {
1135 		device_printf(sc->jme_dev, "could not load DMA'able memory "
1136 		    "for shared status block.\n");
1137 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1138 				sc->jme_rdata.jme_ssb_block,
1139 				sc->jme_cdata.jme_ssb_map);
1140 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1141 		sc->jme_cdata.jme_ssb_tag = NULL;
1142 		return error;
1143 	}
1144 	sc->jme_rdata.jme_ssb_block_paddr = busaddr;
1145 
1146 	/*
1147 	 * Create DMA stuffs for TX buffers
1148 	 */
1149 
1150 	/* Create tag for Tx buffers. */
1151 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1152 	    1, 0,			/* algnmnt, boundary */
1153 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1154 	    BUS_SPACE_MAXADDR,		/* highaddr */
1155 	    NULL, NULL,			/* filter, filterarg */
1156 	    JME_TSO_MAXSIZE,		/* maxsize */
1157 	    JME_MAXTXSEGS,		/* nsegments */
1158 	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
1159 	    0,				/* flags */
1160 	    &sc->jme_cdata.jme_tx_tag);
1161 	if (error != 0) {
1162 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1163 		return error;
1164 	}
1165 
1166 	/* Create DMA maps for Tx buffers. */
1167 	for (i = 0; i < JME_TX_RING_CNT; i++) {
1168 		txd = &sc->jme_cdata.jme_txdesc[i];
1169 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1170 		    &txd->tx_dmamap);
1171 		if (error) {
1172 			int j;
1173 
1174 			device_printf(sc->jme_dev,
1175 			    "could not create %dth Tx dmamap.\n", i);
1176 
1177 			for (j = 0; j < i; ++j) {
1178 				txd = &sc->jme_cdata.jme_txdesc[j];
1179 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1180 						   txd->tx_dmamap);
1181 			}
1182 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1183 			sc->jme_cdata.jme_tx_tag = NULL;
1184 			return error;
1185 		}
1186 	}
1187 
1188 	/*
1189 	 * Create DMA stuffs for RX buffers
1190 	 */
1191 
1192 	/* Create tag for Rx buffers. */
1193 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1194 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
1195 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1196 	    BUS_SPACE_MAXADDR,		/* highaddr */
1197 	    NULL, NULL,			/* filter, filterarg */
1198 	    MCLBYTES,			/* maxsize */
1199 	    1,				/* nsegments */
1200 	    MCLBYTES,			/* maxsegsize */
1201 	    0,				/* flags */
1202 	    &sc->jme_cdata.jme_rx_tag);
1203 	if (error) {
1204 		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1205 		return error;
1206 	}
1207 
1208 	/* Create DMA maps for Rx buffers. */
1209 	error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1210 				  &sc->jme_cdata.jme_rx_sparemap);
1211 	if (error) {
1212 		device_printf(sc->jme_dev,
1213 		    "could not create spare Rx dmamap.\n");
1214 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1215 		sc->jme_cdata.jme_rx_tag = NULL;
1216 		return error;
1217 	}
1218 	for (i = 0; i < JME_RX_RING_CNT; i++) {
1219 		rxd = &sc->jme_cdata.jme_rxdesc[i];
1220 		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1221 		    &rxd->rx_dmamap);
1222 		if (error) {
1223 			int j;
1224 
1225 			device_printf(sc->jme_dev,
1226 			    "could not create %dth Rx dmamap.\n", i);
1227 
1228 			for (j = 0; j < i; ++j) {
1229 				rxd = &sc->jme_cdata.jme_rxdesc[j];
1230 				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1231 						   rxd->rx_dmamap);
1232 			}
1233 			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1234 			    sc->jme_cdata.jme_rx_sparemap);
1235 			bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1236 			sc->jme_cdata.jme_rx_tag = NULL;
1237 			return error;
1238 		}
1239 	}
1240 	return 0;
1241 }
1242 
/*
 * Release every DMA resource created by the allocation path.
 *
 * Each resource is torn down in the canonical bus_dma order: unload
 * the map, free the memory, destroy the tag.  Child tags (Tx/Rx
 * rings, Tx/Rx buffers, shadow status block) are destroyed before
 * their parent tags (jme_buffer_tag, jme_ring_tag).  Every tag
 * pointer is reset to NULL, so this function is safe to call on a
 * partially-initialized softc (e.g. from an attach error path or the
 * 4GB-boundary retry in the allocation code).
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring: unload, free descriptor memory, destroy the tag. */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_rdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring: same sequence as the Tx ring above. */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_cdata.jme_rx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_rdata.jme_rx_ring,
		    sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}

	/*
	 * Tx buffers: destroy the per-descriptor maps.  The maps are
	 * assumed to be unloaded already — NOTE(review): verify the
	 * stop path unloads any in-flight maps before this runs.
	 */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers: per-descriptor maps plus the spare map. */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
		    sc->jme_cdata.jme_rx_sparemap);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_rdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags last, after all of their children are gone. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}
1316 
1317 /*
1318  *	Make sure the interface is stopped at reboot time.
1319  */
1320 static int
1321 jme_shutdown(device_t dev)
1322 {
1323 	return jme_suspend(dev);
1324 }
1325 
1326 #ifdef notyet
1327 /*
1328  * Unlike other ethernet controllers, JMC250 requires
1329  * explicit resetting link speed to 10/100Mbps as gigabit
1330  * link will cunsume more power than 375mA.
1331  * Note, we reset the link speed to 10/100Mbps with
1332  * auto-negotiation but we don't know whether that operation
1333  * would succeed or not as we have no control after powering
1334  * off. If the renegotiation fail WOL may not work. Running
1335  * at 1Gbps draws more power than 375mA at 3.3V which is
1336  * specified in PCI specification and that would result in
1337  * complete shutdowning power to ethernet controller.
1338  *
1339  * TODO
1340  *  Save current negotiated media speed/duplex/flow-control
1341  *  to softc and restore the same link again after resuming.
1342  *  PHY handling such as power down/resetting to 100Mbps
1343  *  may be better handled in suspend method in phy driver.
1344  */
1345 static void
1346 jme_setlinkspeed(struct jme_softc *sc)
1347 {
1348 	struct mii_data *mii;
1349 	int aneg, i;
1350 
1351 	JME_LOCK_ASSERT(sc);
1352 
1353 	mii = device_get_softc(sc->jme_miibus);
1354 	mii_pollstat(mii);
1355 	aneg = 0;
1356 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1357 		switch IFM_SUBTYPE(mii->mii_media_active) {
1358 		case IFM_10_T:
1359 		case IFM_100_TX:
1360 			return;
1361 		case IFM_1000_T:
1362 			aneg++;
1363 		default:
1364 			break;
1365 		}
1366 	}
1367 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1368 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1369 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1370 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1371 	    BMCR_AUTOEN | BMCR_STARTNEG);
1372 	DELAY(1000);
1373 	if (aneg != 0) {
1374 		/* Poll link state until jme(4) get a 10/100 link. */
1375 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1376 			mii_pollstat(mii);
1377 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1378 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1379 				case IFM_10_T:
1380 				case IFM_100_TX:
1381 					jme_mac_config(sc);
1382 					return;
1383 				default:
1384 					break;
1385 				}
1386 			}
1387 			JME_UNLOCK(sc);
1388 			pause("jmelnk", hz);
1389 			JME_LOCK(sc);
1390 		}
1391 		if (i == MII_ANEGTICKS_GIGE)
1392 			device_printf(sc->jme_dev, "establishing link failed, "
1393 			    "WOL may not work!");
1394 	}
1395 	/*
1396 	 * No link, force MAC to have 100Mbps, full-duplex link.
1397 	 * This is the last resort and may/may not work.
1398 	 */
1399 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1400 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1401 	jme_mac_config(sc);
1402 }
1403 
/*
 * Arm or disarm Wake-On-LAN for suspend.
 *
 * If the device has no PME capability the PHY is simply powered
 * down.  Otherwise the magic-frame wakeup and PME message are
 * enabled according to IFCAP_WOL_MAGIC, the link is renegotiated
 * down to 10/100 on gigabit parts (power budget), and PME is
 * requested in the PCI power-management status register.  With WOL
 * fully disabled the PHY is powered down as well.
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	/* Start from current register state with WOL bits cleared. */
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1446 #endif
1447 
1448 static int
1449 jme_suspend(device_t dev)
1450 {
1451 	struct jme_softc *sc = device_get_softc(dev);
1452 	struct ifnet *ifp = &sc->arpcom.ac_if;
1453 
1454 	lwkt_serialize_enter(ifp->if_serializer);
1455 	jme_stop(sc);
1456 #ifdef notyet
1457 	jme_setwol(sc);
1458 #endif
1459 	lwkt_serialize_exit(ifp->if_serializer);
1460 
1461 	return (0);
1462 }
1463 
1464 static int
1465 jme_resume(device_t dev)
1466 {
1467 	struct jme_softc *sc = device_get_softc(dev);
1468 	struct ifnet *ifp = &sc->arpcom.ac_if;
1469 #ifdef notyet
1470 	int pmc;
1471 #endif
1472 
1473 	lwkt_serialize_enter(ifp->if_serializer);
1474 
1475 #ifdef notyet
1476 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1477 		uint16_t pmstat;
1478 
1479 		pmstat = pci_read_config(sc->jme_dev,
1480 		    pmc + PCIR_POWER_STATUS, 2);
1481 		/* Disable PME clear PME status. */
1482 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1483 		pci_write_config(sc->jme_dev,
1484 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1485 	}
1486 #endif
1487 
1488 	if (ifp->if_flags & IFF_UP)
1489 		jme_init(sc);
1490 
1491 	lwkt_serialize_exit(ifp->if_serializer);
1492 
1493 	return (0);
1494 }
1495 
/*
 * Map a packet into the Tx ring.
 *
 * The first descriptor carries the checksum/VLAN control flags and
 * the total packet length (stashed in addr_hi); each DMA segment
 * then occupies one following descriptor.  Ownership of the first
 * descriptor is handed to the hardware only after all segment
 * descriptors are filled in.  On EFBIG the mbuf chain is defragged
 * once and retried.  On unrecoverable failure the mbuf is freed and
 * *m_head is set to NULL so the caller can tell the chain is gone.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs;
	int error, i, prod;
	uint32_t cflags;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/*
	 * Cap the segment count to what the ring can still hold,
	 * leaving JME_TXD_RSVD reserved plus one for the flags
	 * descriptor added below.
	 */
	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + 1);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - 1),
		("not enough segments %d\n", maxsegs));

	ctx.nsegs = maxsegs;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
				     *m_head, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	/* nsegs == 0 means the callback refused the mapping: treat as EFBIG. */
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;

		ctx.nsegs = maxsegs;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
					     txd->tx_dmamap, *m_head,
					     jme_dmamap_buf_cb, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "could not load defragged TX mbuf\n");
			if (!error) {
				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
						  txd->tx_dmamap);
				error = EFBIG;
			}
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		/* Mbuf kept alive: the caller re-queues it. */
		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	/*
	 * First descriptor: control flags only.  The total packet
	 * length goes in addr_hi; no buffer is attached to it.
	 */
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	/* One descriptor per DMA segment; hardware owns each at once. */
	for (i = 0; i < ctx.nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 JME_TX_RING_CNT - JME_TXD_RSVD);
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	/* +1 accounts for the leading flags descriptor. */
	txd->tx_ndesc = ctx.nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
	return (0);
}
1620 
/*
 * if_start handler: drain the interface send queue into the Tx ring
 * and kick the transmitter.  Called with the interface serializer
 * held.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		/* No link: transmitting is pointless, drop the queue. */
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim completed descriptors before queueing more work. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			/* NULL m_head: jme_encap() already freed the mbuf. */
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			ifq_prepend(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
1692 
/*
 * Transmit watchdog, fired when if_timer expires with descriptors
 * still outstanding.  Distinguishes three cases: lost link (reset),
 * missed Tx interrupt (just reclaim and restart), and a genuine
 * transmitter stall (reset and restart).
 */
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	/* Try reclaiming first: the interrupt may simply have been lost. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	/* Real stall: reinitialize the chip and requeue pending work. */
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1722 
/*
 * Interface ioctl handler.  Handles MTU changes (which may disable
 * Tx checksum offload due to the 2K Tx FIFO), interface flag and
 * multicast filter updates, media selection, and capability
 * toggling; everything else is passed to ether_ioctl().  Called
 * with the interface serializer held.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU only on parts without JME_FLAG_NOJUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    ((sc->jme_flags & JME_FLAG_NOJUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only reprogram the Rx filter on changes. */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		/* Remember the flags for the XOR test above. */
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		/* mask holds the capabilities the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if (IFCAP_TXCSUM & ifp->if_capabilities) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) &&
		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
			uint32_t reg;

			/* Mirror the toggle into the RXMAC register. */
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1823 
/*
 * Reprogram the MAC for the speed/duplex/flow-control settings the
 * PHY has resolved.  Resets GHC, then rebuilds GHC/RXMAC/TXMAC/TXPFC
 * from the current mii media word: full-duplex disables collision
 * handling and the retry timer, half-duplex enables them, and
 * 1000BASE-T half-duplex additionally needs carrier extension and
 * frame bursting.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause;

	mii = device_get_softc(sc->jme_miibus);

	/* Reset GHC, then rebuild from scratch. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full-duplex: no collision handling needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		/* Half-duplex: collision detection, backoff, retries. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		break;
	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		break;
	case IFM_1000_T:
		/* Fast-ethernet-only parts cannot do gigabit. */
		if (sc->jme_flags & JME_FLAG_FASTETH)
			break;
		ghc |= GHC_SPEED_1000;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;
	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);
}
1888 
/*
 * Interrupt handler.  Masks all chip interrupts for the duration,
 * acknowledges the coalescing sources (resetting the PCC
 * counter/timer in the process), services Rx and Tx completions,
 * refills the Rx queue if the chip ran dry, and finally unmasks
 * interrupts.  Runs with the interface serializer held.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* 0xFFFFFFFF means the device is gone (e.g. detached/powered off). */
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			/* Tx space was freed: push out any queued frames. */
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
1943 
/*
 * Reclaim transmitted frames from the Tx ring.
 *
 * Walks from the consumer index toward the producer index, stopping
 * at the first descriptor still owned by the hardware.  For each
 * completed frame: update error/collision statistics, clear all of
 * the frame's chained descriptors, unload its DMA map and free the
 * mbuf.  Clears the watchdog when the ring drains and lifts
 * IFF_OACTIVE once enough descriptors are free again.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/* Pick up the chip's latest writes to the descriptor ring. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		/* Hardware still owns this frame: stop reclaiming. */
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				/* Collision count is reported in buflen. */
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver has to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* Ring empty: no more frames to time out on. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	/* Enough room freed up: allow jme_start() to queue again. */
	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_PREWRITE);
}
2018 
2019 static __inline void
2020 jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
2021 {
2022 	int i;
2023 
2024 	for (i = 0; i < count; ++i) {
2025 		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];
2026 
2027 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2028 		desc->buflen = htole32(MCLBYTES);
2029 		JME_DESC_INC(cons, JME_RX_RING_CNT);
2030 	}
2031 }
2032 
/*
 * Receive one frame.
 *
 * Consumes the `nsegs' descriptors that make up the frame at the
 * current RX consumer index: each segment's cluster is chained into
 * sc->jme_cdata.jme_rxhead/rxtail and replaced in the ring via
 * jme_newbuf().  When the last segment is reached the chain is fixed
 * up (lengths, pkthdr, checksum/VLAN metadata) and passed to
 * ifp->if_input().  On RX error or buffer-allocation failure the
 * descriptors are recycled in place with jme_discard_rxbufs().
 * The consumer index always advances by `nsegs'.
 */
static void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	/* Frame status lives in the first descriptor of the chain. */
	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		/* Bad frame: count it and recycle all its descriptors. */
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	/* Total payload length, minus the hardware's 10-byte pad. */
	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/*
				 * TCP/UDP checksum is only trusted for
				 * non-fragmented packets.
				 */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			ifp->if_input(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	/* Advance past all descriptors of this frame, consumed or not. */
	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}
2164 
2165 static void
2166 jme_rxeof(struct jme_softc *sc)
2167 {
2168 	struct jme_desc *desc;
2169 	int nsegs, prog, pktlen;
2170 
2171 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2172 			sc->jme_cdata.jme_rx_ring_map,
2173 			BUS_DMASYNC_POSTREAD);
2174 
2175 	prog = 0;
2176 	for (;;) {
2177 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2178 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2179 			break;
2180 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2181 			break;
2182 
2183 		/*
2184 		 * Check number of segments against received bytes.
2185 		 * Non-matching value would indicate that hardware
2186 		 * is still trying to update Rx descriptors. I'm not
2187 		 * sure whether this check is needed.
2188 		 */
2189 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2190 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2191 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2192 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2193 				  "and packet size(%d) mismach\n",
2194 				  nsegs, pktlen);
2195 			break;
2196 		}
2197 
2198 		/* Received a frame. */
2199 		jme_rxpkt(sc);
2200 		prog++;
2201 	}
2202 
2203 	if (prog > 0) {
2204 		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2205 				sc->jme_cdata.jme_rx_ring_map,
2206 				BUS_DMASYNC_PREWRITE);
2207 	}
2208 }
2209 
2210 static void
2211 jme_tick(void *xsc)
2212 {
2213 	struct jme_softc *sc = xsc;
2214 	struct ifnet *ifp = &sc->arpcom.ac_if;
2215 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2216 
2217 	lwkt_serialize_enter(ifp->if_serializer);
2218 
2219 	mii_tick(mii);
2220 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2221 
2222 	lwkt_serialize_exit(ifp->if_serializer);
2223 }
2224 
/*
 * Reset the chip: assert the global reset bit in GHC, hold it for
 * 10us, then release it.  Leaves the hardware in its power-on
 * register state; jme_init() reprograms everything afterwards.
 */
static void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
2237 
2238 static void
2239 jme_init(void *xsc)
2240 {
2241 	struct jme_softc *sc = xsc;
2242 	struct ifnet *ifp = &sc->arpcom.ac_if;
2243 	struct mii_data *mii;
2244 	uint8_t eaddr[ETHER_ADDR_LEN];
2245 	bus_addr_t paddr;
2246 	uint32_t reg;
2247 	int error;
2248 
2249 	ASSERT_SERIALIZED(ifp->if_serializer);
2250 
2251 	/*
2252 	 * Cancel any pending I/O.
2253 	 */
2254 	jme_stop(sc);
2255 
2256 	/*
2257 	 * Reset the chip to a known state.
2258 	 */
2259 	jme_reset(sc);
2260 
2261 	/*
2262 	 * Since we always use 64bit address mode for transmitting,
2263 	 * each Tx request requires one more dummy descriptor.
2264 	 */
2265 	sc->jme_txd_spare =
2266 	howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES) + 1;
2267 	KKASSERT(sc->jme_txd_spare >= 2);
2268 
2269 	/* Init descriptors. */
2270 	error = jme_init_rx_ring(sc);
2271         if (error != 0) {
2272                 device_printf(sc->jme_dev,
2273                     "%s: initialization failed: no memory for Rx buffers.\n",
2274 		    __func__);
2275                 jme_stop(sc);
2276 		return;
2277         }
2278 	jme_init_tx_ring(sc);
2279 
2280 	/* Initialize shadow status block. */
2281 	jme_init_ssb(sc);
2282 
2283 	/* Reprogram the station address. */
2284 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2285 	CSR_WRITE_4(sc, JME_PAR0,
2286 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2287 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2288 
2289 	/*
2290 	 * Configure Tx queue.
2291 	 *  Tx priority queue weight value : 0
2292 	 *  Tx FIFO threshold for processing next packet : 16QW
2293 	 *  Maximum Tx DMA length : 512
2294 	 *  Allow Tx DMA burst.
2295 	 */
2296 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2297 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2298 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2299 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2300 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2301 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2302 
2303 	/* Set Tx descriptor counter. */
2304 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2305 
2306 	/* Set Tx ring address to the hardware. */
2307 	paddr = JME_TX_RING_ADDR(sc, 0);
2308 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2309 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2310 
2311 	/* Configure TxMAC parameters. */
2312 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2313 	reg |= TXMAC_THRESH_1_PKT;
2314 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2315 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2316 
2317 	/*
2318 	 * Configure Rx queue.
2319 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2320 	 *  FIFO threshold for processing next packet : 128QW
2321 	 *  Rx queue 0 select
2322 	 *  Max Rx DMA length : 128
2323 	 *  Rx descriptor retry : 32
2324 	 *  Rx descriptor retry time gap : 256ns
2325 	 *  Don't receive runt/bad frame.
2326 	 */
2327 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2328 	/*
2329 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2330 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2331 	 * decrease FIFO threshold to reduce the FIFO overruns for
2332 	 * frames larger than 4000 bytes.
2333 	 * For best performance of standard MTU sized frames use
2334 	 * maximum allowable FIFO threshold, 128QW.
2335 	 */
2336 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2337 	    JME_RX_FIFO_SIZE)
2338 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2339 	else
2340 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2341 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2342 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2343 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2344 	/* XXX TODO DROP_BAD */
2345 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2346 
2347 	/* Set Rx descriptor counter. */
2348 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2349 
2350 	/* Set Rx ring address to the hardware. */
2351 	paddr = JME_RX_RING_ADDR(sc, 0);
2352 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2353 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2354 
2355 	/* Clear receive filter. */
2356 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2357 
2358 	/* Set up the receive filter. */
2359 	jme_set_filter(sc);
2360 	jme_set_vlan(sc);
2361 
2362 	/*
2363 	 * Disable all WOL bits as WOL can interfere normal Rx
2364 	 * operation. Also clear WOL detection status bits.
2365 	 */
2366 	reg = CSR_READ_4(sc, JME_PMCS);
2367 	reg &= ~PMCS_WOL_ENB_MASK;
2368 	CSR_WRITE_4(sc, JME_PMCS, reg);
2369 
2370 	/*
2371 	 * Pad 10bytes right before received frame. This will greatly
2372 	 * help Rx performance on strict-alignment architectures as
2373 	 * it does not need to copy the frame to align the payload.
2374 	 */
2375 	reg = CSR_READ_4(sc, JME_RXMAC);
2376 	reg |= RXMAC_PAD_10BYTES;
2377 
2378 	if (ifp->if_capenable & IFCAP_RXCSUM)
2379 		reg |= RXMAC_CSUM_ENB;
2380 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2381 
2382 	/* Configure general purpose reg0 */
2383 	reg = CSR_READ_4(sc, JME_GPREG0);
2384 	reg &= ~GPREG0_PCC_UNIT_MASK;
2385 	/* Set PCC timer resolution to micro-seconds unit. */
2386 	reg |= GPREG0_PCC_UNIT_US;
2387 	/*
2388 	 * Disable all shadow register posting as we have to read
2389 	 * JME_INTR_STATUS register in jme_intr. Also it seems
2390 	 * that it's hard to synchronize interrupt status between
2391 	 * hardware and software with shadow posting due to
2392 	 * requirements of bus_dmamap_sync(9).
2393 	 */
2394 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2395 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2396 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2397 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2398 	/* Disable posting of DW0. */
2399 	reg &= ~GPREG0_POST_DW0_ENB;
2400 	/* Clear PME message. */
2401 	reg &= ~GPREG0_PME_ENB;
2402 	/* Set PHY address. */
2403 	reg &= ~GPREG0_PHY_ADDR_MASK;
2404 	reg |= sc->jme_phyaddr;
2405 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2406 
2407 	/* Configure Tx queue 0 packet completion coalescing. */
2408 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2409 	    PCCTX_COAL_TO_MASK;
2410 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2411 	    PCCTX_COAL_PKT_MASK;
2412 	reg |= PCCTX_COAL_TXQ0;
2413 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2414 
2415 	/* Configure Rx queue 0 packet completion coalescing. */
2416 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2417 	    PCCRX_COAL_TO_MASK;
2418 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2419 	    PCCRX_COAL_PKT_MASK;
2420 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2421 
2422 	/* Configure shadow status block but don't enable posting. */
2423 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2424 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2425 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2426 
2427 	/* Disable Timer 1 and Timer 2. */
2428 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2429 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2430 
2431 	/* Configure retry transmit period, retry limit value. */
2432 	CSR_WRITE_4(sc, JME_TXTRHD,
2433 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2434 	    TXTRHD_RT_PERIOD_MASK) |
2435 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2436 	    TXTRHD_RT_LIMIT_SHIFT));
2437 
2438 	/* Disable RSS. */
2439 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2440 
2441 	/* Initialize the interrupt mask. */
2442 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2443 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2444 
2445 	/*
2446 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2447 	 * done after detection of valid link in jme_miibus_statchg.
2448 	 */
2449 	sc->jme_flags &= ~JME_FLAG_LINK;
2450 
2451 	/* Set the current media. */
2452 	mii = device_get_softc(sc->jme_miibus);
2453 	mii_mediachg(mii);
2454 
2455 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2456 
2457 	ifp->if_flags |= IFF_RUNNING;
2458 	ifp->if_flags &= ~IFF_OACTIVE;
2459 }
2460 
/*
 * Stop the interface.
 *
 * Marks the interface down, cancels the tick callout, disables
 * interrupts and shadow status posting, halts the Tx/Rx engines,
 * and finally frees every mbuf still attached to the RX and TX
 * rings (unloading their DMA maps first).  Must be called with the
 * interface serializer held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	 /* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partial finished RX segments
	 */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
        }
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
        }
}
2533 
2534 static void
2535 jme_stop_tx(struct jme_softc *sc)
2536 {
2537 	uint32_t reg;
2538 	int i;
2539 
2540 	reg = CSR_READ_4(sc, JME_TXCSR);
2541 	if ((reg & TXCSR_TX_ENB) == 0)
2542 		return;
2543 	reg &= ~TXCSR_TX_ENB;
2544 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2545 	for (i = JME_TIMEOUT; i > 0; i--) {
2546 		DELAY(1);
2547 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2548 			break;
2549 	}
2550 	if (i == 0)
2551 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2552 }
2553 
2554 static void
2555 jme_stop_rx(struct jme_softc *sc)
2556 {
2557 	uint32_t reg;
2558 	int i;
2559 
2560 	reg = CSR_READ_4(sc, JME_RXCSR);
2561 	if ((reg & RXCSR_RX_ENB) == 0)
2562 		return;
2563 	reg &= ~RXCSR_RX_ENB;
2564 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2565 	for (i = JME_TIMEOUT; i > 0; i--) {
2566 		DELAY(1);
2567 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2568 			break;
2569 	}
2570 	if (i == 0)
2571 		device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2572 }
2573 
2574 static void
2575 jme_init_tx_ring(struct jme_softc *sc)
2576 {
2577 	struct jme_ring_data *rd;
2578 	struct jme_txdesc *txd;
2579 	int i;
2580 
2581 	sc->jme_cdata.jme_tx_prod = 0;
2582 	sc->jme_cdata.jme_tx_cons = 0;
2583 	sc->jme_cdata.jme_tx_cnt = 0;
2584 
2585 	rd = &sc->jme_rdata;
2586 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2587 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2588 		txd = &sc->jme_cdata.jme_txdesc[i];
2589 		txd->tx_m = NULL;
2590 		txd->tx_desc = &rd->jme_tx_ring[i];
2591 		txd->tx_ndesc = 0;
2592 	}
2593 
2594 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2595 			sc->jme_cdata.jme_tx_ring_map,
2596 			BUS_DMASYNC_PREWRITE);
2597 }
2598 
2599 static void
2600 jme_init_ssb(struct jme_softc *sc)
2601 {
2602 	struct jme_ring_data *rd;
2603 
2604 	rd = &sc->jme_rdata;
2605 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2606 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2607 			BUS_DMASYNC_PREWRITE);
2608 }
2609 
2610 static int
2611 jme_init_rx_ring(struct jme_softc *sc)
2612 {
2613 	struct jme_ring_data *rd;
2614 	struct jme_rxdesc *rxd;
2615 	int i;
2616 
2617 	KKASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2618 		 sc->jme_cdata.jme_rxtail == NULL &&
2619 		 sc->jme_cdata.jme_rxlen == 0);
2620 	sc->jme_cdata.jme_rx_cons = 0;
2621 
2622 	rd = &sc->jme_rdata;
2623 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2624 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2625 		int error;
2626 
2627 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2628 		rxd->rx_m = NULL;
2629 		rxd->rx_desc = &rd->jme_rx_ring[i];
2630 		error = jme_newbuf(sc, rxd, 1);
2631 		if (error)
2632 			return (error);
2633 	}
2634 
2635 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2636 			sc->jme_cdata.jme_rx_ring_map,
2637 			BUS_DMASYNC_PREWRITE);
2638 	return (0);
2639 }
2640 
/*
 * Allocate a fresh mbuf cluster and attach it to RX descriptor `rxd'.
 *
 * The cluster is first DMA-loaded into the spare map; only after the
 * load succeeds is the old buffer (if any) unloaded and the maps
 * swapped, so the descriptor is never left without a valid buffer on
 * failure.  `init' != 0 means we are called from ring initialization:
 * allocation may wait and load failures are logged.
 *
 * Returns 0 on success, ENOBUFS on mbuf allocation failure, EFBIG or
 * a bus_dma error on DMA load failure.
 */
static int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
{
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map; the callback allows one segment only. */
	ctx.nsegs = 1;
	ctx.segs = &segs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_rx_tag,
				     sc->jme_cdata.jme_rx_sparemap,
				     m, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		/* nsegs == 0: the callback rejected a multi-segment load. */
		if (!error) {
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
					  sc->jme_cdata.jme_rx_sparemap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init)
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}

	/* Tear down DMA state of the buffer being replaced. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly loaded spare map into the descriptor slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	/* Publish the buffer to the hardware; OWN flag is written last. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(segs.ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}
2700 
2701 static void
2702 jme_set_vlan(struct jme_softc *sc)
2703 {
2704 	struct ifnet *ifp = &sc->arpcom.ac_if;
2705 	uint32_t reg;
2706 
2707 	ASSERT_SERIALIZED(ifp->if_serializer);
2708 
2709 	reg = CSR_READ_4(sc, JME_RXMAC);
2710 	reg &= ~RXMAC_VLAN_ENB;
2711 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2712 		reg |= RXMAC_VLAN_ENB;
2713 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2714 }
2715 
2716 static void
2717 jme_set_filter(struct jme_softc *sc)
2718 {
2719 	struct ifnet *ifp = &sc->arpcom.ac_if;
2720 	struct ifmultiaddr *ifma;
2721 	uint32_t crc;
2722 	uint32_t mchash[2];
2723 	uint32_t rxcfg;
2724 
2725 	ASSERT_SERIALIZED(ifp->if_serializer);
2726 
2727 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2728 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2729 	    RXMAC_ALLMULTI);
2730 
2731 	/*
2732 	 * Always accept frames destined to our station address.
2733 	 * Always accept broadcast frames.
2734 	 */
2735 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2736 
2737 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2738 		if (ifp->if_flags & IFF_PROMISC)
2739 			rxcfg |= RXMAC_PROMISC;
2740 		if (ifp->if_flags & IFF_ALLMULTI)
2741 			rxcfg |= RXMAC_ALLMULTI;
2742 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2743 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2744 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2745 		return;
2746 	}
2747 
2748 	/*
2749 	 * Set up the multicast address filter by passing all multicast
2750 	 * addresses through a CRC generator, and then using the low-order
2751 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2752 	 * high order bits select the register, while the rest of the bits
2753 	 * select the bit within the register.
2754 	 */
2755 	rxcfg |= RXMAC_MULTICAST;
2756 	bzero(mchash, sizeof(mchash));
2757 
2758 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2759 		if (ifma->ifma_addr->sa_family != AF_LINK)
2760 			continue;
2761 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2762 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2763 
2764 		/* Just want the 6 least significant bits. */
2765 		crc &= 0x3f;
2766 
2767 		/* Set the corresponding bit in the hash table. */
2768 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2769 	}
2770 
2771 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2772 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2773 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2774 }
2775 
2776 static int
2777 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
2778 {
2779 	return (sysctl_int_range(oidp, arg1, arg2, req,
2780 	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
2781 }
2782 
2783 static int
2784 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2785 {
2786 	return (sysctl_int_range(oidp, arg1, arg2, req,
2787 	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
2788 }
2789 
2790 static int
2791 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
2792 {
2793 	return (sysctl_int_range(oidp, arg1, arg2, req,
2794 	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
2795 }
2796 
2797 static int
2798 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2799 {
2800 	return (sysctl_int_range(oidp, arg1, arg2, req,
2801 	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
2802 }
2803