xref: /dflybsd-src/sys/dev/netif/jme/if_jme.c (revision c6ddf9d06769c30e7be20c1d090f3f4c6b2919fb)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.10 2008/09/19 11:36:40 sephe Exp $
29  */
30 
31 #include <sys/param.h>
32 #include <sys/endian.h>
33 #include <sys/kernel.h>
34 #include <sys/bus.h>
35 #include <sys/interrupt.h>
36 #include <sys/malloc.h>
37 #include <sys/proc.h>
38 #include <sys/rman.h>
39 #include <sys/serialize.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43 
44 #include <net/ethernet.h>
45 #include <net/if.h>
46 #include <net/bpf.h>
47 #include <net/if_arp.h>
48 #include <net/if_dl.h>
49 #include <net/if_media.h>
50 #include <net/ifq_var.h>
51 #include <net/vlan/if_vlan_var.h>
52 #include <net/vlan/if_vlan_ether.h>
53 
54 #include <dev/netif/mii_layer/miivar.h>
55 #include <dev/netif/mii_layer/jmphyreg.h>
56 
57 #include <bus/pci/pcireg.h>
58 #include <bus/pci/pcivar.h>
59 #include <bus/pci/pcidevs.h>
60 
61 #include <dev/netif/jme/if_jmereg.h>
62 #include <dev/netif/jme/if_jmevar.h>
63 
64 #include "miibus_if.h"
65 
66 /* Define the following to enable printing of Rx errors. */
67 #undef	JME_SHOW_ERRORS
68 
69 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
70 
71 static int	jme_probe(device_t);
72 static int	jme_attach(device_t);
73 static int	jme_detach(device_t);
74 static int	jme_shutdown(device_t);
75 static int	jme_suspend(device_t);
76 static int	jme_resume(device_t);
77 
78 static int	jme_miibus_readreg(device_t, int, int);
79 static int	jme_miibus_writereg(device_t, int, int, int);
80 static void	jme_miibus_statchg(device_t);
81 
82 static void	jme_init(void *);
83 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
84 static void	jme_start(struct ifnet *);
85 static void	jme_watchdog(struct ifnet *);
86 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
87 static int	jme_mediachange(struct ifnet *);
88 
89 static void	jme_intr(void *);
90 static void	jme_txeof(struct jme_softc *);
91 static void	jme_rxeof(struct jme_softc *);
92 
93 static int	jme_dma_alloc(struct jme_softc *);
94 static void	jme_dma_free(struct jme_softc *);
95 static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
96 static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
97 				  bus_size_t, int);
98 static int	jme_init_rx_ring(struct jme_softc *);
99 static void	jme_init_tx_ring(struct jme_softc *);
100 static void	jme_init_ssb(struct jme_softc *);
101 static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
102 static int	jme_encap(struct jme_softc *, struct mbuf **);
103 static void	jme_rxpkt(struct jme_softc *);
104 
105 static void	jme_tick(void *);
106 static void	jme_stop(struct jme_softc *);
107 static void	jme_reset(struct jme_softc *);
108 static void	jme_set_vlan(struct jme_softc *);
109 static void	jme_set_filter(struct jme_softc *);
110 static void	jme_stop_tx(struct jme_softc *);
111 static void	jme_stop_rx(struct jme_softc *);
112 static void	jme_mac_config(struct jme_softc *);
113 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
114 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
115 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
116 #ifdef notyet
117 static void	jme_setwol(struct jme_softc *);
118 static void	jme_setlinkspeed(struct jme_softc *);
119 #endif
120 
121 static void	jme_sysctl_node(struct jme_softc *);
122 static int	sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
123 static int	sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
124 static int	sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
125 static int	sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
126 
127 /*
128  * Devices supported by this driver.
129  */
130 static const struct jme_dev {
131 	uint16_t	jme_vendorid;
132 	uint16_t	jme_deviceid;
133 	uint32_t	jme_caps;
134 	const char	*jme_name;
135 } jme_devs[] = {
136 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
137 	    JME_CAP_JUMBO,
138 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
139 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
140 	    JME_CAP_FASTETH,
141 	    "JMicron Inc, JMC260 Fast Ethernet" },
142 	{ 0, 0, 0, NULL }
143 };
144 
145 static device_method_t jme_methods[] = {
146 	/* Device interface. */
147 	DEVMETHOD(device_probe,		jme_probe),
148 	DEVMETHOD(device_attach,	jme_attach),
149 	DEVMETHOD(device_detach,	jme_detach),
150 	DEVMETHOD(device_shutdown,	jme_shutdown),
151 	DEVMETHOD(device_suspend,	jme_suspend),
152 	DEVMETHOD(device_resume,	jme_resume),
153 
154 	/* Bus interface. */
155 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
156 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
157 
158 	/* MII interface. */
159 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
160 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
161 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
162 
163 	{ NULL, NULL }
164 };
165 
166 static driver_t jme_driver = {
167 	"jme",
168 	jme_methods,
169 	sizeof(struct jme_softc)
170 };
171 
172 static devclass_t jme_devclass;
173 
174 DECLARE_DUMMY_MODULE(if_jme);
175 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
176 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
177 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
178 
179 /*
180  *	Read a PHY register on the MII of the JMC250.
181  */
182 static int
183 jme_miibus_readreg(device_t dev, int phy, int reg)
184 {
185 	struct jme_softc *sc = device_get_softc(dev);
186 	uint32_t val;
187 	int i;
188 
189 	/* For FPGA version, PHY address 0 should be ignored. */
190 	if (sc->jme_caps & JME_CAP_FPGA) {
191 		if (phy == 0)
192 			return (0);
193 	} else {
194 		if (sc->jme_phyaddr != phy)
195 			return (0);
196 	}
197 
198 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
199 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
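	/*
	 * The controller clears SMI_OP_EXECUTE once the MDIO
	 * transaction has completed; poll for that, bounding the
	 * wait to JME_PHY_TIMEOUT iterations of 1us each.
	 */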
200 
201 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
202 		DELAY(1);
203 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
204 			break;
205 	}
206 	if (i == 0) {
207 		device_printf(sc->jme_dev, "phy read timeout: "
208 			      "phy %d, reg %d\n", phy, reg);
209 		return (0);
210 	}
211 
212 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
213 }
214 
215 /*
216  *	Write a PHY register on the MII of the JMC250.
217  */
218 static int
219 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
220 {
221 	struct jme_softc *sc = device_get_softc(dev);
222 	int i;
223 
224 	/* For FPGA version, PHY address 0 should be ignored. */
225 	if (sc->jme_caps & JME_CAP_FPGA) {
226 		if (phy == 0)
227 			return (0);
228 	} else {
229 		if (sc->jme_phyaddr != phy)
230 			return (0);
231 	}
232 
233 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
234 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
235 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
236 
237 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
238 		DELAY(1);
239 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
240 			break;
241 	}
242 	if (i == 0) {
243 		device_printf(sc->jme_dev, "phy write timeout: "
244 			      "phy %d, reg %d\n", phy, reg);
245 	}
246 
247 	return (0);
248 }
249 
250 /*
251  *	Callback from MII layer when media changes.
252  */
253 static void
254 jme_miibus_statchg(device_t dev)
255 {
256 	struct jme_softc *sc = device_get_softc(dev);
257 	struct ifnet *ifp = &sc->arpcom.ac_if;
258 	struct mii_data *mii;
259 	struct jme_txdesc *txd;
260 	bus_addr_t paddr;
261 	int i;
262 
263 	ASSERT_SERIALIZED(ifp->if_serializer);
264 
265 	if ((ifp->if_flags & IFF_RUNNING) == 0)
266 		return;
267 
268 	mii = device_get_softc(sc->jme_miibus);
269 
270 	sc->jme_flags &= ~JME_FLAG_LINK;
271 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
272 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
273 		case IFM_10_T:
274 		case IFM_100_TX:
275 			sc->jme_flags |= JME_FLAG_LINK;
276 			break;
277 		case IFM_1000_T:
278 			if (sc->jme_caps & JME_CAP_FASTETH)
279 				break;
280 			sc->jme_flags |= JME_FLAG_LINK;
281 			break;
282 		default:
283 			break;
284 		}
285 	}
286 
287 	/*
288 	 * Disabling the Rx/Tx MACs has the side effect of resetting
289 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
290 	 * the Tx/Rx descriptor rings.  So the driver must reset its
291 	 * internal producer/consumer pointers and reclaim any
292 	 * allocated resources.  Note, just saving the value of the
293 	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
294 	 * and restoring JME_TXNDA/JME_RXNDA afterwards is not
295 	 * sufficient to guarantee a correct MAC state, because
296 	 * stopping the MAC can take a while and the hardware
297 	 * might have updated the JME_TXNDA/JME_RXNDA registers
298 	 * during the stop operation.
299 	 */
300 
301 	/* Disable interrupts */
302 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
303 
304 	/* Stop driver */
305 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
306 	ifp->if_timer = 0;
307 	callout_stop(&sc->jme_tick_ch);
308 
309 	/* Stop receiver/transmitter. */
310 	jme_stop_rx(sc);
311 	jme_stop_tx(sc);
312 
313 	jme_rxeof(sc);
314 	if (sc->jme_cdata.jme_rxhead != NULL)
315 		m_freem(sc->jme_cdata.jme_rxhead);
316 	JME_RXCHAIN_RESET(sc);
317 
318 	jme_txeof(sc);
319 	if (sc->jme_cdata.jme_tx_cnt != 0) {
320 		/* Remove queued packets for transmit. */
321 		for (i = 0; i < JME_TX_RING_CNT; i++) {
322 			txd = &sc->jme_cdata.jme_txdesc[i];
323 			if (txd->tx_m != NULL) {
324 				bus_dmamap_unload(
325 				    sc->jme_cdata.jme_tx_tag,
326 				    txd->tx_dmamap);
327 				m_freem(txd->tx_m);
328 				txd->tx_m = NULL;
329 				txd->tx_ndesc = 0;
330 				ifp->if_oerrors++;
331 			}
332 		}
333 	}
334 
335 	/*
336 	 * Reuse the configured Rx descriptors and reset the
337 	 * producer/consumer index.
338 	 */
339 	sc->jme_cdata.jme_rx_cons = 0;
340 
341 	jme_init_tx_ring(sc);
342 
343 	/* Initialize shadow status block. */
344 	jme_init_ssb(sc);
345 
346 	/* Program MAC with resolved speed/duplex/flow-control. */
347 	if (sc->jme_flags & JME_FLAG_LINK) {
348 		jme_mac_config(sc);
349 
350 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
351 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
352 
353 		/* Set Tx ring address to the hardware. */
354 		paddr = JME_TX_RING_ADDR(sc, 0);
355 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
356 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
357 
358 		/* Set Rx ring address to the hardware. */
359 		paddr = JME_RX_RING_ADDR(sc, 0);
360 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
361 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
362 
363 		/* Restart receiver/transmitter. */
364 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
365 		    RXCSR_RXQ_START);
366 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
367 	}
368 
369 	ifp->if_flags |= IFF_RUNNING;
370 	ifp->if_flags &= ~IFF_OACTIVE;
371 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
372 
373 	/* Reenable interrupts. */
374 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
375 }
376 
377 /*
378  *	Get the current interface media status.
379  */
380 static void
381 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
382 {
383 	struct jme_softc *sc = ifp->if_softc;
384 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
385 
386 	ASSERT_SERIALIZED(ifp->if_serializer);
387 
388 	mii_pollstat(mii);
389 	ifmr->ifm_status = mii->mii_media_status;
390 	ifmr->ifm_active = mii->mii_media_active;
391 }
392 
393 /*
394  *	Set hardware to newly-selected media.
395  */
396 static int
397 jme_mediachange(struct ifnet *ifp)
398 {
399 	struct jme_softc *sc = ifp->if_softc;
400 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
401 	int error;
402 
403 	ASSERT_SERIALIZED(ifp->if_serializer);
404 
405 	if (mii->mii_instance != 0) {
406 		struct mii_softc *miisc;
407 
408 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
409 			mii_phy_reset(miisc);
410 	}
411 	error = mii_mediachg(mii);
412 
413 	return (error);
414 }
415 
416 static int
417 jme_probe(device_t dev)
418 {
419 	const struct jme_dev *sp;
420 	uint16_t vid, did;
421 
422 	vid = pci_get_vendor(dev);
423 	did = pci_get_device(dev);
424 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
425 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
426 			struct jme_softc *sc = device_get_softc(dev);
427 
428 			sc->jme_caps = sp->jme_caps;
429 			if (did == PCI_PRODUCT_JMICRON_JMC250 &&
430 			    pci_get_revid(dev) == JME_REV_JMC250_A2) {
431 				sc->jme_workaround |= JME_WA_EXTFIFO |
432 						      JME_WA_HDX;
433 			}
434 
435 			device_set_desc(dev, sp->jme_name);
436 			return (0);
437 		}
438 	}
439 	return (ENXIO);
440 }
441 
442 static int
443 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
444 {
445 	uint32_t reg;
446 	int i;
447 
448 	*val = 0;
449 	for (i = JME_TIMEOUT; i > 0; i--) {
450 		reg = CSR_READ_4(sc, JME_SMBCSR);
451 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
452 			break;
453 		DELAY(1);
454 	}
455 
456 	if (i == 0) {
457 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
458 		return (ETIMEDOUT);
459 	}
460 
461 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
462 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
463 	for (i = JME_TIMEOUT; i > 0; i--) {
464 		DELAY(1);
465 		reg = CSR_READ_4(sc, JME_SMBINTF);
466 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
467 			break;
468 	}
469 
470 	if (i == 0) {
471 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
472 		return (ETIMEDOUT);
473 	}
474 
475 	reg = CSR_READ_4(sc, JME_SMBINTF);
476 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
477 
478 	return (0);
479 }
480 
481 static int
482 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
483 {
484 	uint8_t fup, reg, val;
485 	uint32_t offset;
486 	int match;
487 
488 	offset = 0;
489 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
490 	    fup != JME_EEPROM_SIG0)
491 		return (ENOENT);
492 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
493 	    fup != JME_EEPROM_SIG1)
494 		return (ENOENT);
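	/*
	 * Walk the EEPROM descriptors.  Judging from the accesses
	 * below, each descriptor is JME_EEPROM_DESC_BYTES long and
	 * holds a function/page byte, a register offset and a data
	 * byte; descriptors targeting JME_PAR0..JME_PAR0+5 each
	 * carry one byte of the station address.
	 */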
495 	match = 0;
496 	do {
497 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
498 			break;
499 		/* Check for the end of EEPROM descriptor. */
500 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
501 			break;
502 		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
503 		    JME_EEPROM_PAGE_BAR1) == fup) {
504 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
505 				break;
506 			if (reg >= JME_PAR0 &&
507 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
508 				if (jme_eeprom_read_byte(sc, offset + 2,
509 				    &val) != 0)
510 					break;
511 				eaddr[reg - JME_PAR0] = val;
512 				match++;
513 			}
514 		}
515 		/* Try next eeprom descriptor. */
516 		offset += JME_EEPROM_DESC_BYTES;
517 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
518 
519 	if (match == ETHER_ADDR_LEN)
520 		return (0);
521 
522 	return (ENOENT);
523 }
524 
525 static void
526 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
527 {
528 	uint32_t par0, par1;
529 
530 	/* Read station address. */
531 	par0 = CSR_READ_4(sc, JME_PAR0);
532 	par1 = CSR_READ_4(sc, JME_PAR1);
533 	par1 &= 0xFFFF;
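	/*
	 * Reject an all-zero address as well as one with the
	 * multicast bit (bit 0 of the first byte, i.e. par0 & 0x1)
	 * set, since neither is a valid station address.
	 */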
534 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
535 		device_printf(sc->jme_dev,
536 		    "generating fake ethernet address.\n");
537 		par0 = karc4random();
538 		/* Set OUI to JMicron. */
539 		eaddr[0] = 0x00;
540 		eaddr[1] = 0x1B;
541 		eaddr[2] = 0x8C;
542 		eaddr[3] = (par0 >> 16) & 0xff;
543 		eaddr[4] = (par0 >> 8) & 0xff;
544 		eaddr[5] = par0 & 0xff;
545 	} else {
546 		eaddr[0] = (par0 >> 0) & 0xFF;
547 		eaddr[1] = (par0 >> 8) & 0xFF;
548 		eaddr[2] = (par0 >> 16) & 0xFF;
549 		eaddr[3] = (par0 >> 24) & 0xFF;
550 		eaddr[4] = (par1 >> 0) & 0xFF;
551 		eaddr[5] = (par1 >> 8) & 0xFF;
552 	}
553 }
554 
555 static int
556 jme_attach(device_t dev)
557 {
558 	struct jme_softc *sc = device_get_softc(dev);
559 	struct ifnet *ifp = &sc->arpcom.ac_if;
560 	uint32_t reg;
561 	uint8_t pcie_ptr;
562 	int error = 0;
563 	uint8_t eaddr[ETHER_ADDR_LEN];
564 
565 	sc->jme_dev = dev;
566 	ifp = &sc->arpcom.ac_if;
567 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
568 
569 	callout_init(&sc->jme_tick_ch);
570 
571 #ifndef BURN_BRIDGES
572 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
573 		uint32_t irq, mem;
574 
575 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
576 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
577 
578 		device_printf(dev, "chip is in D%d power mode "
579 		    "-- setting to D0\n", pci_get_powerstate(dev));
580 
581 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
582 
583 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
584 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
585 	}
586 #endif	/* !BURN_BRIDGES */
587 
588 	/* Enable bus mastering */
589 	pci_enable_busmaster(dev);
590 
591 	/*
592 	 * Allocate IO memory
593 	 *
594 	 * The JMC250 supports both memory-mapped and I/O register space
595 	 * access.  Because I/O register access would require different
596 	 * BARs to reach all registers, it is a waste of time to use
597 	 * I/O register space access.  The JMC250 maps its entire
598 	 * register space with 16K of memory.
599 	 */
600 	sc->jme_mem_rid = JME_PCIR_BAR;
601 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
602 						 &sc->jme_mem_rid, RF_ACTIVE);
603 	if (sc->jme_mem_res == NULL) {
604 		device_printf(dev, "can't allocate IO memory\n");
605 		return ENXIO;
606 	}
607 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
608 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
609 
610 	/*
611 	 * Allocate IRQ
612 	 */
613 	sc->jme_irq_rid = 0;
614 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
615 						 &sc->jme_irq_rid,
616 						 RF_SHAREABLE | RF_ACTIVE);
617 	if (sc->jme_irq_res == NULL) {
618 		device_printf(dev, "can't allocate irq\n");
619 		error = ENXIO;
620 		goto fail;
621 	}
622 
623 	/*
624 	 * Extract FPGA revision
625 	 */
626 	reg = CSR_READ_4(sc, JME_CHIPMODE);
627 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
628 	    CHIPMODE_NOT_FPGA) {
629 		sc->jme_caps |= JME_CAP_FPGA;
630 		if (bootverbose) {
631 			device_printf(dev, "FPGA revision : 0x%04x\n",
632 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
633 				      CHIPMODE_FPGA_REV_SHIFT);
634 		}
635 	}
636 
637 	/* Reset the ethernet controller. */
638 	jme_reset(sc);
639 
640 	/* Get station address. */
641 	reg = CSR_READ_4(sc, JME_SMBCSR);
642 	if (reg & SMBCSR_EEPROM_PRESENT)
643 		error = jme_eeprom_macaddr(sc, eaddr);
644 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
645 		if (error != 0 && (bootverbose)) {
646 			device_printf(dev, "ethernet hardware address "
647 				      "not found in EEPROM.\n");
648 		}
649 		jme_reg_macaddr(sc, eaddr);
650 	}
651 
652 	/*
653 	 * Save PHY address.
654 	 * The integrated JR0211 has a fixed PHY address, whereas the
655 	 * FPGA version requires PHY probing to find the correct one.
656 	 */
657 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
658 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
659 		    GPREG0_PHY_ADDR_MASK;
660 		if (bootverbose) {
661 			device_printf(dev, "PHY is at address %d.\n",
662 			    sc->jme_phyaddr);
663 		}
664 	} else {
665 		sc->jme_phyaddr = 0;
666 	}
667 
668 	/* Set max allowable DMA size. */
669 	pcie_ptr = pci_get_pciecap_ptr(dev);
670 	if (pcie_ptr != 0) {
671 		uint16_t ctrl;
672 
673 		sc->jme_caps |= JME_CAP_PCIE;
674 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
675 		if (bootverbose) {
676 			device_printf(dev, "Read request size : %d bytes.\n",
677 			    128 << ((ctrl >> 12) & 0x07));
678 			device_printf(dev, "TLP payload size : %d bytes.\n",
679 			    128 << ((ctrl >> 5) & 0x07));
680 		}
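		/*
		 * The read-request and payload sizes in PCIe DEVCTRL are
		 * encoded as 128 << n.  The Tx DMA burst size is matched
		 * to the maximum read request size below, with anything
		 * larger falling back to a 512-byte burst.
		 */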
681 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
682 		case PCIEM_DEVCTL_MAX_READRQ_128:
683 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
684 			break;
685 		case PCIEM_DEVCTL_MAX_READRQ_256:
686 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
687 			break;
688 		default:
689 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
690 			break;
691 		}
692 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
693 	} else {
694 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
695 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
696 	}
697 
698 #ifdef notyet
699 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
700 		sc->jme_caps |= JME_CAP_PMCAP;
701 #endif
702 
703 	/*
704 	 * Create sysctl tree
705 	 */
706 	jme_sysctl_node(sc);
707 
708 	/* Allocate DMA stuffs */
709 	error = jme_dma_alloc(sc);
710 	if (error)
711 		goto fail;
712 
713 	ifp->if_softc = sc;
714 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
715 	ifp->if_init = jme_init;
716 	ifp->if_ioctl = jme_ioctl;
717 	ifp->if_start = jme_start;
718 	ifp->if_watchdog = jme_watchdog;
719 	ifq_set_maxlen(&ifp->if_snd, JME_TX_RING_CNT - 1);
720 	ifq_set_ready(&ifp->if_snd);
721 
722 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
723 	ifp->if_capabilities = IFCAP_HWCSUM |
724 			       IFCAP_VLAN_MTU |
725 			       IFCAP_VLAN_HWTAGGING;
726 	ifp->if_hwassist = JME_CSUM_FEATURES;
727 	ifp->if_capenable = ifp->if_capabilities;
728 
729 	/* Set up MII bus. */
730 	error = mii_phy_probe(dev, &sc->jme_miibus,
731 			      jme_mediachange, jme_mediastatus);
732 	if (error) {
733 		device_printf(dev, "no PHY found!\n");
734 		goto fail;
735 	}
736 
737 	/*
738 	 * Save PHYADDR for FPGA mode PHY.
739 	 */
740 	if (sc->jme_caps & JME_CAP_FPGA) {
741 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
742 
743 		if (mii->mii_instance != 0) {
744 			struct mii_softc *miisc;
745 
746 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
747 				if (miisc->mii_phy != 0) {
748 					sc->jme_phyaddr = miisc->mii_phy;
749 					break;
750 				}
751 			}
752 			if (sc->jme_phyaddr != 0) {
753 				device_printf(sc->jme_dev,
754 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
755 				/* vendor magic. */
756 				jme_miibus_writereg(dev, sc->jme_phyaddr,
757 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
758 
759 				/* XXX should we clear JME_WA_EXTFIFO */
760 			}
761 		}
762 	}
763 
764 	ether_ifattach(ifp, eaddr, NULL);
765 
766 	/* Tell the upper layer(s) we support long frames. */
767 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
768 
769 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
770 			       &sc->jme_irq_handle, ifp->if_serializer);
771 	if (error) {
772 		device_printf(dev, "could not set up interrupt handler.\n");
773 		ether_ifdetach(ifp);
774 		goto fail;
775 	}
776 
777 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
778 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
779 	return 0;
780 fail:
781 	jme_detach(dev);
782 	return (error);
783 }
784 
785 static int
786 jme_detach(device_t dev)
787 {
788 	struct jme_softc *sc = device_get_softc(dev);
789 
790 	if (device_is_attached(dev)) {
791 		struct ifnet *ifp = &sc->arpcom.ac_if;
792 
793 		lwkt_serialize_enter(ifp->if_serializer);
794 		jme_stop(sc);
795 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
796 		lwkt_serialize_exit(ifp->if_serializer);
797 
798 		ether_ifdetach(ifp);
799 	}
800 
801 	if (sc->jme_sysctl_tree != NULL)
802 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
803 
804 	if (sc->jme_miibus != NULL)
805 		device_delete_child(dev, sc->jme_miibus);
806 	bus_generic_detach(dev);
807 
808 	if (sc->jme_irq_res != NULL) {
809 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
810 				     sc->jme_irq_res);
811 	}
812 
813 	if (sc->jme_mem_res != NULL) {
814 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
815 				     sc->jme_mem_res);
816 	}
817 
818 	jme_dma_free(sc);
819 
820 	return (0);
821 }
822 
823 static void
824 jme_sysctl_node(struct jme_softc *sc)
825 {
826 	int error;
827 
828 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
829 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
830 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
831 				device_get_nameunit(sc->jme_dev),
832 				CTLFLAG_RD, 0, "");
833 	if (sc->jme_sysctl_tree == NULL) {
834 		device_printf(sc->jme_dev, "can't add sysctl node\n");
835 		return;
836 	}
837 
838 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
839 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
840 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to,
841 	    0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
842 
843 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
844 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
845 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt,
846 	    0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
847 
848 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
849 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
850 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to,
851 	    0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
852 
853 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
854 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
855 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt,
856 	    0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
857 
858 	/* Pull in device tunables. */
859 	sc->jme_process_limit = JME_PROC_DEFAULT;
860 
861 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
862 	error = resource_int_value(device_get_name(sc->jme_dev),
863 	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
864 	if (error == 0) {
865 		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
866 		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
867 			device_printf(sc->jme_dev,
868 			    "tx_coal_to value out of range; "
869 			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
870 			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
871 		}
872 	}
873 
874 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
875 	error = resource_int_value(device_get_name(sc->jme_dev),
876 	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
877 	if (error == 0) {
878 		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
879 		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
880 			device_printf(sc->jme_dev,
881 			    "tx_coal_pkt value out of range; "
882 			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
883 			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
884 		}
885 	}
886 
887 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
888 	error = resource_int_value(device_get_name(sc->jme_dev),
889 	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
890 	if (error == 0) {
891 		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
892 		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
893 			device_printf(sc->jme_dev,
894 			    "rx_coal_to value out of range; "
895 			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
896 			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
897 		}
898 	}
899 
900 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
901 	error = resource_int_value(device_get_name(sc->jme_dev),
902 	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
903 	if (error == 0) {
904 		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
905 		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
906 			device_printf(sc->jme_dev,
907 			    "rx_coal_pkt value out of range; "
908 			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
909 			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
910 		}
911 	}
912 }
913 
914 static void
915 jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
916 {
917 	if (error)
918 		return;
919 
920 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
921 	*((bus_addr_t *)arg) = segs->ds_addr;
922 }
923 
924 static void
925 jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
926 		  bus_size_t mapsz __unused, int error)
927 {
928 	struct jme_dmamap_ctx *ctx = xctx;
929 	int i;
930 
931 	if (error)
932 		return;
933 
934 	if (nsegs > ctx->nsegs) {
935 		ctx->nsegs = 0;
936 		return;
937 	}
938 
939 	ctx->nsegs = nsegs;
940 	for (i = 0; i < nsegs; ++i)
941 		ctx->segs[i] = segs[i];
942 }
943 
944 static int
945 jme_dma_alloc(struct jme_softc *sc)
946 {
947 	struct jme_txdesc *txd;
948 	struct jme_rxdesc *rxd;
949 	bus_addr_t busaddr, lowaddr, rx_ring_end, tx_ring_end;
950 	int error, i;
951 
952 	lowaddr = BUS_SPACE_MAXADDR;
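	/*
	 * First try the full DMA address space.  If the rings end up
	 * straddling a 4GB boundary (checked after both rings have
	 * been loaded), everything allocated so far is freed and we
	 * retry from here with lowaddr clamped to
	 * BUS_SPACE_MAXADDR_32BIT.
	 */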
953 
954 again:
955 	/* Create parent ring tag. */
956 	error = bus_dma_tag_create(NULL,/* parent */
957 	    1, 0,			/* algnmnt, boundary */
958 	    lowaddr,			/* lowaddr */
959 	    BUS_SPACE_MAXADDR,		/* highaddr */
960 	    NULL, NULL,			/* filter, filterarg */
961 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
962 	    0,				/* nsegments */
963 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
964 	    0,				/* flags */
965 	    &sc->jme_cdata.jme_ring_tag);
966 	if (error) {
967 		device_printf(sc->jme_dev,
968 		    "could not create parent ring DMA tag.\n");
969 		return error;
970 	}
971 
972 	/*
973 	 * Create DMA stuffs for TX ring
974 	 */
975 
976 	/* Create tag for Tx ring. */
977 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
978 	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
979 	    BUS_SPACE_MAXADDR,		/* lowaddr */
980 	    BUS_SPACE_MAXADDR,		/* highaddr */
981 	    NULL, NULL,			/* filter, filterarg */
982 	    JME_TX_RING_SIZE,		/* maxsize */
983 	    1,				/* nsegments */
984 	    JME_TX_RING_SIZE,		/* maxsegsize */
985 	    0,				/* flags */
986 	    &sc->jme_cdata.jme_tx_ring_tag);
987 	if (error) {
988 		device_printf(sc->jme_dev,
989 		    "could not allocate Tx ring DMA tag.\n");
990 		return error;
991 	}
992 
993 	/* Allocate DMA'able memory for TX ring */
994 	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
995 	    (void **)&sc->jme_rdata.jme_tx_ring,
996 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
997 	    &sc->jme_cdata.jme_tx_ring_map);
998 	if (error) {
999 		device_printf(sc->jme_dev,
1000 		    "could not allocate DMA'able memory for Tx ring.\n");
1001 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1002 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1003 		return error;
1004 	}
1005 
1006 	/*  Load the DMA map for Tx ring. */
1007 	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1008 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1009 	    JME_TX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1010 	if (error) {
1011 		device_printf(sc->jme_dev,
1012 		    "could not load DMA'able memory for Tx ring.\n");
1013 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1014 				sc->jme_rdata.jme_tx_ring,
1015 				sc->jme_cdata.jme_tx_ring_map);
1016 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1017 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1018 		return error;
1019 	}
1020 	sc->jme_rdata.jme_tx_ring_paddr = busaddr;
1021 
1022 	/*
1023 	 * Create DMA stuffs for RX ring
1024 	 */
1025 
1026 	/* Create tag for Rx ring. */
1027 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1028 	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
1029 	    lowaddr,			/* lowaddr */
1030 	    BUS_SPACE_MAXADDR,		/* highaddr */
1031 	    NULL, NULL,			/* filter, filterarg */
1032 	    JME_RX_RING_SIZE,		/* maxsize */
1033 	    1,				/* nsegments */
1034 	    JME_RX_RING_SIZE,		/* maxsegsize */
1035 	    0,				/* flags */
1036 	    &sc->jme_cdata.jme_rx_ring_tag);
1037 	if (error) {
1038 		device_printf(sc->jme_dev,
1039 		    "could not allocate Rx ring DMA tag.\n");
1040 		return error;
1041 	}
1042 
1043 	/* Allocate DMA'able memory for RX ring */
1044 	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1045 	    (void **)&sc->jme_rdata.jme_rx_ring,
1046 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1047 	    &sc->jme_cdata.jme_rx_ring_map);
1048 	if (error) {
1049 		device_printf(sc->jme_dev,
1050 		    "could not allocate DMA'able memory for Rx ring.\n");
1051 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1052 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1053 		return error;
1054 	}
1055 
1056 	/* Load the DMA map for Rx ring. */
1057 	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1058 	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1059 	    JME_RX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1060 	if (error) {
1061 		device_printf(sc->jme_dev,
1062 		    "could not load DMA'able memory for Rx ring.\n");
1063 		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1064 				sc->jme_rdata.jme_rx_ring,
1065 				sc->jme_cdata.jme_rx_ring_map);
1066 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1067 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1068 		return error;
1069 	}
1070 	sc->jme_rdata.jme_rx_ring_paddr = busaddr;
1071 
1072 	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
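	/*
	 * Only the ring base addresses are programmed into the
	 * JME_TXDBA_HI/LO and JME_RXDBA_HI/LO registers, presumably
	 * because the hardware only increments the low 32 bits as it
	 * walks a ring; hence the JME_ADDR_HI comparisons below.
	 */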
1073 	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
1074 	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
1075 	if ((JME_ADDR_HI(tx_ring_end) !=
1076 	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1077 	    (JME_ADDR_HI(rx_ring_end) !=
1078 	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1079 		device_printf(sc->jme_dev, "4GB boundary crossed, "
1080 		    "switching to 32bit DMA address mode.\n");
1081 		jme_dma_free(sc);
1082 		/* Limit DMA address space to 32bit and try again. */
1083 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1084 		goto again;
1085 	}
1086 
1087 	/* Create parent buffer tag. */
1088 	error = bus_dma_tag_create(NULL,/* parent */
1089 	    1, 0,			/* algnmnt, boundary */
1090 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1091 	    BUS_SPACE_MAXADDR,		/* highaddr */
1092 	    NULL, NULL,			/* filter, filterarg */
1093 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1094 	    0,				/* nsegments */
1095 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1096 	    0,				/* flags */
1097 	    &sc->jme_cdata.jme_buffer_tag);
1098 	if (error) {
1099 		device_printf(sc->jme_dev,
1100 		    "could not create parent buffer DMA tag.\n");
1101 		return error;
1102 	}
1103 
1104 	/*
1105 	 * Create DMA stuffs for shadow status block
1106 	 */
1107 
1108 	/* Create shadow status block tag. */
1109 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1110 	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
1111 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1112 	    BUS_SPACE_MAXADDR,		/* highaddr */
1113 	    NULL, NULL,			/* filter, filterarg */
1114 	    JME_SSB_SIZE,		/* maxsize */
1115 	    1,				/* nsegments */
1116 	    JME_SSB_SIZE,		/* maxsegsize */
1117 	    0,				/* flags */
1118 	    &sc->jme_cdata.jme_ssb_tag);
1119 	if (error) {
1120 		device_printf(sc->jme_dev,
1121 		    "could not create shared status block DMA tag.\n");
1122 		return error;
1123 	}
1124 
1125 	/* Allocate DMA'able memory for shared status block. */
1126 	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1127 	    (void **)&sc->jme_rdata.jme_ssb_block,
1128 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1129 	    &sc->jme_cdata.jme_ssb_map);
1130 	if (error) {
1131 		device_printf(sc->jme_dev, "could not allocate DMA'able "
1132 		    "memory for shared status block.\n");
1133 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1134 		sc->jme_cdata.jme_ssb_tag = NULL;
1135 		return error;
1136 	}
1137 
1138 	/* Load the DMA map for shared status block */
1139 	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1140 	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1141 	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1142 	if (error) {
1143 		device_printf(sc->jme_dev, "could not load DMA'able memory "
1144 		    "for shared status block.\n");
1145 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1146 				sc->jme_rdata.jme_ssb_block,
1147 				sc->jme_cdata.jme_ssb_map);
1148 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1149 		sc->jme_cdata.jme_ssb_tag = NULL;
1150 		return error;
1151 	}
1152 	sc->jme_rdata.jme_ssb_block_paddr = busaddr;
1153 
1154 	/*
1155 	 * Create DMA stuffs for TX buffers
1156 	 */
1157 
1158 	/* Create tag for Tx buffers. */
1159 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1160 	    1, 0,			/* algnmnt, boundary */
1161 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1162 	    BUS_SPACE_MAXADDR,		/* highaddr */
1163 	    NULL, NULL,			/* filter, filterarg */
1164 	    JME_TSO_MAXSIZE,		/* maxsize */
1165 	    JME_MAXTXSEGS,		/* nsegments */
1166 	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
1167 	    0,				/* flags */
1168 	    &sc->jme_cdata.jme_tx_tag);
1169 	if (error != 0) {
1170 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1171 		return error;
1172 	}
1173 
1174 	/* Create DMA maps for Tx buffers. */
1175 	for (i = 0; i < JME_TX_RING_CNT; i++) {
1176 		txd = &sc->jme_cdata.jme_txdesc[i];
1177 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1178 		    &txd->tx_dmamap);
1179 		if (error) {
1180 			int j;
1181 
1182 			device_printf(sc->jme_dev,
1183 			    "could not create %dth Tx dmamap.\n", i);
1184 
1185 			for (j = 0; j < i; ++j) {
1186 				txd = &sc->jme_cdata.jme_txdesc[j];
1187 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1188 						   txd->tx_dmamap);
1189 			}
1190 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1191 			sc->jme_cdata.jme_tx_tag = NULL;
1192 			return error;
1193 		}
1194 	}
1195 
1196 	/*
1197 	 * Create DMA stuffs for RX buffers
1198 	 */
1199 
1200 	/* Create tag for Rx buffers. */
1201 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1202 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
1203 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1204 	    BUS_SPACE_MAXADDR,		/* highaddr */
1205 	    NULL, NULL,			/* filter, filterarg */
1206 	    MCLBYTES,			/* maxsize */
1207 	    1,				/* nsegments */
1208 	    MCLBYTES,			/* maxsegsize */
1209 	    0,				/* flags */
1210 	    &sc->jme_cdata.jme_rx_tag);
1211 	if (error) {
1212 		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1213 		return error;
1214 	}
1215 
1216 	/* Create DMA maps for Rx buffers. */
1217 	error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1218 				  &sc->jme_cdata.jme_rx_sparemap);
1219 	if (error) {
1220 		device_printf(sc->jme_dev,
1221 		    "could not create spare Rx dmamap.\n");
1222 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1223 		sc->jme_cdata.jme_rx_tag = NULL;
1224 		return error;
1225 	}
1226 	for (i = 0; i < JME_RX_RING_CNT; i++) {
1227 		rxd = &sc->jme_cdata.jme_rxdesc[i];
1228 		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1229 		    &rxd->rx_dmamap);
1230 		if (error) {
1231 			int j;
1232 
1233 			device_printf(sc->jme_dev,
1234 			    "could not create %dth Rx dmamap.\n", i);
1235 
1236 			for (j = 0; j < i; ++j) {
1237 				rxd = &sc->jme_cdata.jme_rxdesc[j];
1238 				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1239 						   rxd->rx_dmamap);
1240 			}
1241 			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1242 			    sc->jme_cdata.jme_rx_sparemap);
1243 			bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1244 			sc->jme_cdata.jme_rx_tag = NULL;
1245 			return error;
1246 		}
1247 	}
1248 	return 0;
1249 }
1250 
1251 static void
1252 jme_dma_free(struct jme_softc *sc)
1253 {
1254 	struct jme_txdesc *txd;
1255 	struct jme_rxdesc *rxd;
1256 	int i;
1257 
1258 	/* Tx ring */
1259 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1260 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1261 		    sc->jme_cdata.jme_tx_ring_map);
1262 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1263 		    sc->jme_rdata.jme_tx_ring,
1264 		    sc->jme_cdata.jme_tx_ring_map);
1265 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1266 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1267 	}
1268 
1269 	/* Rx ring */
1270 	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1271 		bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1272 		    sc->jme_cdata.jme_rx_ring_map);
1273 		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1274 		    sc->jme_rdata.jme_rx_ring,
1275 		    sc->jme_cdata.jme_rx_ring_map);
1276 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1277 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1278 	}
1279 
1280 	/* Tx buffers */
1281 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1282 		for (i = 0; i < JME_TX_RING_CNT; i++) {
1283 			txd = &sc->jme_cdata.jme_txdesc[i];
1284 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1285 			    txd->tx_dmamap);
1286 		}
1287 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1288 		sc->jme_cdata.jme_tx_tag = NULL;
1289 	}
1290 
1291 	/* Rx buffers */
1292 	if (sc->jme_cdata.jme_rx_tag != NULL) {
1293 		for (i = 0; i < JME_RX_RING_CNT; i++) {
1294 			rxd = &sc->jme_cdata.jme_rxdesc[i];
1295 			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1296 			    rxd->rx_dmamap);
1297 		}
1298 		bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1299 		    sc->jme_cdata.jme_rx_sparemap);
1300 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1301 		sc->jme_cdata.jme_rx_tag = NULL;
1302 	}
1303 
1304 	/* Shadow status block. */
1305 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1306 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1307 		    sc->jme_cdata.jme_ssb_map);
1308 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1309 		    sc->jme_rdata.jme_ssb_block,
1310 		    sc->jme_cdata.jme_ssb_map);
1311 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1312 		sc->jme_cdata.jme_ssb_tag = NULL;
1313 	}
1314 
1315 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1316 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1317 		sc->jme_cdata.jme_buffer_tag = NULL;
1318 	}
1319 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1320 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1321 		sc->jme_cdata.jme_ring_tag = NULL;
1322 	}
1323 }
1324 
1325 /*
1326  *	Make sure the interface is stopped at reboot time.
1327  */
1328 static int
1329 jme_shutdown(device_t dev)
1330 {
1331 	return jme_suspend(dev);
1332 }
1333 
1334 #ifdef notyet
1335 /*
1336  * Unlike other ethernet controllers, the JMC250 requires the
1337  * link speed to be explicitly reset to 10/100Mbps, as a gigabit
1338  * link will consume more than 375mA.
1339  * Note that we reset the link speed to 10/100Mbps with
1340  * auto-negotiation, but we don't know whether that operation
1341  * will succeed, as we have no control after powering off.
1342  * If the renegotiation fails, WOL may not work.  Running
1343  * at 1Gbps draws more than the 375mA at 3.3V specified in
1344  * the PCI specification, and that would result in power to
1345  * the ethernet controller being shut down completely.
1346  *
1347  * TODO
1348  *  Save the currently negotiated media speed/duplex/flow-control
1349  *  to the softc and restore the same link after resuming.
1350  *  PHY handling such as powering down/resetting to 100Mbps
1351  *  may be better handled in the suspend method of the phy driver.
1352  */
1353 static void
1354 jme_setlinkspeed(struct jme_softc *sc)
1355 {
1356 	struct mii_data *mii;
1357 	int aneg, i;
1358 
1359 	JME_LOCK_ASSERT(sc);
1360 
1361 	mii = device_get_softc(sc->jme_miibus);
1362 	mii_pollstat(mii);
1363 	aneg = 0;
1364 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1365 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1366 		case IFM_10_T:
1367 		case IFM_100_TX:
1368 			return;
1369 		case IFM_1000_T:
1370 			aneg++;		/* FALLTHROUGH */
1371 		default:
1372 			break;
1373 		}
1374 	}
1375 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1376 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1377 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1378 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1379 	    BMCR_AUTOEN | BMCR_STARTNEG);
1380 	DELAY(1000);
1381 	if (aneg != 0) {
1382 		/* Poll link state until jme(4) gets a 10/100 link. */
1383 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1384 			mii_pollstat(mii);
1385 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1386 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1387 				case IFM_10_T:
1388 				case IFM_100_TX:
1389 					jme_mac_config(sc);
1390 					return;
1391 				default:
1392 					break;
1393 				}
1394 			}
1395 			JME_UNLOCK(sc);
1396 			pause("jmelnk", hz);
1397 			JME_LOCK(sc);
1398 		}
1399 		if (i == MII_ANEGTICKS_GIGE)
1400 			device_printf(sc->jme_dev, "establishing link failed, "
1401 			    "WOL may not work!");
1402 	}
1403 	/*
1404 	 * No link; force the MAC to a 100Mbps, full-duplex link.
1405 	 * This is the last resort and may or may not work.
1406 	 */
1407 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1408 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1409 	jme_mac_config(sc);
1410 }
1411 
1412 static void
1413 jme_setwol(struct jme_softc *sc)
1414 {
1415 	struct ifnet *ifp = &sc->arpcom.ac_if;
1416 	uint32_t gpr, pmcs;
1417 	uint16_t pmstat;
1418 	int pmc;
1419 
1420 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1421 		/* No PME capability, PHY power down. */
1422 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1423 		    MII_BMCR, BMCR_PDOWN);
1424 		return;
1425 	}
1426 
1427 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1428 	pmcs = CSR_READ_4(sc, JME_PMCS);
1429 	pmcs &= ~PMCS_WOL_ENB_MASK;
1430 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1431 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1432 		/* Enable PME message. */
1433 		gpr |= GPREG0_PME_ENB;
1434 		/* For gigabit controllers, reset link speed to 10/100. */
1435 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1436 			jme_setlinkspeed(sc);
1437 	}
1438 
1439 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1440 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1441 
1442 	/* Request PME. */
1443 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1444 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1445 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1446 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1447 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1448 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1449 		/* No WOL, PHY power down. */
1450 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1451 		    MII_BMCR, BMCR_PDOWN);
1452 	}
1453 }
1454 #endif
1455 
1456 static int
1457 jme_suspend(device_t dev)
1458 {
1459 	struct jme_softc *sc = device_get_softc(dev);
1460 	struct ifnet *ifp = &sc->arpcom.ac_if;
1461 
1462 	lwkt_serialize_enter(ifp->if_serializer);
1463 	jme_stop(sc);
1464 #ifdef notyet
1465 	jme_setwol(sc);
1466 #endif
1467 	lwkt_serialize_exit(ifp->if_serializer);
1468 
1469 	return (0);
1470 }
1471 
1472 static int
1473 jme_resume(device_t dev)
1474 {
1475 	struct jme_softc *sc = device_get_softc(dev);
1476 	struct ifnet *ifp = &sc->arpcom.ac_if;
1477 #ifdef notyet
1478 	int pmc;
1479 #endif
1480 
1481 	lwkt_serialize_enter(ifp->if_serializer);
1482 
1483 #ifdef notyet
1484 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1485 		uint16_t pmstat;
1486 
1487 		pmstat = pci_read_config(sc->jme_dev,
1488 		    pmc + PCIR_POWER_STATUS, 2);
1489 		/* Disable PME and clear the PME status. */
1490 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1491 		pci_write_config(sc->jme_dev,
1492 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1493 	}
1494 #endif
1495 
1496 	if (ifp->if_flags & IFF_UP)
1497 		jme_init(sc);
1498 
1499 	lwkt_serialize_exit(ifp->if_serializer);
1500 
1501 	return (0);
1502 }
1503 
1504 static int
1505 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1506 {
1507 	struct jme_txdesc *txd;
1508 	struct jme_desc *desc;
1509 	struct mbuf *m;
1510 	struct jme_dmamap_ctx ctx;
1511 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1512 	int maxsegs;
1513 	int error, i, prod;
1514 	uint32_t cflags;
1515 
1516 	M_ASSERTPKTHDR((*m_head));
1517 
1518 	prod = sc->jme_cdata.jme_tx_prod;
1519 	txd = &sc->jme_cdata.jme_txdesc[prod];
1520 
1521 	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
1522 		  (JME_TXD_RSVD + 1);
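	/*
	 * The extra slot beyond JME_TXD_RSVD accounts for the
	 * flag/length descriptor that heads each packet; the data
	 * segments occupy the slots after it.
	 */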
1523 	if (maxsegs > JME_MAXTXSEGS)
1524 		maxsegs = JME_MAXTXSEGS;
1525 	KASSERT(maxsegs >= (sc->jme_txd_spare - 1),
1526 		("not enough segments %d\n", maxsegs));
1527 
1528 	ctx.nsegs = maxsegs;
1529 	ctx.segs = txsegs;
1530 	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1531 				     *m_head, jme_dmamap_buf_cb, &ctx,
1532 				     BUS_DMA_NOWAIT);
1533 	if (!error && ctx.nsegs == 0) {
1534 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1535 		error = EFBIG;
1536 	}
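	/*
	 * EFBIG means the mbuf chain has more segments than maxsegs;
	 * linearize it with m_defrag() and retry the load once.
	 */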
1537 	if (error == EFBIG) {
1538 		m = m_defrag(*m_head, MB_DONTWAIT);
1539 		if (m == NULL) {
1540 			if_printf(&sc->arpcom.ac_if,
1541 				  "could not defrag TX mbuf\n");
1542 			m_freem(*m_head);
1543 			*m_head = NULL;
1544 			return (ENOMEM);
1545 		}
1546 		*m_head = m;
1547 
1548 		ctx.nsegs = maxsegs;
1549 		ctx.segs = txsegs;
1550 		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
1551 					     txd->tx_dmamap, *m_head,
1552 					     jme_dmamap_buf_cb, &ctx,
1553 					     BUS_DMA_NOWAIT);
1554 		if (error || ctx.nsegs == 0) {
1555 			if_printf(&sc->arpcom.ac_if,
1556 				  "could not load defragged TX mbuf\n");
1557 			if (!error) {
1558 				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
1559 						  txd->tx_dmamap);
1560 				error = EFBIG;
1561 			}
1562 			m_freem(*m_head);
1563 			*m_head = NULL;
1564 			return (error);
1565 		}
1566 	} else if (error) {
1567 		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
1568 		return (error);
1569 	}
1570 
1571 	m = *m_head;
1572 	cflags = 0;
1573 
1574 	/* Configure checksum offload. */
1575 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1576 		cflags |= JME_TD_IPCSUM;
1577 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1578 		cflags |= JME_TD_TCPCSUM;
1579 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1580 		cflags |= JME_TD_UDPCSUM;
1581 
1582 	/* Configure VLAN. */
1583 	if (m->m_flags & M_VLANTAG) {
1584 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1585 		cflags |= JME_TD_VLAN_TAG;
1586 	}
1587 
1588 	desc = &sc->jme_rdata.jme_tx_ring[prod];
1589 	desc->flags = htole32(cflags);
1590 	desc->buflen = 0;
1591 	desc->addr_hi = htole32(m->m_pkthdr.len);
1592 	desc->addr_lo = 0;
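	/*
	 * This head descriptor carries only the checksum/VLAN flags
	 * and, in its addr_hi field, the total packet length; the
	 * actual buffer segments are filled into the following slots.
	 */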
1593 	sc->jme_cdata.jme_tx_cnt++;
1594 	KKASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
1595 	JME_DESC_INC(prod, JME_TX_RING_CNT);
1596 	for (i = 0; i < ctx.nsegs; i++) {
1597 		desc = &sc->jme_rdata.jme_tx_ring[prod];
1598 		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1599 		desc->buflen = htole32(txsegs[i].ds_len);
1600 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1601 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1602 
1603 		sc->jme_cdata.jme_tx_cnt++;
1604 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1605 			 JME_TX_RING_CNT - JME_TXD_RSVD);
1606 		JME_DESC_INC(prod, JME_TX_RING_CNT);
1607 	}
1608 
1609 	/* Update producer index. */
1610 	sc->jme_cdata.jme_tx_prod = prod;
1611 	/*
1612 	 * Finally, request an interrupt and give ownership of the
1613 	 * first descriptor to the hardware.
1614 	 */
1615 	desc = txd->tx_desc;
1616 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1617 
1618 	txd->tx_m = m;
1619 	txd->tx_ndesc = ctx.nsegs + 1;
1620 
1621 	/* Sync descriptors. */
1622 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1623 			BUS_DMASYNC_PREWRITE);
1624 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1625 			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
1626 	return (0);
1627 }
1628 
1629 static void
1630 jme_start(struct ifnet *ifp)
1631 {
1632 	struct jme_softc *sc = ifp->if_softc;
1633 	struct mbuf *m_head;
1634 	int enq = 0;
1635 
1636 	ASSERT_SERIALIZED(ifp->if_serializer);
1637 
1638 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1639 		ifq_purge(&ifp->if_snd);
1640 		return;
1641 	}
1642 
1643 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1644 		return;
1645 
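	/*
	 * If the ring has filled past the high-water mark, try to
	 * reclaim completed descriptors before queueing more frames.
	 */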
1646 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1647 		jme_txeof(sc);
1648 
1649 	while (!ifq_is_empty(&ifp->if_snd)) {
1650 		/*
1651 		 * Check the number of available TX descs; always
1652 		 * leave JME_TXD_RSVD TX descs free.
1653 		 */
1654 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1655 		    JME_TX_RING_CNT - JME_TXD_RSVD) {
1656 			ifp->if_flags |= IFF_OACTIVE;
1657 			break;
1658 		}
1659 
1660 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1661 		if (m_head == NULL)
1662 			break;
1663 
1664 		/*
1665 		 * Pack the data into the transmit ring. If we
1666 		 * don't have room, set the OACTIVE flag and wait
1667 		 * for the NIC to drain the ring.
1668 		 */
1669 		if (jme_encap(sc, &m_head)) {
1670 			if (m_head == NULL) {
1671 				ifp->if_oerrors++;
1672 				break;
1673 			}
1674 			ifq_prepend(&ifp->if_snd, m_head);
1675 			ifp->if_flags |= IFF_OACTIVE;
1676 			break;
1677 		}
1678 		enq++;
1679 
1680 		/*
1681 		 * If there's a BPF listener, bounce a copy of this frame
1682 		 * to him.
1683 		 */
1684 		ETHER_BPF_MTAP(ifp, m_head);
1685 	}
1686 
1687 	if (enq > 0) {
1688 		/*
1689 		 * Reading TXCSR takes a very long time under heavy load,
1690 		 * so cache the TXCSR value and write it, ORed with the
1691 		 * kick command, to TXCSR.  This saves one register
1692 		 * access cycle.
1693 		 */
1694 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1695 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1696 		/* Set a timeout in case the chip goes out to lunch. */
1697 		ifp->if_timer = JME_TX_TIMEOUT;
1698 	}
1699 }
1700 
1701 static void
1702 jme_watchdog(struct ifnet *ifp)
1703 {
1704 	struct jme_softc *sc = ifp->if_softc;
1705 
1706 	ASSERT_SERIALIZED(ifp->if_serializer);
1707 
1708 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1709 		if_printf(ifp, "watchdog timeout (missed link)\n");
1710 		ifp->if_oerrors++;
1711 		jme_init(sc);
1712 		return;
1713 	}
1714 
1715 	jme_txeof(sc);
1716 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1717 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1718 			  "-- recovering\n");
1719 		if (!ifq_is_empty(&ifp->if_snd))
1720 			if_devstart(ifp);
1721 		return;
1722 	}
1723 
1724 	if_printf(ifp, "watchdog timeout\n");
1725 	ifp->if_oerrors++;
1726 	jme_init(sc);
1727 	if (!ifq_is_empty(&ifp->if_snd))
1728 		if_devstart(ifp);
1729 }
1730 
1731 static int
1732 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1733 {
1734 	struct jme_softc *sc = ifp->if_softc;
1735 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1736 	struct ifreq *ifr = (struct ifreq *)data;
1737 	int error = 0, mask;
1738 
1739 	ASSERT_SERIALIZED(ifp->if_serializer);
1740 
1741 	switch (cmd) {
1742 	case SIOCSIFMTU:
1743 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1744 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1745 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1746 			error = EINVAL;
1747 			break;
1748 		}
1749 
1750 		if (ifp->if_mtu != ifr->ifr_mtu) {
1751 			/*
1752 			 * No special configuration is required when the
1753 			 * interface MTU is changed, but the availability
1754 			 * of Tx checksum offload should be checked against
1755 			 * the new MTU size, as the Tx FIFO is only 2K deep.
1756 			 */
1757 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1758 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1759 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1760 			}
1761 			ifp->if_mtu = ifr->ifr_mtu;
1762 			if (ifp->if_flags & IFF_RUNNING)
1763 				jme_init(sc);
1764 		}
1765 		break;
1766 
1767 	case SIOCSIFFLAGS:
1768 		if (ifp->if_flags & IFF_UP) {
1769 			if (ifp->if_flags & IFF_RUNNING) {
1770 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1771 				    (IFF_PROMISC | IFF_ALLMULTI))
1772 					jme_set_filter(sc);
1773 			} else {
1774 				jme_init(sc);
1775 			}
1776 		} else {
1777 			if (ifp->if_flags & IFF_RUNNING)
1778 				jme_stop(sc);
1779 		}
1780 		sc->jme_if_flags = ifp->if_flags;
1781 		break;
1782 
1783 	case SIOCADDMULTI:
1784 	case SIOCDELMULTI:
1785 		if (ifp->if_flags & IFF_RUNNING)
1786 			jme_set_filter(sc);
1787 		break;
1788 
1789 	case SIOCSIFMEDIA:
1790 	case SIOCGIFMEDIA:
1791 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1792 		break;
1793 
1794 	case SIOCSIFCAP:
1795 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1796 
1797 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1798 			if (IFCAP_TXCSUM & ifp->if_capabilities) {
1799 				ifp->if_capenable ^= IFCAP_TXCSUM;
1800 				if (IFCAP_TXCSUM & ifp->if_capenable)
1801 					ifp->if_hwassist |= JME_CSUM_FEATURES;
1802 				else
1803 					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1804 			}
1805 		}
1806 		if ((mask & IFCAP_RXCSUM) &&
1807 		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
1808 			uint32_t reg;
1809 
1810 			ifp->if_capenable ^= IFCAP_RXCSUM;
1811 			reg = CSR_READ_4(sc, JME_RXMAC);
1812 			reg &= ~RXMAC_CSUM_ENB;
1813 			if (ifp->if_capenable & IFCAP_RXCSUM)
1814 				reg |= RXMAC_CSUM_ENB;
1815 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1816 		}
1817 
1818 		if ((mask & IFCAP_VLAN_HWTAGGING) &&
1819 		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
1820 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1821 			jme_set_vlan(sc);
1822 		}
1823 		break;
1824 
1825 	default:
1826 		error = ether_ioctl(ifp, cmd, data);
1827 		break;
1828 	}
1829 	return (error);
1830 }
1831 
1832 static void
1833 jme_mac_config(struct jme_softc *sc)
1834 {
1835 	struct mii_data *mii;
1836 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1837 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1838 
1839 	mii = device_get_softc(sc->jme_miibus);
1840 
1841 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1842 	DELAY(10);
1843 	CSR_WRITE_4(sc, JME_GHC, 0);
1844 	ghc = 0;
1845 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1846 	rxmac &= ~RXMAC_FC_ENB;
1847 	txmac = CSR_READ_4(sc, JME_TXMAC);
1848 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1849 	txpause = CSR_READ_4(sc, JME_TXPFC);
1850 	txpause &= ~TXPFC_PAUSE_ENB;
1851 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1852 		ghc |= GHC_FULL_DUPLEX;
1853 		rxmac &= ~RXMAC_COLL_DET_ENB;
1854 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1855 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1856 		    TXMAC_FRAME_BURST);
1857 #ifdef notyet
1858 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1859 			txpause |= TXPFC_PAUSE_ENB;
1860 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1861 			rxmac |= RXMAC_FC_ENB;
1862 #endif
1863 		/* Disable retry transmit timer/retry limit. */
1864 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1865 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1866 	} else {
1867 		rxmac |= RXMAC_COLL_DET_ENB;
1868 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1869 		/* Enable retry transmit timer/retry limit. */
1870 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1871 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1872 	}
1873 
1874 	/*
1875 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1876 	 */
1877 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1878 	gp1 &= ~GPREG1_WA_HDX;
1879 
1880 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1881 		hdx = 1;
1882 
1883 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1884 	case IFM_10_T:
1885 		ghc |= GHC_SPEED_10;
1886 		if (hdx)
1887 			gp1 |= GPREG1_WA_HDX;
1888 		break;
1889 
1890 	case IFM_100_TX:
1891 		ghc |= GHC_SPEED_100;
1892 		if (hdx)
1893 			gp1 |= GPREG1_WA_HDX;
1894 
1895 		/*
1896 		 * Use extended FIFO depth to work around CRC errors
1897 		 * emitted by chips prior to revision JMC250B.
1898 		 */
1899 		phyconf = JMPHY_CONF_EXTFIFO;
1900 		break;
1901 
1902 	case IFM_1000_T:
1903 		if (sc->jme_caps & JME_CAP_FASTETH)
1904 			break;
1905 
1906 		ghc |= GHC_SPEED_1000;
1907 		if (hdx)
1908 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1909 		break;
1910 
1911 	default:
1912 		break;
1913 	}
1914 	CSR_WRITE_4(sc, JME_GHC, ghc);
1915 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1916 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1917 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1918 
1919 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1920 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1921 				    JMPHY_CONF, phyconf);
1922 	}
1923 	if (sc->jme_workaround & JME_WA_HDX)
1924 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1925 }
1926 
1927 static void
1928 jme_intr(void *xsc)
1929 {
1930 	struct jme_softc *sc = xsc;
1931 	struct ifnet *ifp = &sc->arpcom.ac_if;
1932 	uint32_t status;
1933 
1934 	ASSERT_SERIALIZED(ifp->if_serializer);
1935 
1936 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1937 	if (status == 0 || status == 0xFFFFFFFF)
1938 		return;
1939 
1940 	/* Disable interrupts. */
1941 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1942 
1943 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1944 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1945 		goto back;
1946 
1947 	/* Reset PCC counter/timer and Ack interrupts. */
1948 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1949 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1950 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1951 	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1952 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
1953 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1954 
1955 	if (ifp->if_flags & IFF_RUNNING) {
1956 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1957 			jme_rxeof(sc);
1958 
1959 		if (status & INTR_RXQ_DESC_EMPTY) {
1960 			/*
1961 			 * Notify the hardware of the availability of new
1962 			 * Rx buffers. Reading RXCSR takes a very long time
1963 			 * under heavy load, so cache the RXCSR value and
1964 			 * write the ORed value with the kick command to
1965 			 * RXCSR. This saves one register access cycle.
1966 			 */
1967 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1968 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1969 		}
1970 
1971 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1972 			jme_txeof(sc);
1973 			if (!ifq_is_empty(&ifp->if_snd))
1974 				if_devstart(ifp);
1975 		}
1976 	}
1977 back:
1978 	/* Reenable interrupts. */
1979 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1980 }
1981 
1982 static void
1983 jme_txeof(struct jme_softc *sc)
1984 {
1985 	struct ifnet *ifp = &sc->arpcom.ac_if;
1986 	struct jme_txdesc *txd;
1987 	uint32_t status;
1988 	int cons, nsegs;
1989 
1990 	cons = sc->jme_cdata.jme_tx_cons;
1991 	if (cons == sc->jme_cdata.jme_tx_prod)
1992 		return;
1993 
1994 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1995 			sc->jme_cdata.jme_tx_ring_map,
1996 			BUS_DMASYNC_POSTREAD);
1997 
1998 	/*
1999 	 * Go through our Tx list and free mbufs for those
2000 	 * frames which have been transmitted.
2001 	 */
2002 	while (cons != sc->jme_cdata.jme_tx_prod) {
2003 		txd = &sc->jme_cdata.jme_txdesc[cons];
2004 		KASSERT(txd->tx_m != NULL,
2005 			("%s: freeing NULL mbuf!\n", __func__));
2006 
2007 		status = le32toh(txd->tx_desc->flags);
2008 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2009 			break;
2010 
2011 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2012 			ifp->if_oerrors++;
2013 		} else {
2014 			ifp->if_opackets++;
2015 			if (status & JME_TD_COLLISION) {
2016 				ifp->if_collisions +=
2017 				    le32toh(txd->tx_desc->buflen) &
2018 				    JME_TD_BUF_LEN_MASK;
2019 			}
2020 		}
2021 
2022 		/*
2023 		 * Only the first descriptor of a multi-descriptor
2024 		 * transmission is updated, so the driver has to skip the
2025 		 * entire chain of buffers for the transmitted frame. In
2026 		 * other words, the JME_TD_OWN bit is valid only in the
2027 		 * first descriptor of a multi-descriptor transmission.
2028 		 */
2029 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2030 			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2031 			JME_DESC_INC(cons, JME_TX_RING_CNT);
2032 		}
2033 
2034 		/* Reclaim transferred mbufs. */
2035 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2036 		m_freem(txd->tx_m);
2037 		txd->tx_m = NULL;
2038 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2039 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2040 			("%s: Active Tx desc counter was garbled\n", __func__));
2041 		txd->tx_ndesc = 0;
2042 	}
2043 	sc->jme_cdata.jme_tx_cons = cons;
2044 
2045 	if (sc->jme_cdata.jme_tx_cnt == 0)
2046 		ifp->if_timer = 0;
2047 
2048 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2049 	    JME_TX_RING_CNT - JME_TXD_RSVD)
2050 		ifp->if_flags &= ~IFF_OACTIVE;
2051 
2052 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2053 			sc->jme_cdata.jme_tx_ring_map,
2054 			BUS_DMASYNC_PREWRITE);
2055 }
2056 
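/*
 * Hand a run of Rx descriptors back to the hardware unmodified so
 * their existing buffers are reused, e.g. after an Rx error or an
 * mbuf allocation failure.
 */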
2057 static __inline void
2058 jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
2059 {
2060 	int i;
2061 
2062 	for (i = 0; i < count; ++i) {
2063 		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];
2064 
2065 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2066 		desc->buflen = htole32(MCLBYTES);
2067 		JME_DESC_INC(cons, JME_RX_RING_CNT);
2068 	}
2069 }
2070 
2071 /* Receive a frame. */
2072 static void
2073 jme_rxpkt(struct jme_softc *sc)
2074 {
2075 	struct ifnet *ifp = &sc->arpcom.ac_if;
2076 	struct jme_desc *desc;
2077 	struct jme_rxdesc *rxd;
2078 	struct mbuf *mp, *m;
2079 	uint32_t flags, status;
2080 	int cons, count, nsegs;
2081 
2082 	cons = sc->jme_cdata.jme_rx_cons;
2083 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2084 	flags = le32toh(desc->flags);
2085 	status = le32toh(desc->buflen);
2086 	nsegs = JME_RX_NSEGS(status);
2087 
2088 	if (status & JME_RX_ERR_STAT) {
2089 		ifp->if_ierrors++;
2090 		jme_discard_rxbufs(sc, cons, nsegs);
2091 #ifdef JME_SHOW_ERRORS
2092 		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2093 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2094 #endif
2095 		sc->jme_cdata.jme_rx_cons += nsegs;
2096 		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2097 		return;
2098 	}
2099 
2100 	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2101 	for (count = 0; count < nsegs; count++,
2102 	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2103 		rxd = &sc->jme_cdata.jme_rxdesc[cons];
2104 		mp = rxd->rx_m;
2105 
2106 		/* Add a new receive buffer to the ring. */
2107 		if (jme_newbuf(sc, rxd, 0) != 0) {
2108 			ifp->if_iqdrops++;
2109 			/* Reuse buffer. */
2110 			jme_discard_rxbufs(sc, cons, nsegs - count);
2111 			if (sc->jme_cdata.jme_rxhead != NULL) {
2112 				m_freem(sc->jme_cdata.jme_rxhead);
2113 				JME_RXCHAIN_RESET(sc);
2114 			}
2115 			break;
2116 		}
2117 
2118 		/*
2119 		 * Assume we've received a full-sized frame.
2120 		 * The actual size is fixed up when we encounter the end
2121 		 * of a multi-segmented frame.
2122 		 */
2123 		mp->m_len = MCLBYTES;
2124 
2125 		/* Chain received mbufs. */
2126 		if (sc->jme_cdata.jme_rxhead == NULL) {
2127 			sc->jme_cdata.jme_rxhead = mp;
2128 			sc->jme_cdata.jme_rxtail = mp;
2129 		} else {
2130 			/*
2131 			 * The receive processor can handle a maximum
2132 			 * frame size of 65535 bytes.
2133 			 */
2134 			mp->m_flags &= ~M_PKTHDR;
2135 			sc->jme_cdata.jme_rxtail->m_next = mp;
2136 			sc->jme_cdata.jme_rxtail = mp;
2137 		}
2138 
2139 		if (count == nsegs - 1) {
2140 			/* Last desc. for this frame. */
2141 			m = sc->jme_cdata.jme_rxhead;
2142 			/* XXX assert PKTHDR? */
2143 			m->m_flags |= M_PKTHDR;
2144 			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
2145 			if (nsegs > 1) {
2146 				/* Set first mbuf size. */
2147 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2148 				/* Set last mbuf size. */
2149 				mp->m_len = sc->jme_cdata.jme_rxlen -
2150 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2151 				    (MCLBYTES * (nsegs - 2)));
2152 			} else {
2153 				m->m_len = sc->jme_cdata.jme_rxlen;
2154 			}
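			/*
			 * Example (illustrative values, assuming 2048 byte
			 * clusters): a 3010 byte reception (3000 frame bytes
			 * plus the 10 pad bytes) spans nsegs = 2 clusters;
			 * the first mbuf carries 2048 - 10 = 2038 bytes and
			 * the last carries 3000 - 2038 = 962.
			 */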
2155 			m->m_pkthdr.rcvif = ifp;
2156 
2157 			/*
2158 			 * Account for the 10 bytes of auto padding that
2159 			 * aligns the IP header on a 32bit boundary. Also
2160 			 * note that the CRC bytes are automatically
2161 			 * stripped by the hardware.
2162 			 */
2163 			m->m_data += JME_RX_PAD_BYTES;
2164 
2165 			/* Set checksum information. */
2166 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2167 			    (flags & JME_RD_IPV4)) {
2168 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2169 				if (flags & JME_RD_IPCSUM)
2170 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2171 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2172 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2173 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2174 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2175 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2176 					m->m_pkthdr.csum_flags |=
2177 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2178 					m->m_pkthdr.csum_data = 0xffff;
2179 				}
2180 			}
2181 
2182 			/* Check for VLAN tagged packets. */
2183 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2184 			    (flags & JME_RD_VLAN_TAG)) {
2185 				m->m_pkthdr.ether_vlantag =
2186 				    flags & JME_RD_VLAN_MASK;
2187 				m->m_flags |= M_VLANTAG;
2188 			}
2189 
2190 			ifp->if_ipackets++;
2191 			/* Pass it on. */
2192 			ifp->if_input(ifp, m);
2193 
2194 			/* Reset mbuf chains. */
2195 			JME_RXCHAIN_RESET(sc);
2196 		}
2197 	}
2198 
2199 	sc->jme_cdata.jme_rx_cons += nsegs;
2200 	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2201 }
2202 
2203 static void
2204 jme_rxeof(struct jme_softc *sc)
2205 {
2206 	struct jme_desc *desc;
2207 	int nsegs, prog, pktlen;
2208 
2209 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2210 			sc->jme_cdata.jme_rx_ring_map,
2211 			BUS_DMASYNC_POSTREAD);
2212 
2213 	prog = 0;
2214 	for (;;) {
2215 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2216 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2217 			break;
2218 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2219 			break;
2220 
2221 		/*
2222 		 * Check the number of segments against the received
2223 		 * byte count. A non-matching value would indicate that
2224 		 * the hardware is still updating the Rx descriptors.
2225 		 * I'm not sure whether this check is needed.
2226 		 */
2227 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2228 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2229 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2230 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2231 				  "and packet size(%d) mismatch\n",
2232 				  nsegs, pktlen);
2233 			break;
2234 		}
2235 
2236 		/* Received a frame. */
2237 		jme_rxpkt(sc);
2238 		prog++;
2239 	}
2240 
2241 	if (prog > 0) {
2242 		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2243 				sc->jme_cdata.jme_rx_ring_map,
2244 				BUS_DMASYNC_PREWRITE);
2245 	}
2246 }
2247 
2248 static void
2249 jme_tick(void *xsc)
2250 {
2251 	struct jme_softc *sc = xsc;
2252 	struct ifnet *ifp = &sc->arpcom.ac_if;
2253 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2254 
2255 	lwkt_serialize_enter(ifp->if_serializer);
2256 
2257 	mii_tick(mii);
2258 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2259 
2260 	lwkt_serialize_exit(ifp->if_serializer);
2261 }
2262 
2263 static void
2264 jme_reset(struct jme_softc *sc)
2265 {
2266 #ifdef foo
2267 	/* Stop receiver, transmitter. */
2268 	jme_stop_rx(sc);
2269 	jme_stop_tx(sc);
2270 #endif
2271 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2272 	DELAY(10);
2273 	CSR_WRITE_4(sc, JME_GHC, 0);
2274 }
2275 
2276 static void
2277 jme_init(void *xsc)
2278 {
2279 	struct jme_softc *sc = xsc;
2280 	struct ifnet *ifp = &sc->arpcom.ac_if;
2281 	struct mii_data *mii;
2282 	uint8_t eaddr[ETHER_ADDR_LEN];
2283 	bus_addr_t paddr;
2284 	uint32_t reg;
2285 	int error;
2286 
2287 	ASSERT_SERIALIZED(ifp->if_serializer);
2288 
2289 	/*
2290 	 * Cancel any pending I/O.
2291 	 */
2292 	jme_stop(sc);
2293 
2294 	/*
2295 	 * Reset the chip to a known state.
2296 	 */
2297 	jme_reset(sc);
2298 
2299 	/*
2300 	 * Since we always use 64bit address mode for transmitting,
2301 	 * each Tx request requires one more dummy descriptor.
2302 	 */
2303 	sc->jme_txd_spare =
2304 	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES) + 1;
2305 	KKASSERT(sc->jme_txd_spare >= 2);
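	/*
	 * E.g. with a 1500 byte MTU and 2048 byte clusters (typical
	 * values, assumed here for illustration), this works out to
	 * howmany(1500 + 18, 2048) + 1 = 2 spare descriptors.
	 */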
2306 
2307 	/* Init descriptors. */
2308 	error = jme_init_rx_ring(sc);
2309 	if (error != 0) {
2310 		device_printf(sc->jme_dev,
2311 		    "%s: initialization failed: no memory for Rx buffers.\n",
2312 		    __func__);
2313 		jme_stop(sc);
2314 		return;
2315 	}
2316 	jme_init_tx_ring(sc);
2317 
2318 	/* Initialize shadow status block. */
2319 	jme_init_ssb(sc);
2320 
2321 	/* Reprogram the station address. */
2322 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2323 	CSR_WRITE_4(sc, JME_PAR0,
2324 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2325 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
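	/*
	 * E.g. a station address of 00:11:22:33:44:55 is programmed
	 * as PAR0 = 0x33221100 and PAR1 = 0x00005544.
	 */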
2326 
2327 	/*
2328 	 * Configure Tx queue.
2329 	 *  Tx priority queue weight value : 0
2330 	 *  Tx FIFO threshold for processing next packet : 16QW
2331 	 *  Maximum Tx DMA length : 512
2332 	 *  Allow Tx DMA burst.
2333 	 */
2334 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2335 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2336 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2337 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2338 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2339 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2340 
2341 	/* Set Tx descriptor counter. */
2342 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2343 
2344 	/* Set Tx ring address to the hardware. */
2345 	paddr = JME_TX_RING_ADDR(sc, 0);
2346 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2347 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2348 
2349 	/* Configure TxMAC parameters. */
2350 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2351 	reg |= TXMAC_THRESH_1_PKT;
2352 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2353 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2354 
2355 	/*
2356 	 * Configure Rx queue.
2357 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2358 	 *  FIFO threshold for processing next packet : 128QW
2359 	 *  Rx queue 0 select
2360 	 *  Max Rx DMA length : 128
2361 	 *  Rx descriptor retry : 32
2362 	 *  Rx descriptor retry time gap : 256ns
2363 	 *  Don't receive runt/bad frame.
2364 	 */
2365 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2366 	/*
2367 	 * Since the Rx FIFO is 4K bytes, receiving frames larger
2368 	 * than 4K bytes will suffer from Rx FIFO overruns, so
2369 	 * decrease the FIFO threshold to reduce overruns for frames
2370 	 * larger than 4000 bytes.
2371 	 * For best performance with standard MTU sized frames, use
2372 	 * the maximum allowable FIFO threshold, 128QW.
2373 	 */
2374 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2375 	    JME_RX_FIFO_SIZE)
2376 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2377 	else
2378 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
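	/*
	 * E.g. a standard 1500 byte MTU yields a maximum frame of
	 * 1500 + 14 (header) + 4 (VLAN) + 4 (CRC) = 1522 bytes, well
	 * within the 4K FIFO, so the 128QW threshold is used.
	 */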
2379 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2380 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2381 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2382 	/* XXX TODO DROP_BAD */
2383 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2384 
2385 	/* Set Rx descriptor counter. */
2386 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2387 
2388 	/* Set Rx ring address to the hardware. */
2389 	paddr = JME_RX_RING_ADDR(sc, 0);
2390 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2391 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2392 
2393 	/* Clear receive filter. */
2394 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2395 
2396 	/* Set up the receive filter. */
2397 	jme_set_filter(sc);
2398 	jme_set_vlan(sc);
2399 
2400 	/*
2401 	 * Disable all WOL bits as WOL can interfere with normal Rx
2402 	 * operation. Also clear WOL detection status bits.
2403 	 */
2404 	reg = CSR_READ_4(sc, JME_PMCS);
2405 	reg &= ~PMCS_WOL_ENB_MASK;
2406 	CSR_WRITE_4(sc, JME_PMCS, reg);
2407 
2408 	/*
2409 	 * Pad 10 bytes right before the received frame. This greatly
2410 	 * helps Rx performance on strict-alignment architectures, as
2411 	 * the frame need not be copied to align the payload.
2412 	 */
2413 	reg = CSR_READ_4(sc, JME_RXMAC);
2414 	reg |= RXMAC_PAD_10BYTES;
2415 
2416 	if (ifp->if_capenable & IFCAP_RXCSUM)
2417 		reg |= RXMAC_CSUM_ENB;
2418 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2419 
2420 	/* Configure general purpose reg0 */
2421 	reg = CSR_READ_4(sc, JME_GPREG0);
2422 	reg &= ~GPREG0_PCC_UNIT_MASK;
2423 	/* Set PCC timer resolution to micro-seconds unit. */
2424 	reg |= GPREG0_PCC_UNIT_US;
2425 	/*
2426 	 * Disable all shadow register posting, as we have to read the
2427 	 * JME_INTR_STATUS register in jme_intr. It also seems hard to
2428 	 * synchronize interrupt status between hardware and software
2429 	 * with shadow posting, due to the requirements of
2430 	 * bus_dmamap_sync(9).
2431 	 */
2432 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2433 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2434 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2435 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2436 	/* Disable posting of DW0. */
2437 	reg &= ~GPREG0_POST_DW0_ENB;
2438 	/* Clear PME message. */
2439 	reg &= ~GPREG0_PME_ENB;
2440 	/* Set PHY address. */
2441 	reg &= ~GPREG0_PHY_ADDR_MASK;
2442 	reg |= sc->jme_phyaddr;
2443 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2444 
2445 	/* Configure Tx queue 0 packet completion coalescing. */
2446 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2447 	    PCCTX_COAL_TO_MASK;
2448 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2449 	    PCCTX_COAL_PKT_MASK;
2450 	reg |= PCCTX_COAL_TXQ0;
2451 	CSR_WRITE_4(sc, JME_PCCTX, reg);
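	/*
	 * With the PCC unit set to microseconds above, a Tx completion
	 * interrupt should fire once jme_tx_coal_pkt packets have
	 * completed or jme_tx_coal_to microseconds have elapsed,
	 * whichever comes first.
	 */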
2452 
2453 	/* Configure Rx queue 0 packet completion coalescing. */
2454 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2455 	    PCCRX_COAL_TO_MASK;
2456 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2457 	    PCCRX_COAL_PKT_MASK;
2458 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2459 
2460 	/* Configure shadow status block but don't enable posting. */
2461 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2462 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2463 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2464 
2465 	/* Disable Timer 1 and Timer 2. */
2466 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2467 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2468 
2469 	/* Configure retry transmit period, retry limit value. */
2470 	CSR_WRITE_4(sc, JME_TXTRHD,
2471 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2472 	    TXTRHD_RT_PERIOD_MASK) |
2473 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2474 	    TXTRHD_RT_LIMIT_MASK));
2475 
2476 	/* Disable RSS. */
2477 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2478 
2479 	/* Initialize the interrupt mask. */
2480 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2481 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2482 
2483 	/*
2484 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2485 	 * done after detection of valid link in jme_miibus_statchg.
2486 	 */
2487 	sc->jme_flags &= ~JME_FLAG_LINK;
2488 
2489 	/* Set the current media. */
2490 	mii = device_get_softc(sc->jme_miibus);
2491 	mii_mediachg(mii);
2492 
2493 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2494 
2495 	ifp->if_flags |= IFF_RUNNING;
2496 	ifp->if_flags &= ~IFF_OACTIVE;
2497 }
2498 
2499 static void
2500 jme_stop(struct jme_softc *sc)
2501 {
2502 	struct ifnet *ifp = &sc->arpcom.ac_if;
2503 	struct jme_txdesc *txd;
2504 	struct jme_rxdesc *rxd;
2505 	int i;
2506 
2507 	ASSERT_SERIALIZED(ifp->if_serializer);
2508 
2509 	/*
2510 	 * Mark the interface down and cancel the watchdog timer.
2511 	 */
2512 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2513 	ifp->if_timer = 0;
2514 
2515 	callout_stop(&sc->jme_tick_ch);
2516 	sc->jme_flags &= ~JME_FLAG_LINK;
2517 
2518 	/*
2519 	 * Disable interrupts.
2520 	 */
2521 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2522 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2523 
2524 	/* Disable updating shadow status block. */
2525 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2526 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2527 
2528 	/* Stop receiver, transmitter. */
2529 	jme_stop_rx(sc);
2530 	jme_stop_tx(sc);
2531 
2532 #ifdef foo
2533 	/* Reclaim Rx/Tx buffers that have been completed. */
2534 	jme_rxeof(sc);
2535 	if (sc->jme_cdata.jme_rxhead != NULL)
2536 		m_freem(sc->jme_cdata.jme_rxhead);
2537 	JME_RXCHAIN_RESET(sc);
2538 	jme_txeof(sc);
2539 #endif
2540 
2541 	/*
2542 	 * Free partially finished Rx segments.
2543 	 */
2544 	if (sc->jme_cdata.jme_rxhead != NULL)
2545 		m_freem(sc->jme_cdata.jme_rxhead);
2546 	JME_RXCHAIN_RESET(sc);
2547 
2548 	/*
2549 	 * Free RX and TX mbufs still in the queues.
2550 	 */
2551 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2552 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2553 		if (rxd->rx_m != NULL) {
2554 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2555 			    rxd->rx_dmamap);
2556 			m_freem(rxd->rx_m);
2557 			rxd->rx_m = NULL;
2558 		}
2559 	}
2560 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2561 		txd = &sc->jme_cdata.jme_txdesc[i];
2562 		if (txd->tx_m != NULL) {
2563 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2564 			    txd->tx_dmamap);
2565 			m_freem(txd->tx_m);
2566 			txd->tx_m = NULL;
2567 			txd->tx_ndesc = 0;
2568 		}
2569 	}
2570 }
2571 
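/*
 * Disable the transmitter and poll for up to roughly JME_TIMEOUT
 * microseconds waiting for it to actually stop.
 */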
2572 static void
2573 jme_stop_tx(struct jme_softc *sc)
2574 {
2575 	uint32_t reg;
2576 	int i;
2577 
2578 	reg = CSR_READ_4(sc, JME_TXCSR);
2579 	if ((reg & TXCSR_TX_ENB) == 0)
2580 		return;
2581 	reg &= ~TXCSR_TX_ENB;
2582 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2583 	for (i = JME_TIMEOUT; i > 0; i--) {
2584 		DELAY(1);
2585 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2586 			break;
2587 	}
2588 	if (i == 0)
2589 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2590 }
2591 
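/*
 * Disable the receiver and poll for up to roughly JME_TIMEOUT
 * microseconds waiting for it to actually stop.
 */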
2592 static void
2593 jme_stop_rx(struct jme_softc *sc)
2594 {
2595 	uint32_t reg;
2596 	int i;
2597 
2598 	reg = CSR_READ_4(sc, JME_RXCSR);
2599 	if ((reg & RXCSR_RX_ENB) == 0)
2600 		return;
2601 	reg &= ~RXCSR_RX_ENB;
2602 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2603 	for (i = JME_TIMEOUT; i > 0; i--) {
2604 		DELAY(1);
2605 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2606 			break;
2607 	}
2608 	if (i == 0)
2609 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2610 }
2611 
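/*
 * Reset the Tx ring: zero every descriptor, clear the per-slot
 * software state and sync the ring for the hardware.
 */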
2612 static void
2613 jme_init_tx_ring(struct jme_softc *sc)
2614 {
2615 	struct jme_ring_data *rd;
2616 	struct jme_txdesc *txd;
2617 	int i;
2618 
2619 	sc->jme_cdata.jme_tx_prod = 0;
2620 	sc->jme_cdata.jme_tx_cons = 0;
2621 	sc->jme_cdata.jme_tx_cnt = 0;
2622 
2623 	rd = &sc->jme_rdata;
2624 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2625 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2626 		txd = &sc->jme_cdata.jme_txdesc[i];
2627 		txd->tx_m = NULL;
2628 		txd->tx_desc = &rd->jme_tx_ring[i];
2629 		txd->tx_ndesc = 0;
2630 	}
2631 
2632 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2633 			sc->jme_cdata.jme_tx_ring_map,
2634 			BUS_DMASYNC_PREWRITE);
2635 }
2636 
2637 static void
2638 jme_init_ssb(struct jme_softc *sc)
2639 {
2640 	struct jme_ring_data *rd;
2641 
2642 	rd = &sc->jme_rdata;
2643 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2644 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2645 			BUS_DMASYNC_PREWRITE);
2646 }
2647 
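/*
 * Reset the Rx ring and attach a fresh mbuf cluster to every slot.
 * Returns an error if buffer allocation or DMA loading fails.
 */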
2648 static int
2649 jme_init_rx_ring(struct jme_softc *sc)
2650 {
2651 	struct jme_ring_data *rd;
2652 	struct jme_rxdesc *rxd;
2653 	int i;
2654 
2655 	KKASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2656 		 sc->jme_cdata.jme_rxtail == NULL &&
2657 		 sc->jme_cdata.jme_rxlen == 0);
2658 	sc->jme_cdata.jme_rx_cons = 0;
2659 
2660 	rd = &sc->jme_rdata;
2661 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2662 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2663 		int error;
2664 
2665 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2666 		rxd->rx_m = NULL;
2667 		rxd->rx_desc = &rd->jme_rx_ring[i];
2668 		error = jme_newbuf(sc, rxd, 1);
2669 		if (error)
2670 			return (error);
2671 	}
2672 
2673 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2674 			sc->jme_cdata.jme_rx_ring_map,
2675 			BUS_DMASYNC_PREWRITE);
2676 	return (0);
2677 }
2678 
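/*
 * Allocate and DMA-load a new mbuf cluster for an Rx slot, then hand
 * the descriptor back to the hardware. The spare DMA map is loaded
 * first and swapped in only on success, so the old buffer stays
 * intact if the new one cannot be loaded.
 */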
2679 static int
2680 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
2681 {
2682 	struct jme_desc *desc;
2683 	struct mbuf *m;
2684 	struct jme_dmamap_ctx ctx;
2685 	bus_dma_segment_t segs;
2686 	bus_dmamap_t map;
2687 	int error;
2688 
2689 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2690 	if (m == NULL)
2691 		return (ENOBUFS);
2692 	/*
2693 	 * The JMC250 has a 64bit boundary alignment limitation, so
2694 	 * jme(4) takes advantage of the hardware's 10 byte padding
2695 	 * feature to avoid copying the entire frame just to align
2696 	 * the IP header on a 32bit boundary.
2697 	 */
2698 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2699 
2700 	ctx.nsegs = 1;
2701 	ctx.segs = &segs;
2702 	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_rx_tag,
2703 				     sc->jme_cdata.jme_rx_sparemap,
2704 				     m, jme_dmamap_buf_cb, &ctx,
2705 				     BUS_DMA_NOWAIT);
2706 	if (error || ctx.nsegs == 0) {
2707 		if (!error) {
2708 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2709 					  sc->jme_cdata.jme_rx_sparemap);
2710 			error = EFBIG;
2711 			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
2712 		}
2713 		m_freem(m);
2714 
2715 		if (init)
2716 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2717 		return (error);
2718 	}
2719 
2720 	if (rxd->rx_m != NULL) {
2721 		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
2722 				BUS_DMASYNC_POSTREAD);
2723 		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
2724 	}
2725 	map = rxd->rx_dmamap;
2726 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2727 	sc->jme_cdata.jme_rx_sparemap = map;
2728 	rxd->rx_m = m;
2729 
2730 	desc = rxd->rx_desc;
2731 	desc->buflen = htole32(segs.ds_len);
2732 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2733 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2734 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2735 
2736 	return (0);
2737 }
2738 
2739 static void
2740 jme_set_vlan(struct jme_softc *sc)
2741 {
2742 	struct ifnet *ifp = &sc->arpcom.ac_if;
2743 	uint32_t reg;
2744 
2745 	ASSERT_SERIALIZED(ifp->if_serializer);
2746 
2747 	reg = CSR_READ_4(sc, JME_RXMAC);
2748 	reg &= ~RXMAC_VLAN_ENB;
2749 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2750 		reg |= RXMAC_VLAN_ENB;
2751 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2752 }
2753 
2754 static void
2755 jme_set_filter(struct jme_softc *sc)
2756 {
2757 	struct ifnet *ifp = &sc->arpcom.ac_if;
2758 	struct ifmultiaddr *ifma;
2759 	uint32_t crc;
2760 	uint32_t mchash[2];
2761 	uint32_t rxcfg;
2762 
2763 	ASSERT_SERIALIZED(ifp->if_serializer);
2764 
2765 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2766 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2767 	    RXMAC_ALLMULTI);
2768 
2769 	/*
2770 	 * Always accept frames destined to our station address.
2771 	 * Always accept broadcast frames.
2772 	 */
2773 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2774 
2775 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2776 		if (ifp->if_flags & IFF_PROMISC)
2777 			rxcfg |= RXMAC_PROMISC;
2778 		if (ifp->if_flags & IFF_ALLMULTI)
2779 			rxcfg |= RXMAC_ALLMULTI;
2780 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2781 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2782 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2783 		return;
2784 	}
2785 
2786 	/*
2787 	 * Set up the multicast address filter by passing all multicast
2788 	 * addresses through a CRC generator, and then using the low-order
2789 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2790 	 * high order bits select the register, while the rest of the bits
2791 	 * select the bit within the register.
2792 	 */
2793 	rxcfg |= RXMAC_MULTICAST;
2794 	bzero(mchash, sizeof(mchash));
2795 
2796 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2797 		if (ifma->ifma_addr->sa_family != AF_LINK)
2798 			continue;
2799 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2800 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2801 
2802 		/* Just want the 6 least significant bits. */
2803 		crc &= 0x3f;
2804 
2805 		/* Set the corresponding bit in the hash table. */
2806 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2807 	}
2808 
2809 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2810 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2811 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2812 }
2813 
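/*
 * Sysctl handlers validating the interrupt coalescing tunables
 * against the ranges the hardware accepts.
 */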
2814 static int
2815 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
2816 {
2817 	return (sysctl_int_range(oidp, arg1, arg2, req,
2818 	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
2819 }
2820 
2821 static int
2822 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2823 {
2824 	return (sysctl_int_range(oidp, arg1, arg2, req,
2825 	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
2826 }
2827 
2828 static int
2829 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
2830 {
2831 	return (sysctl_int_range(oidp, arg1, arg2, req,
2832 	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
2833 }
2834 
2835 static int
2836 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2837 {
2838 	return (sysctl_int_range(oidp, arg1, arg2, req,
2839 	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
2840 }
2841