1 /*	$OpenBSD: if_jme.c,v 1.2 2008/09/27 13:03:30 jsg Exp $	*/
2 /*-
3  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
29  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
30  */
31 
32 #include "bpfilter.h"
33 #include "vlan.h"
34 
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/sockio.h>
40 #include <sys/mbuf.h>
41 #include <sys/queue.h>
42 #include <sys/kernel.h>
43 #include <sys/device.h>
44 #include <sys/timeout.h>
45 #include <sys/socket.h>
46 
47 #include <machine/bus.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/in_var.h>
57 #include <netinet/ip.h>
58 #include <netinet/if_ether.h>
59 #endif
60 
61 #if NVLAN > 0
62 #include <net/if_types.h>
63 #include <net/if_vlan_var.h>
64 #endif
65 
66 #if NBPFILTER > 0
67 #include <net/bpf.h>
68 #endif
69 
70 #include <dev/rndvar.h>
71 
72 #include <dev/mii/mii.h>
73 #include <dev/mii/miivar.h>
74 #include <dev/mii/jmphyreg.h>
75 
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcidevs.h>
79 
80 #include <dev/pci/if_jmereg.h>
81 #include <dev/pci/if_jmevar.h>
82 
83 /* Define the following to enable printing of Rx errors. */
84 #undef	JME_SHOW_ERRORS
85 
86 //#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
87 
88 int	jme_match(struct device *, void *, void *);
89 void	jme_attach(struct device *, struct device *, void *);
90 int	jme_detach(struct device *, int);
91 
92 int	jme_miibus_readreg(struct device *, int, int);
93 void	jme_miibus_writereg(struct device *, int, int, int);
94 void	jme_miibus_statchg(struct device *);
95 
96 int	jme_init(struct ifnet *);
97 int	jme_ioctl(struct ifnet *, u_long, caddr_t);
98 
99 void	jme_start(struct ifnet *);
100 void	jme_watchdog(struct ifnet *);
101 void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
102 int	jme_mediachange(struct ifnet *);
103 
104 int	jme_intr(void *);
105 void	jme_txeof(struct jme_softc *);
106 void	jme_rxeof(struct jme_softc *);
107 
108 int	jme_dma_alloc(struct jme_softc *);
109 void	jme_dma_free(struct jme_softc *);
110 int	jme_init_rx_ring(struct jme_softc *);
111 void	jme_init_tx_ring(struct jme_softc *);
112 void	jme_init_ssb(struct jme_softc *);
113 int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
114 int	jme_encap(struct jme_softc *, struct mbuf **);
115 void	jme_rxpkt(struct jme_softc *);
116 
117 void	jme_tick(void *);
118 void	jme_stop(struct jme_softc *);
119 void	jme_reset(struct jme_softc *);
120 void	jme_set_vlan(struct jme_softc *);
121 void	jme_set_filter(struct jme_softc *);
122 void	jme_stop_tx(struct jme_softc *);
123 void	jme_stop_rx(struct jme_softc *);
124 void	jme_mac_config(struct jme_softc *);
125 void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
126 int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
127 int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
128 void	jme_discard_rxbufs(struct jme_softc *, int, int);
129 #ifdef notyet
130 void	jme_setwol(struct jme_softc *);
131 void	jme_setlinkspeed(struct jme_softc *);
132 #endif
133 
134 /*
135  * Devices supported by this driver.
136  */
137 const struct pci_matchid jme_devices[] = {
138 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
139 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
140 };
141 
142 struct cfattach jme_ca = {
143 	sizeof (struct jme_softc), jme_match, jme_attach
144 };
145 
146 struct cfdriver jme_cd = {
147 	NULL, "jme", DV_IFNET
148 };
149 
150 int jmedebug = 0;
151 #define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)
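
/*
 *	DPRINTF takes a doubly-parenthesized argument list so that the
 *	variable arguments survive the single-parameter macro, e.g.:
 *
 *		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
 */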
152 
153 /*
154  *	Read a PHY register on the MII of the JMC250.
155  */
156 int
157 jme_miibus_readreg(struct device *dev, int phy, int reg)
158 {
159 	struct jme_softc *sc = (struct jme_softc *)dev;
160 	uint32_t val;
161 	int i;
162 
163 	/* For FPGA version, PHY address 0 should be ignored. */
164 	if (sc->jme_caps & JME_CAP_FPGA) {
165 		if (phy == 0)
166 			return (0);
167 	} else {
168 		if (sc->jme_phyaddr != phy)
169 			return (0);
170 	}
171 
172 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
173 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
174 
175 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
176 		DELAY(1);
177 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
178 			break;
179 	}
180 	if (i == 0) {
181 		printf("%s: phy read timeout: phy %d, reg %d\n",
182 		    sc->sc_dev.dv_xname, phy, reg);
183 		return (0);
184 	}
185 
186 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
187 }
188 
189 /*
190  *	Write a PHY register on the MII of the JMC250.
191  */
192 void
193 jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
194 {
195 	struct jme_softc *sc = (struct jme_softc *)dev;
196 	int i;
197 
198 	/* For FPGA version, PHY address 0 should be ignored. */
199 	if (sc->jme_caps & JME_CAP_FPGA) {
200 		if (phy == 0)
201 			return;
202 	} else {
203 		if (sc->jme_phyaddr != phy)
204 			return;
205 	}
206 
207 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
208 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
209 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
210 
211 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
212 		DELAY(1);
213 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
214 			break;
215 	}
216 	if (i == 0) {
217 		printf("%s: phy write timeout: phy %d, reg %d\n",
218 		    sc->sc_dev.dv_xname, phy, reg);
219 	}
220 }
221 
222 /*
223  *	Callback from MII layer when media changes.
224  */
225 void
226 jme_miibus_statchg(struct device *dev)
227 {
228 	struct jme_softc *sc = (struct jme_softc *)dev;
229 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
230 	struct mii_data *mii;
231 	struct jme_txdesc *txd;
232 	bus_addr_t paddr;
233 	int i;
234 
235 	if ((ifp->if_flags & IFF_RUNNING) == 0)
236 		return;
237 
238 	mii = &sc->sc_miibus;
239 
240 	sc->jme_flags &= ~JME_FLAG_LINK;
241 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
242 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
243 		case IFM_10_T:
244 		case IFM_100_TX:
245 			sc->jme_flags |= JME_FLAG_LINK;
246 			break;
247 		case IFM_1000_T:
248 			if (sc->jme_caps & JME_CAP_FASTETH)
249 				break;
250 			sc->jme_flags |= JME_FLAG_LINK;
251 			break;
252 		default:
253 			break;
254 		}
255 	}
256 
257 	/*
258 	 * Disabling the Rx/Tx MACs has the side effect of resetting
259 	 * the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
260 	 * descriptor address, so the driver should reset its
261 	 * internal producer/consumer pointers and reclaim any
262 	 * allocated resources.  Note that just saving the values
263 	 * of the JME_TXNDA and JME_RXNDA registers before stopping
264 	 * the MAC and restoring them afterwards is not sufficient
265 	 * to guarantee a correct MAC state, because stopping the
266 	 * MAC operation can take a while and the hardware might
267 	 * have updated the JME_TXNDA/JME_RXNDA registers during
268 	 * the stop operation.
269 	 */
270 
271 	/* Disable interrupts */
272 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
273 
274 	/* Stop driver */
275 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
276 	ifp->if_timer = 0;
277 	timeout_del(&sc->jme_tick_ch);
278 
279 	/* Stop receiver/transmitter. */
280 	jme_stop_rx(sc);
281 	jme_stop_tx(sc);
282 
283 	jme_rxeof(sc);
284 	if (sc->jme_cdata.jme_rxhead != NULL)
285 		m_freem(sc->jme_cdata.jme_rxhead);
286 	JME_RXCHAIN_RESET(sc);
287 
288 	jme_txeof(sc);
289 	if (sc->jme_cdata.jme_tx_cnt != 0) {
290 		/* Remove queued packets for transmit. */
291 		for (i = 0; i < JME_TX_RING_CNT; i++) {
292 			txd = &sc->jme_cdata.jme_txdesc[i];
293 			if (txd->tx_m != NULL) {
294 				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
295 				m_freem(txd->tx_m);
296 				txd->tx_m = NULL;
297 				txd->tx_ndesc = 0;
298 				ifp->if_oerrors++;
299 			}
300 		}
301 	}
302 
303 	/*
304 	 * Reuse configured Rx descriptors and reset
305 	 * producer/consumer index.
306 	 */
307 	sc->jme_cdata.jme_rx_cons = 0;
308 
309 	jme_init_tx_ring(sc);
310 
311 	/* Initialize shadow status block. */
312 	jme_init_ssb(sc);
313 
314 	/* Program MAC with resolved speed/duplex/flow-control. */
315 	if (sc->jme_flags & JME_FLAG_LINK) {
316 		jme_mac_config(sc);
317 
318 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
319 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
320 
321 		/* Set Tx ring address to the hardware. */
322 		paddr = JME_TX_RING_ADDR(sc, 0);
323 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
324 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
325 
326 		/* Set Rx ring address to the hardware. */
327 		paddr = JME_RX_RING_ADDR(sc, 0);
328 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
329 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
330 
331 		/* Restart receiver/transmitter. */
332 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
333 		    RXCSR_RXQ_START);
334 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
335 	}
336 
337 	ifp->if_flags |= IFF_RUNNING;
338 	ifp->if_flags &= ~IFF_OACTIVE;
339 	timeout_add_sec(&sc->jme_tick_ch, 1);
340 
341 	/* Reenable interrupts. */
342 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
343 }
344 
345 /*
346  *	Get the current interface media status.
347  */
348 void
349 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
350 {
351 	struct jme_softc *sc = ifp->if_softc;
352 	struct mii_data *mii = &sc->sc_miibus;
353 
354 	mii_pollstat(mii);
355 	ifmr->ifm_status = mii->mii_media_status;
356 	ifmr->ifm_active = mii->mii_media_active;
357 }
358 
359 /*
360  *	Set hardware to newly-selected media.
361  */
362 int
363 jme_mediachange(struct ifnet *ifp)
364 {
365 	struct jme_softc *sc = ifp->if_softc;
366 	struct mii_data *mii = &sc->sc_miibus;
367 	int error;
368 
369 	if (mii->mii_instance != 0) {
370 		struct mii_softc *miisc;
371 
372 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
373 			mii_phy_reset(miisc);
374 	}
375 	error = mii_mediachg(mii);
376 
377 	return (error);
378 }
379 
380 int
381 jme_match(struct device *dev, void *match, void *aux)
382 {
383 	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
384 	    sizeof (jme_devices) / sizeof (jme_devices[0]));
385 }
386 
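/*
 *	Read one byte from the EEPROM through the chip's SMB interface:
 *	wait for the hardware to go idle, trigger a read of the given
 *	address, poll until the trigger bit clears and then pull the
 *	data out of JME_SMBINTF.
 */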
387 int
388 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
389 {
390 	uint32_t reg;
391 	int i;
392 
393 	*val = 0;
394 	for (i = JME_TIMEOUT; i > 0; i--) {
395 		reg = CSR_READ_4(sc, JME_SMBCSR);
396 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
397 			break;
398 		DELAY(1);
399 	}
400 
401 	if (i == 0) {
402 		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
403 		return (ETIMEDOUT);
404 	}
405 
406 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
407 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
408 	for (i = JME_TIMEOUT; i > 0; i--) {
409 		DELAY(1);
410 		reg = CSR_READ_4(sc, JME_SMBINTF);
411 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
412 			break;
413 	}
414 
415 	if (i == 0) {
416 		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
417 		return (ETIMEDOUT);
418 	}
419 
420 	reg = CSR_READ_4(sc, JME_SMBINTF);
421 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
422 
423 	return (0);
424 }
425 
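/*
 *	Try to fetch the station address from the EEPROM.  After the two
 *	signature bytes the EEPROM holds a list of descriptors; each
 *	descriptor that targets a JME_PAR0 station address register
 *	carries one byte of the ethernet address.  Returns ENOENT unless
 *	all ETHER_ADDR_LEN bytes are found.
 */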
426 int
427 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
428 {
429 	uint8_t fup, reg, val;
430 	uint32_t offset;
431 	int match;
432 
433 	offset = 0;
434 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
435 	    fup != JME_EEPROM_SIG0)
436 		return (ENOENT);
437 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
438 	    fup != JME_EEPROM_SIG1)
439 		return (ENOENT);
440 	match = 0;
441 	do {
442 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
443 			break;
444 		/* Check for the end of EEPROM descriptor. */
445 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
446 			break;
447 		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
448 		    JME_EEPROM_PAGE_BAR1) == fup) {
449 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
450 				break;
451 			if (reg >= JME_PAR0 &&
452 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
453 				if (jme_eeprom_read_byte(sc, offset + 2,
454 				    &val) != 0)
455 					break;
456 				eaddr[reg - JME_PAR0] = val;
457 				match++;
458 			}
459 		}
460 		/* Try next eeprom descriptor. */
461 		offset += JME_EEPROM_DESC_BYTES;
462 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
463 
464 	if (match == ETHER_ADDR_LEN)
465 		return (0);
466 
467 	return (ENOENT);
468 }
469 
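/*
 *	Read the station address from the JME_PAR0/JME_PAR1 registers,
 *	presumably loaded from the EEPROM at power-up.  If they hold an
 *	obviously bogus address (all zero, or with the multicast bit
 *	set), fall back to a random address under the JMicron OUI.
 */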
470 void
471 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
472 {
473 	uint32_t par0, par1;
474 
475 	/* Read station address. */
476 	par0 = CSR_READ_4(sc, JME_PAR0);
477 	par1 = CSR_READ_4(sc, JME_PAR1);
478 	par1 &= 0xFFFF;
479 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
480 		printf("%s: generating fake ethernet address.\n",
481 		    sc->sc_dev.dv_xname);
482 		par0 = arc4random();
483 		/* Set OUI to JMicron. */
484 		eaddr[0] = 0x00;
485 		eaddr[1] = 0x1B;
486 		eaddr[2] = 0x8C;
487 		eaddr[3] = (par0 >> 16) & 0xff;
488 		eaddr[4] = (par0 >> 8) & 0xff;
489 		eaddr[5] = par0 & 0xff;
490 	} else {
491 		eaddr[0] = (par0 >> 0) & 0xFF;
492 		eaddr[1] = (par0 >> 8) & 0xFF;
493 		eaddr[2] = (par0 >> 16) & 0xFF;
494 		eaddr[3] = (par0 >> 24) & 0xFF;
495 		eaddr[4] = (par1 >> 0) & 0xFF;
496 		eaddr[5] = (par1 >> 8) & 0xFF;
497 	}
498 }
499 
500 void
501 jme_attach(struct device *parent, struct device *self, void *aux)
502 {
503 	struct jme_softc *sc = (struct jme_softc *)self;
504 	struct pci_attach_args *pa = aux;
505 	pci_chipset_tag_t pc = pa->pa_pc;
506 	pci_intr_handle_t ih;
507 	const char *intrstr;
508 	pcireg_t memtype;
509 
510 	struct ifnet *ifp;
511 	uint32_t reg;
512 //	uint8_t pcie_ptr;
513 	int error = 0;
514 //	uint8_t eaddr[ETHER_ADDR_LEN];
515 
516 	/*
517 	 * Allocate IO memory
518 	 *
519 	 * The JMC250 supports both memory mapped and I/O register
520 	 * space access.  Because I/O register access would require
521 	 * different BARs to reach all registers, it is a waste of
522 	 * time to use I/O register space access.  The JMC250 uses
523 	 * 16K to map the entire memory space.
524 	 */
525 
526 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
527 	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
528 	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
529 		printf(": could not map mem space\n");
530 		return;
531 	}
532 
533 	if (pci_intr_map(pa, &ih) != 0) {
534 		printf(": could not map interrupt\n");
535 		return;
536 	}
537 
538 	/*
539 	 * Allocate IRQ
540 	 */
541 	intrstr = pci_intr_string(pc, ih);
542 	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
543 	    sc->sc_dev.dv_xname);
544 	if (sc->sc_irq_handle == NULL) {
545 		printf(": could not establish interrupt");
546 		if (intrstr != NULL)
547 			printf(" at %s", intrstr);
548 		printf("\n");
549 		return;
550 	}
551 	printf(": %s", intrstr);
552 
553 	sc->sc_dmat = pa->pa_dmat;
554 	sc->jme_pct = pa->pa_pc;
555 	sc->jme_pcitag = pa->pa_tag;
556 
557 	/*
558 	 * Extract FPGA revision
559 	 */
560 	reg = CSR_READ_4(sc, JME_CHIPMODE);
561 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
562 	    CHIPMODE_NOT_FPGA) {
563 		sc->jme_caps |= JME_CAP_FPGA;
564 
565 		if (jmedebug) {
566 			printf("%s: FPGA revision : 0x%04x\n",
567 			    sc->sc_dev.dv_xname,
568 			    (reg & CHIPMODE_FPGA_REV_MASK) >>
569 			    CHIPMODE_FPGA_REV_SHIFT);
570 		}
571 	}
572 
573 	/* Reset the ethernet controller. */
574 	jme_reset(sc);
575 
576 	/* Get station address. */
577 	reg = CSR_READ_4(sc, JME_SMBCSR);
578 	if (reg & SMBCSR_EEPROM_PRESENT)
579 		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
580 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
581 		if (error != 0 && (jmedebug)) {
582 			printf("%s: ethernet hardware address "
583 			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
584 		}
585 		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
586 	}
587 
588 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
589 
590 	/*
591 	 * Save PHY address.
592 	 * The integrated JR0211 has a fixed PHY address whereas the FPGA
593 	 * version requires PHY probing to get the correct PHY address.
594 	 */
595 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
596 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
597 		    GPREG0_PHY_ADDR_MASK;
598 		if (jmedebug) {
599 			printf("%s: PHY is at address %d.\n",
600 			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
601 		}
602 	} else {
603 		sc->jme_phyaddr = 0;
604 	}
605 
606 	/* Set max allowable DMA size. */
607 	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
608 	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
609 
610 #ifdef notyet
611 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
612 		sc->jme_caps |= JME_CAP_PMCAP;
613 #endif
614 
615 	/* Allocate DMA stuffs */
616 	error = jme_dma_alloc(sc);
617 	if (error)
618 		goto fail;
619 
620 	ifp = &sc->sc_arpcom.ac_if;
621 	ifp->if_softc = sc;
622 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
623 	ifp->if_init = jme_init;
624 	ifp->if_ioctl = jme_ioctl;
625 	ifp->if_start = jme_start;
626 	ifp->if_watchdog = jme_watchdog;
627 	ifp->if_baudrate = IF_Gbps(1);
628 	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
629 	IFQ_SET_READY(&ifp->if_snd);
630 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
631 
632 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
633 #if 0
634 	ifp->if_capabilities = IFCAP_HWCSUM |
635 			       IFCAP_VLAN_MTU |
636 			       IFCAP_VLAN_HWTAGGING;
637 	ifp->if_hwassist = JME_CSUM_FEATURES;
638 	ifp->if_capenable = ifp->if_capabilities;
639 #endif
640 	ifp->if_capabilities = IFCAP_VLAN_MTU;
641 
642 	/* Set up MII bus. */
643 	sc->sc_miibus.mii_ifp = ifp;
644 	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
645 	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
646 	sc->sc_miibus.mii_statchg = jme_miibus_statchg;
647 
648 	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
649 	    jme_mediastatus);
650 	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
651 	    MII_OFFSET_ANY, 0);
652 
653 	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
654 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
655 		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
656 		    0, NULL);
657 		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
658 	} else
659 		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
660 
661 	/*
662 	 * Saving PHYADDR for the FPGA-mode PHY is not handled; not production hw.
663 	 */
664 
665 	if_attach(ifp);
666 	ether_ifattach(ifp);
667 
668 	timeout_set(&sc->jme_tick_ch, jme_tick, sc);
669 
670 	return;
671 fail:
672 	jme_detach(&sc->sc_dev, 0);
673 }
674 
675 int
676 jme_detach(struct device *self, int flags)
677 {
678 	struct jme_softc *sc = (struct jme_softc *)self;
679 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
680 	int s;
681 
682 	s = splnet();
683 	jme_stop(sc);
684 	splx(s);
685 
686 	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
687 
688 	/* Delete all remaining media. */
689 	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
690 
691 	ether_ifdetach(ifp);
692 	if_detach(ifp);
693 	jme_dma_free(sc);
694 
695 	if (sc->sc_irq_handle != NULL) {
696 		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
697 		sc->sc_irq_handle = NULL;
698 	}
699 
700 	return (0);
701 }
702 
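/*
 *	Allocate all DMA resources: the Tx/Rx descriptor rings and the
 *	shadow status block each go through the usual bus_dmamap_create/
 *	bus_dmamem_alloc/bus_dmamem_map/bus_dmamap_load sequence, then
 *	per-buffer dmamaps are created for the Tx and Rx descriptors.
 */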
703 int
704 jme_dma_alloc(struct jme_softc *sc)
705 {
706 	struct jme_txdesc *txd;
707 	struct jme_rxdesc *rxd;
708 	int error, i, nsegs;
709 
710 	/*
711 	 * Create DMA stuffs for TX ring
712 	 */
713 
714 	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
715 	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
716 	    &sc->jme_cdata.jme_tx_ring_map);
717 	if (error)
718 		return (ENOBUFS);
719 
720 	/* Allocate DMA'able memory for TX ring */
721 	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
722 	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
723 	    BUS_DMA_WAITOK);
724 /* XXX zero */
725 	if (error) {
726 		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
727 		    sc->sc_dev.dv_xname);
728 		return error;
729 	}
730 
731 	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
732 	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
733 	    BUS_DMA_NOWAIT);
734 	if (error)
735 		return (ENOBUFS);
736 
737 	/*  Load the DMA map for Tx ring. */
738 	error = bus_dmamap_load(sc->sc_dmat,
739 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
740 	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
741 	if (error) {
742 		printf("%s: could not load DMA'able memory for Tx ring.\n",
743 		    sc->sc_dev.dv_xname);
744 		bus_dmamem_free(sc->sc_dmat,
745 		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
746 		return error;
747 	}
748 	sc->jme_rdata.jme_tx_ring_paddr =
749 	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;
750 
751 	/*
752 	 * Create DMA stuffs for RX ring
753 	 */
754 
755 	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
756 	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
757 	    &sc->jme_cdata.jme_rx_ring_map);
758 	if (error)
759 		return (ENOBUFS);
760 
761 	/* Allocate DMA'able memory for RX ring */
762 	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
763 	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
764 	    BUS_DMA_WAITOK);
765 /* XXX zero */
766 	if (error) {
767 		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
768 		    sc->sc_dev.dv_xname);
769 		return error;
770 	}
771 
772 	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
773 	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
774 	    BUS_DMA_NOWAIT);
775 	if (error)
776 		return (ENOBUFS);
777 
778 	bzero(sc->jme_rdata.jme_rx_ring, JME_RX_RING_SIZE);
779 
780 	/* Load the DMA map for Rx ring. */
781 	error = bus_dmamap_load(sc->sc_dmat,
782 	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
783 	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
784 	if (error) {
785 		printf("%s: could not load DMA'able memory for Rx ring.\n",
786 		    sc->sc_dev.dv_xname);
787 		bus_dmamem_free(sc->sc_dmat,
788 		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
789 		return error;
790 	}
791 	sc->jme_rdata.jme_rx_ring_paddr =
792 	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;
793 
794 #if 0
795 	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
796 	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
797 	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
798 	if ((JME_ADDR_HI(tx_ring_end) !=
799 	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
800 	    (JME_ADDR_HI(rx_ring_end) !=
801 	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
802 		printf("%s: 4GB boundary crossed, switching to 32bit "
803 		    "DMA address mode.\n", sc->sc_dev.dv_xname);
804 		jme_dma_free(sc);
805 		/* Limit DMA address space to 32bit and try again. */
806 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
807 		goto again;
808 	}
809 #endif
810 
811 	/*
812 	 * Create DMA stuffs for shadow status block
813 	 */
814 
815 	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
816 	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
817 	if (error)
818 		return (ENOBUFS);
819 
820 	/* Allocate DMA'able memory for shared status block. */
821 	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
822 	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
823 	if (error) {
824 		printf("%s: could not allocate DMA'able "
825 		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
826 		return error;
827 	}
828 
829 	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
830 	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
831 	    BUS_DMA_NOWAIT);
832 	if (error)
833 		return (ENOBUFS);
834 
835 	/* Load the DMA map for shared status block */
836 	error = bus_dmamap_load(sc->sc_dmat,
837 	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
838 	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
839 	if (error) {
840 		printf("%s: could not load DMA'able memory "
841 		    "for shared status block.\n", sc->sc_dev.dv_xname);
842 		bus_dmamem_free(sc->sc_dmat,
843 		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
844 		return error;
845 	}
846 	sc->jme_rdata.jme_ssb_block_paddr =
847 	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;
848 
849 	/*
850 	 * Create DMA stuffs for TX buffers
851 	 */
852 
853 	/* Create DMA maps for Tx buffers. */
854 	for (i = 0; i < JME_TX_RING_CNT; i++) {
855 		txd = &sc->jme_cdata.jme_txdesc[i];
856 		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
857 		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
858 		    &txd->tx_dmamap);
859 		if (error) {
860 			int j;
861 
862 			printf("%s: could not create %dth Tx dmamap.\n",
863 			    sc->sc_dev.dv_xname, i);
864 
865 			for (j = 0; j < i; ++j) {
866 				txd = &sc->jme_cdata.jme_txdesc[j];
867 				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
868 			}
869 			sc->jme_cdata.jme_tx_tag = NULL;
870 			return error;
871 		}
872 
873 	}
874 
875 	/*
876 	 * Create DMA stuffs for RX buffers
877 	 */
878 
879 	/* Create DMA maps for Rx buffers. */
880 	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
881 	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
882 	if (error) {
883 		printf("%s: could not create spare Rx dmamap.\n",
884 		    sc->sc_dev.dv_xname);
885 		return error;
886 	}
887 	for (i = 0; i < JME_RX_RING_CNT; i++) {
888 		rxd = &sc->jme_cdata.jme_rxdesc[i];
889 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
890 		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
891 		if (error) {
892 			int j;
893 
894 			printf("%s: could not create %dth Rx dmamap.\n",
895 			    sc->sc_dev.dv_xname, i);
896 
897 			for (j = 0; j < i; ++j) {
898 				rxd = &sc->jme_cdata.jme_rxdesc[j];
899 				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
900 			}
901 			bus_dmamap_destroy(sc->sc_dmat,
902 			    sc->jme_cdata.jme_rx_sparemap);
903 			sc->jme_cdata.jme_rx_tag = NULL;
904 			return error;
905 		}
906 	}
907 
908 	return 0;
909 }
910 
911 void
912 jme_dma_free(struct jme_softc *sc)
913 {
914 	struct jme_txdesc *txd;
915 	struct jme_rxdesc *rxd;
916 	int i;
917 
918 	/* Tx ring */
919 	bus_dmamap_unload(sc->sc_dmat,
920 	    sc->jme_cdata.jme_tx_ring_map);
921 	bus_dmamem_free(sc->sc_dmat,
922 	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);
923 
924 	/* Rx ring */
925 	bus_dmamap_unload(sc->sc_dmat,
926 	    sc->jme_cdata.jme_rx_ring_map);
927 	bus_dmamem_free(sc->sc_dmat,
928 	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
929 
930 	/* Tx buffers */
931 	for (i = 0; i < JME_TX_RING_CNT; i++) {
932 		txd = &sc->jme_cdata.jme_txdesc[i];
933 		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
934 	}
935 
936 	/* Rx buffers */
937 	for (i = 0; i < JME_RX_RING_CNT; i++) {
938 		rxd = &sc->jme_cdata.jme_rxdesc[i];
939 		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
940 	}
941 	bus_dmamap_destroy(sc->sc_dmat,
942 	    sc->jme_cdata.jme_rx_sparemap);
943 
944 	/* Shadow status block. */
945 	bus_dmamap_unload(sc->sc_dmat,
946 	    sc->jme_cdata.jme_ssb_map);
947 	bus_dmamem_free(sc->sc_dmat,
948 	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
949 }
950 
951 #ifdef notyet
952 /*
953  * Unlike other ethernet controllers, the JMC250 requires an
954  * explicit reset of the link speed to 10/100Mbps, as a
955  * gigabit link draws more than 375mA.
956  * Note, we reset the link speed to 10/100Mbps with
957  * auto-negotiation, but we don't know whether that operation
958  * will succeed, as we have no control after powering off.
959  * If the renegotiation fails, WOL may not work.  Running at
960  * 1Gbps draws more than the 375mA at 3.3V specified in the
961  * PCI specification, and that would result in power to the
962  * ethernet controller being shut down completely.
963  *
964  * TODO
965  *  Save the currently negotiated media speed/duplex/flow-control
966  *  to the softc and restore the same link again after resuming.
967  *  PHY handling such as powering down/resetting to 100Mbps
968  *  may be better handled in the PHY driver's suspend method.
969  */
970 void
971 jme_setlinkspeed(struct jme_softc *sc)
972 {
973 	struct mii_data *mii;
974 	int aneg, i;
975 
976 	JME_LOCK_ASSERT(sc);
977 
978 	mii = &sc->sc_miibus;
979 	mii_pollstat(mii);
980 	aneg = 0;
981 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
982 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
983 		case IFM_10_T:
984 		case IFM_100_TX:
985 			return;
986 		case IFM_1000_T:
987 			aneg++;
988 		default:
989 			break;
990 		}
991 	}
992 	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
993 	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
994 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
995 	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
996 	    BMCR_AUTOEN | BMCR_STARTNEG);
997 	DELAY(1000);
998 	if (aneg != 0) {
999 		/* Poll link state until jme(4) gets a 10/100 link. */
1000 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1001 			mii_pollstat(mii);
1002 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1003 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1004 				case IFM_10_T:
1005 				case IFM_100_TX:
1006 					jme_mac_config(sc);
1007 					return;
1008 				default:
1009 					break;
1010 				}
1011 			}
1012 			JME_UNLOCK(sc);
1013 			pause("jmelnk", hz);
1014 			JME_LOCK(sc);
1015 		}
1016 		if (i == MII_ANEGTICKS_GIGE)
1017 			printf("%s: establishing link failed, "
1018 			    "WOL may not work!\n", sc->sc_dev.dv_xname);
1019 	}
1020 	/*
1021 	 * No link, force MAC to have 100Mbps, full-duplex link.
1022 	 * This is the last resort and may/may not work.
1023 	 */
1024 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1025 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1026 	jme_mac_config(sc);
1027 }
1028 
1029 void
1030 jme_setwol(struct jme_softc *sc)
1031 {
1032 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1033 	uint32_t gpr, pmcs;
1034 	uint16_t pmstat;
1035 	int pmc;
1036 
1037 	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
1038 		/* No PME capability, PHY power down. */
1039 		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1040 		    MII_BMCR, BMCR_PDOWN);
1041 		return;
1042 	}
1043 
1044 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1045 	pmcs = CSR_READ_4(sc, JME_PMCS);
1046 	pmcs &= ~PMCS_WOL_ENB_MASK;
1047 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1048 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1049 		/* Enable PME message. */
1050 		gpr |= GPREG0_PME_ENB;
1051 		/* For gigabit controllers, reset link speed to 10/100. */
1052 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1053 			jme_setlinkspeed(sc);
1054 	}
1055 
1056 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1057 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1058 
1059 	/* Request PME. */
1060 	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
1061 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1062 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1063 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1064 	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1065 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1066 		/* No WOL, PHY power down. */
1067 		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1068 		    MII_BMCR, BMCR_PDOWN);
1069 	}
1070 }
1071 #endif
1072 
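/*
 *	Map an outgoing mbuf chain for transmission, copying it into a
 *	freshly allocated mbuf (cluster) if it has too many segments.
 *	The first descriptor carries the flags and the total packet
 *	length (in addr_hi); each DMA segment then gets a descriptor of
 *	its own.  JME_TD_OWN is set on the first descriptor last, so the
 *	hardware never sees a partially built chain.
 */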
1073 int
1074 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1075 {
1076 	struct jme_txdesc *txd;
1077 	struct jme_desc *desc;
1078 	struct mbuf *m;
1079 	int maxsegs;
1080 	int error, i, prod;
1081 	uint32_t cflags;
1082 
1083 	prod = sc->jme_cdata.jme_tx_prod;
1084 	txd = &sc->jme_cdata.jme_txdesc[prod];
1085 
1086 	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
1087 		  (JME_TXD_RSVD + 1);
1088 	if (maxsegs > JME_MAXTXSEGS)
1089 		maxsegs = JME_MAXTXSEGS;
1090 	if (maxsegs < (sc->jme_txd_spare - 1))
1091 		panic("%s: not enough segments %d\n", sc->sc_dev.dv_xname,
1092 		    maxsegs);
1093 
1094 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
1095 				     *m_head, BUS_DMA_NOWAIT);
1096 	if (error != 0) {
1097 		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1098 		error = EFBIG;
1099 	}
1100 	if (error == EFBIG) {
1101 		error = 0;
1102 
1103 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1104 		if (m == NULL) {
1105 			printf("%s: can't defrag TX mbuf\n",
1106 			    sc->sc_dev.dv_xname);
1107 			m_freem(*m_head);
1108 			*m_head = NULL;
1109 			return (ENOBUFS);
1110 		}
1111 
1112 		M_DUP_PKTHDR(m, *m_head);
1113 		if ((*m_head)->m_pkthdr.len > MHLEN) {
1114 			MCLGET(m, M_DONTWAIT);
1115 			if (!(m->m_flags & M_EXT)) {
1116 				m_freem(*m_head);
1117 				m_freem(m);
1118 				*m_head = NULL;
1119 				return (ENOBUFS);
1120 			}
1121 		}
1122 
1123 		m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len, mtod(m, caddr_t));
1124 		m_freem(*m_head);
1125 		m->m_len = m->m_pkthdr.len;
1126 		*m_head = m;
1127 
1128 		error = bus_dmamap_load_mbuf(sc->sc_dmat,
1129 					     txd->tx_dmamap, *m_head,
1130 					     BUS_DMA_NOWAIT);
1131 		if (error != 0) {
1132 			printf("%s: could not load defragged TX mbuf\n",
1133 			    sc->sc_dev.dv_xname);
1139 			m_freem(*m_head);
1140 			*m_head = NULL;
1141 			return (error);
1142 		}
1143 	} else if (error) {
1144 		printf("%s: could not load TX mbuf\n", sc->sc_dev.dv_xname);
1145 		return (error);
1146 	}
1147 
1148 	m = *m_head;
1149 	cflags = 0;
1150 
1151 #if 0
1152 	/* Configure checksum offload. */
1153 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1154 		cflags |= JME_TD_IPCSUM;
1155 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1156 		cflags |= JME_TD_TCPCSUM;
1157 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1158 		cflags |= JME_TD_UDPCSUM;
1159 
1160 	/* Configure VLAN. */
1161 	if (m->m_flags & M_VLANTAG) {
1162 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1163 		cflags |= JME_TD_VLAN_TAG;
1164 	}
1165 #endif
1166 
1167 	desc = &sc->jme_rdata.jme_tx_ring[prod];
1168 	desc->flags = htole32(cflags);
1169 	desc->buflen = 0;
1170 	desc->addr_hi = htole32(m->m_pkthdr.len);
1171 	desc->addr_lo = 0;
1172 	sc->jme_cdata.jme_tx_cnt++;
1173 	KASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
1174 	JME_DESC_INC(prod, JME_TX_RING_CNT);
1175 	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
1176 		desc = &sc->jme_rdata.jme_tx_ring[prod];
1177 		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1178 		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
1179 		desc->addr_hi =
1180 		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
1181 		desc->addr_lo =
1182 		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));
1183 
1184 		sc->jme_cdata.jme_tx_cnt++;
1185 		KASSERT(sc->jme_cdata.jme_tx_cnt <=
1186 			 JME_TX_RING_CNT - JME_TXD_RSVD);
1187 		JME_DESC_INC(prod, JME_TX_RING_CNT);
1188 	}
1189 
1190 	/* Update producer index. */
1191 	sc->jme_cdata.jme_tx_prod = prod;
1192 	/*
1193 	 * Finally, request an interrupt and give ownership of the
1194 	 * first descriptor to the hardware.
1195 	 */
1196 	desc = txd->tx_desc;
1197 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1198 
1199 	txd->tx_m = m;
1200 	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + 1;
1201 
1202 	/* Sync descriptors. */
1203 	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
1204 	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1205 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
1206 	     sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1207 
1208 	return (0);
1209 }
1210 
1211 void
1212 jme_start(struct ifnet *ifp)
1213 {
1214 	struct jme_softc *sc = ifp->if_softc;
1215 	struct mbuf *m_head;
1216 	int enq = 0;
1217 
1218 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1219 		return;
1220 
1221 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1222 		jme_txeof(sc);
1223 
1224 	for (;;) {
1225 		/*
1226 		 * Check number of available TX descs, always
1227 		 * leave JME_TXD_RSVD free TX descs.
1228 		 */
1229 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1230 		    JME_TX_RING_CNT - JME_TXD_RSVD) {
1231 			ifp->if_flags |= IFF_OACTIVE;
1232 			break;
1233 		}
1234 
1235 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1236 		if (m_head == NULL)
1237 			break;
1238 
1244 		/*
1245 		 * Pack the data into the transmit ring. If we
1246 		 * don't have room, set the OACTIVE flag and wait
1247 		 * for the NIC to drain the ring.
1248 		 */
1249 		if (jme_encap(sc, &m_head)) {
1250 			if (m_head == NULL) {
1251 				ifp->if_oerrors++;
1252 				break;
1253 			}
1254 //			ifq_prepend(&ifp->if_snd, m_head);
1255 			ifp->if_flags |= IFF_OACTIVE;
1256 			break;
1257 		}
1258 		enq++;
1259 
1260 		/*
1261 		 * If there's a BPF listener, bounce a copy of this frame
1262 		 * to him.
1263 		 */
1264 #if NBPFILTER > 0
1265 		if (ifp->if_bpf != NULL)
1266 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1267 #endif
1268 	}
1269 
1270 	if (enq > 0) {
1271 		/*
1272 		 * Reading TXCSR takes a very long time under heavy load
1273 		 * so cache the TXCSR value and write the ORed value with
1274 		 * the kick command to the TXCSR.  This saves one register
1275 		 * access cycle.
1276 		 */
1277 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1278 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1279 		/* Set a timeout in case the chip goes out to lunch. */
1280 		ifp->if_timer = JME_TX_TIMEOUT;
1281 	}
1282 }
1283 
1284 void
1285 jme_watchdog(struct ifnet *ifp)
1286 {
1287 	struct jme_softc *sc = ifp->if_softc;
1288 
1289 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1290 		printf("%s: watchdog timeout (missed link)\n",
1291 		    sc->sc_dev.dv_xname);
1292 		ifp->if_oerrors++;
1293 		jme_init(ifp);
1294 		return;
1295 	}
1296 
1297 	jme_txeof(sc);
1298 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1299 		printf("%s: watchdog timeout (missed Tx interrupts) "
1300 			  "-- recovering\n", sc->sc_dev.dv_xname);
1301 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1302 			jme_start(ifp);
1303 		return;
1304 	}
1305 
1306 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1307 	ifp->if_oerrors++;
1308 	jme_init(ifp);
1309 
1310 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1311 		jme_start(ifp);
1312 }
1313 
1314 int
1315 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1316 {
1317 	struct jme_softc *sc = ifp->if_softc;
1318 	struct mii_data *mii = &sc->sc_miibus;
1319 	struct ifreq *ifr = (struct ifreq *)data;
1320 	struct ifaddr *ifa = (struct ifaddr *)data;
1321 	int error = 0, s;
1322 
1323 	s = splnet();
1324 
1325 	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
1326 		splx(s);
1327 		return error;
1328 	}
1329 
1330 	switch (cmd) {
1331 	case SIOCSIFADDR:
1332 		ifp->if_flags |= IFF_UP;
1333 		if (!(ifp->if_flags & IFF_RUNNING))
1334 			jme_init(ifp);
1335 #ifdef INET
1336 		if (ifa->ifa_addr->sa_family == AF_INET)
1337 			arp_ifinit(&sc->sc_arpcom, ifa);
1338 #endif
1339 		break;
1340 	case SIOCSIFMTU:
1341 #if 0
1342 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1343 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1344 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1345 			error = EINVAL;
1346 			break;
1347 		}
1348 
1349 		if (ifp->if_mtu != ifr->ifr_mtu) {
1350 			/*
1351 			 * No special configuration is required when the
1352 			 * interface MTU is changed, but the availability of
1353 			 * Tx checksum offload should be checked against the
1354 			 * new MTU size as the FIFO size is just 2K.
1355 			 */
1356 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1357 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1358 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1359 			}
1360 			ifp->if_mtu = ifr->ifr_mtu;
1361 			if (ifp->if_flags & IFF_RUNNING)
1362 				jme_init(ifp);
1363 		}
1364 #endif
1365 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
1366 			error = EINVAL;
1367 		else if (ifp->if_mtu != ifr->ifr_mtu)
1368 			ifp->if_mtu = ifr->ifr_mtu;
1369 		break;
1370 
1371 	case SIOCSIFFLAGS:
1372 		if (ifp->if_flags & IFF_UP) {
1373 			if (ifp->if_flags & IFF_RUNNING) {
1374 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1375 				    (IFF_PROMISC | IFF_ALLMULTI))
1376 					jme_set_filter(sc);
1377 			} else
1378 				jme_init(ifp);
1381 		} else {
1382 			if (ifp->if_flags & IFF_RUNNING)
1383 				jme_stop(sc);
1384 		}
1385 		sc->jme_if_flags = ifp->if_flags;
1386 		break;
1387 
1388 	case SIOCADDMULTI:
1389 	case SIOCDELMULTI:
1390 		error = (cmd == SIOCADDMULTI) ?
1391 		    ether_addmulti(ifr, &sc->sc_arpcom) :
1392 		    ether_delmulti(ifr, &sc->sc_arpcom);
1393 
1394 		if (error == ENETRESET) {
1395 			if (ifp->if_flags & IFF_RUNNING)
1396 				jme_set_filter(sc);
1397 			error = 0;
1398 		}
1399 		break;
1400 
1401 	case SIOCSIFMEDIA:
1402 	case SIOCGIFMEDIA:
1403 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1404 		break;
1405 	default:
1406 		error = ENOTTY;
1407 		break;
1408 	}
1409 
1410 	splx(s);
1411 	return (error);
1412 }
1413 
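/*
 *	Reprogram the MAC for the speed/duplex the PHY resolved: set up
 *	GHC, collision detection and flow control in RXMAC/TXMAC/TXPFC,
 *	and select the extended PHY FIFO depth at 100Mbps to work around
 *	CRC errors on chips before the JMC250B.
 */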
1414 void
1415 jme_mac_config(struct jme_softc *sc)
1416 {
1417 	struct mii_data *mii;
1418 	uint32_t ghc, rxmac, txmac, txpause;
1419 	int phyconf = JMPHY_CONF_DEFFIFO;
1420 
1421 	mii = &sc->sc_miibus;
1422 
1423 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1424 	DELAY(10);
1425 	CSR_WRITE_4(sc, JME_GHC, 0);
1426 	ghc = 0;
1427 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1428 	rxmac &= ~RXMAC_FC_ENB;
1429 	txmac = CSR_READ_4(sc, JME_TXMAC);
1430 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1431 	txpause = CSR_READ_4(sc, JME_TXPFC);
1432 	txpause &= ~TXPFC_PAUSE_ENB;
1433 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1434 		ghc |= GHC_FULL_DUPLEX;
1435 		rxmac &= ~RXMAC_COLL_DET_ENB;
1436 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1437 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1438 		    TXMAC_FRAME_BURST);
1439 #ifdef notyet
1440 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1441 			txpause |= TXPFC_PAUSE_ENB;
1442 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1443 			rxmac |= RXMAC_FC_ENB;
1444 #endif
1445 		/* Disable retry transmit timer/retry limit. */
1446 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1447 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1448 	} else {
1449 		rxmac |= RXMAC_COLL_DET_ENB;
1450 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1451 		/* Enable retry transmit timer/retry limit. */
1452 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1453 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1454 	}
1455 
1456 	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
1457 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1458 	case IFM_10_T:
1459 		ghc |= GHC_SPEED_10;
1460 		break;
1461 
1462 	case IFM_100_TX:
1463 		ghc |= GHC_SPEED_100;
1464 
1465 		/*
1466 		 * Use extended FIFO depth to workaround CRC errors
1467 		 * emitted by chips before JMC250B
1468 		 */
1469 		phyconf = JMPHY_CONF_EXTFIFO;
1470 		break;
1471 
1472 	case IFM_1000_T:
1473 		if (sc->jme_caps & JME_CAP_FASTETH)
1474 			break;
1475 
1476 		ghc |= GHC_SPEED_1000;
1477 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1478 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1479 		break;
1480 
1481 	default:
1482 		break;
1483 	}
1484 	CSR_WRITE_4(sc, JME_GHC, ghc);
1485 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1486 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1487 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1488 
1489 	if (sc->jme_caps & JME_CAP_EXTFIFO) {
1490 		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1491 				    JMPHY_CONF, phyconf);
1492 	}
1493 }
1494 
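/*
 *	Interrupt handler.  Interrupts are masked while the handler
 *	runs; coalescing/timeout conditions are acknowledged in one
 *	write, Rx and Tx completions are processed, and interrupts are
 *	re-enabled on the way out.
 */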
1495 int
1496 jme_intr(void *xsc)
1497 {
1498 	struct jme_softc *sc = xsc;
1499 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1500 	uint32_t status;
1501 	int claimed = 0;
1502 
1503 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1504 	if (status == 0 || status == 0xFFFFFFFF)
1505 		return (0);
1506 
1507 	/* Disable interrupts. */
1508 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1509 
1510 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1511 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1512 		goto back;
1513 
1514 	/* Reset PCC counter/timer and Ack interrupts. */
1515 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1516 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1517 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1518 	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1519 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
1520 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1521 
1522 	if (ifp->if_flags & IFF_RUNNING) {
1523 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1524 			jme_rxeof(sc);
1525 
1526 		if (status & INTR_RXQ_DESC_EMPTY) {
1527 			/*
1528 			 * Notify the hardware that new Rx buffers are
1529 			 * available.  Reading RXCSR takes a very long time
1530 			 * under heavy load so cache the RXCSR value and
1531 			 * write the ORed value with the kick command to the
1532 			 * RXCSR.  This saves one register access cycle.
1533 			 */
1534 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1535 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1536 		}
1537 
1538 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1539 			jme_txeof(sc);
1540 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
1541 				jme_start(ifp);
1542 		}
1543 	}
1544 	claimed = 1;
1545 back:
1546 	/* Reenable interrupts. */
1547 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1548 
1549 	return (claimed);
1550 }
1551 
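/*
 *	Tx completion: walk the ring from the consumer index, free the
 *	mbufs of frames the hardware has finished with and clear
 *	IFF_OACTIVE once enough descriptors are free again.
 */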
1552 void
1553 jme_txeof(struct jme_softc *sc)
1554 {
1555 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1556 	struct jme_txdesc *txd;
1557 	uint32_t status;
1558 	int cons, nsegs;
1559 
1560 	cons = sc->jme_cdata.jme_tx_cons;
1561 	if (cons == sc->jme_cdata.jme_tx_prod)
1562 		return;
1563 
1564 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
1565 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1566 
1567 	/*
1568 	 * Go through our Tx list and free mbufs for those
1569 	 * frames which have been transmitted.
1570 	 */
1571 	while (cons != sc->jme_cdata.jme_tx_prod) {
1572 		txd = &sc->jme_cdata.jme_txdesc[cons];
1573 
1574 		if (txd->tx_m == NULL)
1575 			panic("%s: freeing NULL mbuf!\n", sc->sc_dev.dv_xname);
1576 
1577 		status = letoh32(txd->tx_desc->flags);
1578 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1579 			break;
1580 
1581 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1582 			ifp->if_oerrors++;
1583 		} else {
1584 			ifp->if_opackets++;
1585 			if (status & JME_TD_COLLISION) {
1586 				ifp->if_collisions +=
1587 				    letoh32(txd->tx_desc->buflen) &
1588 				    JME_TD_BUF_LEN_MASK;
1589 			}
1590 		}
1591 
1592 		/*
1593 		 * Only the first descriptor of a multi-descriptor
1594 		 * transmission is updated, so the driver has to skip the
1595 		 * entire chain of buffers for the transmitted frame.  In
1596 		 * other words, the JME_TD_OWN bit is valid only in the
1597 		 * first descriptor of a multi-descriptor transmission.
1598 		 */
1599 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1600 			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
1601 			JME_DESC_INC(cons, JME_TX_RING_CNT);
1602 		}
1603 
1604 		/* Reclaim transferred mbufs. */
1605 		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1606 		m_freem(txd->tx_m);
1607 		txd->tx_m = NULL;
1608 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1609 		if (sc->jme_cdata.jme_tx_cnt < 0)
1610 			panic("%s: Active Tx desc counter was garbled\n",
1611 			    sc->sc_dev.dv_xname);
1612 		txd->tx_ndesc = 0;
1613 	}
1614 	sc->jme_cdata.jme_tx_cons = cons;
1615 
1616 	if (sc->jme_cdata.jme_tx_cnt == 0)
1617 		ifp->if_timer = 0;
1618 
1619 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
1620 	    JME_TX_RING_CNT - JME_TXD_RSVD)
1621 		ifp->if_flags &= ~IFF_OACTIVE;
1622 
1623 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
1624 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1625 }
1626 
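/*
 *	Hand `count' Rx descriptors starting at `cons' back to the
 *	hardware unchanged, so that their buffers are simply reused.
 */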
1627 void
1628 jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
1629 {
1630 	int i;
1631 
1632 	for (i = 0; i < count; ++i) {
1633 		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];
1634 
1635 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
1636 		desc->buflen = htole32(MCLBYTES);
1637 		JME_DESC_INC(cons, JME_RX_RING_CNT);
1638 	}
1639 }
1640 
1641 /* Receive a frame. */
1642 void
1643 jme_rxpkt(struct jme_softc *sc)
1644 {
1645 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1646 	struct jme_desc *desc;
1647 	struct jme_rxdesc *rxd;
1648 	struct mbuf *mp, *m;
1649 	uint32_t flags, status;
1650 	int cons, count, nsegs;
1651 
1652 	cons = sc->jme_cdata.jme_rx_cons;
1653 	desc = &sc->jme_rdata.jme_rx_ring[cons];
1654 	flags = letoh32(desc->flags);
1655 	status = letoh32(desc->buflen);
1656 	nsegs = JME_RX_NSEGS(status);
1657 
1658 	if (status & JME_RX_ERR_STAT) {
1659 		ifp->if_ierrors++;
1660 		jme_discard_rxbufs(sc, cons, nsegs);
1661 #ifdef JME_SHOW_ERRORS
1662 		printf("%s: receive error = 0x%b\n",
1663 		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
1664 #endif
1665 		sc->jme_cdata.jme_rx_cons += nsegs;
1666 		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
1667 		return;
1668 	}
1669 
1670 	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
1671 	for (count = 0; count < nsegs; count++,
1672 	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
1673 		rxd = &sc->jme_cdata.jme_rxdesc[cons];
1674 		mp = rxd->rx_m;
1675 
1676 		/* Add a new receive buffer to the ring. */
1677 		if (jme_newbuf(sc, rxd, 0) != 0) {
1678 			ifp->if_iqdrops++;
1679 			/* Reuse buffer. */
1680 			jme_discard_rxbufs(sc, cons, nsegs - count);
1681 			if (sc->jme_cdata.jme_rxhead != NULL) {
1682 				m_freem(sc->jme_cdata.jme_rxhead);
1683 				JME_RXCHAIN_RESET(sc);
1684 			}
1685 			break;
1686 		}
1687 
1688 		/*
1689 		 * Assume we've received a full sized frame.  The actual
1690 		 * size is fixed up when we encounter the end of a
1691 		 * multi-segment frame.
1692 		 */
1693 		mp->m_len = MCLBYTES;
1694 
1695 		/* Chain received mbufs. */
1696 		if (sc->jme_cdata.jme_rxhead == NULL) {
1697 			sc->jme_cdata.jme_rxhead = mp;
1698 			sc->jme_cdata.jme_rxtail = mp;
1699 		} else {
1700 			/*
1701 			 * Receive processor can receive a maximum frame
1702 			 * size of 65535 bytes.
1703 			 */
1704 			mp->m_flags &= ~M_PKTHDR;
1705 			sc->jme_cdata.jme_rxtail->m_next = mp;
1706 			sc->jme_cdata.jme_rxtail = mp;
1707 		}
1708 
1709 		if (count == nsegs - 1) {
1710 			/* Last desc. for this frame. */
1711 			m = sc->jme_cdata.jme_rxhead;
1712 			/* XXX assert PKTHDR? */
1713 			m->m_flags |= M_PKTHDR;
1714 			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
1715 			if (nsegs > 1) {
1716 				/* Set first mbuf size. */
1717 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
1718 				/* Set last mbuf size. */
1719 				mp->m_len = sc->jme_cdata.jme_rxlen -
1720 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
1721 				    (MCLBYTES * (nsegs - 2)));
1722 			} else {
1723 				m->m_len = sc->jme_cdata.jme_rxlen;
1724 			}
1725 			m->m_pkthdr.rcvif = ifp;
1726 
1727 			/*
1728 			 * Account for the 10 bytes of auto padding used to
1729 			 * align the IP header on a 32bit boundary.  Also
1730 			 * note that the CRC bytes are automatically removed
1731 			 * by the hardware.
1732 			 */
1733 			m->m_data += JME_RX_PAD_BYTES;
1734 
1735 #if 0
1736 			/* Set checksum information. */
1737 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
1738 			    (flags & JME_RD_IPV4)) {
1739 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1740 				if (flags & JME_RD_IPCSUM)
1741 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1742 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
1743 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
1744 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
1745 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
1746 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
1747 					m->m_pkthdr.csum_flags |=
1748 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1749 					m->m_pkthdr.csum_data = 0xffff;
1750 				}
1751 			}
1752 
1753 			/* Check for VLAN tagged packets. */
1754 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1755 			    (flags & JME_RD_VLAN_TAG)) {
1756 				m->m_pkthdr.ether_vlantag =
1757 				    flags & JME_RD_VLAN_MASK;
1758 				m->m_flags |= M_VLANTAG;
1759 			}
1760 #endif
1761 
1762 #if NBPFILTER > 0
1763 			if (ifp->if_bpf)
1764 				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1765 #endif
1766 
1767 			ifp->if_ipackets++;
1768 			/* Pass it on. */
1769 			ether_input_mbuf(ifp, m);
1770 
1771 			/* Reset mbuf chains. */
1772 			JME_RXCHAIN_RESET(sc);
1773 		}
1774 	}
1775 
1776 	sc->jme_cdata.jme_rx_cons += nsegs;
1777 	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
1778 }
1779 
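/*
 *	Rx completion: process descriptors until we find one still
 *	owned by the hardware, sanity-checking the segment count
 *	against the reported packet size before handing each frame to
 *	jme_rxpkt().
 */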
1780 void
1781 jme_rxeof(struct jme_softc *sc)
1782 {
1783 	struct jme_desc *desc;
1784 	int nsegs, prog, pktlen;
1785 
1786 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
1787 	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1788 
1789 	prog = 0;
1790 	for (;;) {
1791 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
1792 		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
1793 			break;
1794 		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
1795 			break;
1796 
1797 		/*
1798 		 * Check number of segments against received bytes.
1799 		 * A non-matching value would indicate that the hardware
1800 		 * is still trying to update the Rx descriptors.  I'm not
1801 		 * sure whether this check is needed.
1802 		 */
1803 		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
1804 		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
1805 		if (nsegs != howmany(pktlen, MCLBYTES)) {
1806 			printf("%s: RX fragment count (%d) "
1807 			    "and packet size (%d) mismatch\n",
1808 			     sc->sc_dev.dv_xname, nsegs, pktlen);
1809 			break;
1810 		}
1811 
1812 		/* Received a frame. */
1813 		jme_rxpkt(sc);
1814 		prog++;
1815 	}
1816 
1817 	if (prog > 0) {
1818 		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
1819 		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1820 	}
1821 }
1822 
1823 void
1824 jme_tick(void *xsc)
1825 {
1826 	struct jme_softc *sc = xsc;
1827 	struct mii_data *mii = &sc->sc_miibus;
1828 	int s;
1829 
1830 	s = splnet();
1831 	mii_tick(mii);
1832 	timeout_add_sec(&sc->jme_tick_ch, 1);
1833 	splx(s);
1834 }
1835 
1836 void
1837 jme_reset(struct jme_softc *sc)
1838 {
1839 #ifdef foo
1840 	/* Stop receiver, transmitter. */
1841 	jme_stop_rx(sc);
1842 	jme_stop_tx(sc);
1843 #endif
1844 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1845 	DELAY(10);
1846 	CSR_WRITE_4(sc, JME_GHC, 0);
1847 }
1848 
1849 int
1850 jme_init(struct ifnet *ifp)
1851 {
1852 	struct jme_softc *sc = ifp->if_softc;
1853 	struct mii_data *mii;
1854 	uint8_t eaddr[ETHER_ADDR_LEN];
1855 	bus_addr_t paddr;
1856 	uint32_t reg;
1857 	int error;
1858 
1859 	/*
1860 	 * Cancel any pending I/O.
1861 	 */
1862 	jme_stop(sc);
1863 
1864 	/*
1865 	 * Reset the chip to a known state.
1866 	 */
1867 	jme_reset(sc);
1868 
1869 	/*
1870 	 * Since we always use 64bit address mode for transmitting,
1871 	 * each Tx request requires one more dummy descriptor.
1872 	 */
	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES) + 1;
1875 	KASSERT(sc->jme_txd_spare >= 2);
1876 
1877 	/* Init descriptors. */
1878 	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (1);
	}
1885 	jme_init_tx_ring(sc);
1886 
1887 	/* Initialize shadow status block. */
1888 	jme_init_ssb(sc);
1889 
1890 	/* Reprogram the station address. */
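	/* PAR0 holds the low four address bytes, PAR1 the high two. */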
1891 	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
1892 	CSR_WRITE_4(sc, JME_PAR0,
1893 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
1894 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
1895 
1896 	/*
1897 	 * Configure Tx queue.
1898 	 *  Tx priority queue weight value : 0
1899 	 *  Tx FIFO threshold for processing next packet : 16QW
1900 	 *  Maximum Tx DMA length : 512
1901 	 *  Allow Tx DMA burst.
1902 	 */
1903 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
1904 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
1905 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
1906 	sc->jme_txcsr |= sc->jme_tx_dma_size;
1907 	sc->jme_txcsr |= TXCSR_DMA_BURST;
1908 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
1909 
1910 	/* Set Tx descriptor counter. */
1911 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
1912 
1913 	/* Set Tx ring address to the hardware. */
1914 	paddr = JME_TX_RING_ADDR(sc, 0);
1915 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
1916 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
1917 
1918 	/* Configure TxMAC parameters. */
1919 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
1920 	reg |= TXMAC_THRESH_1_PKT;
1921 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
1922 	CSR_WRITE_4(sc, JME_TXMAC, reg);
1923 
1924 	/*
1925 	 * Configure Rx queue.
1926 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
1927 	 *  FIFO threshold for processing next packet : 128QW
1928 	 *  Rx queue 0 select
1929 	 *  Max Rx DMA length : 128
1930 	 *  Rx descriptor retry : 32
1931 	 *  Rx descriptor retry time gap : 256ns
1932 	 *  Don't receive runt/bad frame.
1933 	 */
1934 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
1935 	/*
1936 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
1937 	 * than 4K bytes will suffer from Rx FIFO overruns. So
1938 	 * decrease FIFO threshold to reduce the FIFO overruns for
1939 	 * frames larger than 4000 bytes.
1940 	 * For best performance of standard MTU sized frames use
1941 	 * maximum allowable FIFO threshold, 128QW.
1942 	 */
1943 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
1944 	    JME_RX_FIFO_SIZE)
1945 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1946 	else
1947 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
1948 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
1949 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
1950 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
1951 	/* XXX TODO DROP_BAD */
1952 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
1953 
1954 	/* Set Rx descriptor counter. */
1955 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
1956 
1957 	/* Set Rx ring address to the hardware. */
1958 	paddr = JME_RX_RING_ADDR(sc, 0);
1959 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
1960 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
1961 
1962 	/* Clear receive filter. */
1963 	CSR_WRITE_4(sc, JME_RXMAC, 0);
1964 
1965 	/* Set up the receive filter. */
1966 	jme_set_filter(sc);
1967 	jme_set_vlan(sc);
1968 
1969 	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
1971 	 * operation. Also clear WOL detection status bits.
1972 	 */
1973 	reg = CSR_READ_4(sc, JME_PMCS);
1974 	reg &= ~PMCS_WOL_ENB_MASK;
1975 	CSR_WRITE_4(sc, JME_PMCS, reg);
1976 
1977 	/*
	 * Pad 10 bytes right before the received frame.  This greatly
	 * helps Rx performance on strict-alignment architectures, as
	 * the frame need not be copied to align the payload.
1981 	 */
1982 	reg = CSR_READ_4(sc, JME_RXMAC);
1983 	reg |= RXMAC_PAD_10BYTES;
1984 
1985 #if 0
1986 	if (ifp->if_capenable & IFCAP_RXCSUM)
1987 		reg |= RXMAC_CSUM_ENB;
1988 #endif
1989 	CSR_WRITE_4(sc, JME_RXMAC, reg);
1990 
1991 	/* Configure general purpose reg0 */
1992 	reg = CSR_READ_4(sc, JME_GPREG0);
1993 	reg &= ~GPREG0_PCC_UNIT_MASK;
1994 	/* Set PCC timer resolution to micro-seconds unit. */
1995 	reg |= GPREG0_PCC_UNIT_US;
1996 	/*
	 * Disable all shadow register posting, as we have to read
	 * the JME_INTR_STATUS register in jme_intr.  Also, it seems
	 * hard to synchronize the interrupt status between hardware
	 * and software with shadow posting, due to the requirements
	 * of bus_dmamap_sync(9).
2002 	 */
2003 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2004 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2005 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2006 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2007 	/* Disable posting of DW0. */
2008 	reg &= ~GPREG0_POST_DW0_ENB;
2009 	/* Clear PME message. */
2010 	reg &= ~GPREG0_PME_ENB;
2011 	/* Set PHY address. */
2012 	reg &= ~GPREG0_PHY_ADDR_MASK;
2013 	reg |= sc->jme_phyaddr;
2014 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2015 
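	/*
	 * Packet completion coalescing defers the completion interrupt
	 * until either the packet count or the timeout threshold (in
	 * the PCC time unit selected above) is reached.
	 */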
2016 	/* Configure Tx queue 0 packet completion coalescing. */
2017 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
2018 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2019 	    PCCTX_COAL_TO_MASK;
2020 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
2021 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2022 	    PCCTX_COAL_PKT_MASK;
2023 	reg |= PCCTX_COAL_TXQ0;
2024 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2025 
2026 	/* Configure Rx queue 0 packet completion coalescing. */
2027 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
2028 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2029 	    PCCRX_COAL_TO_MASK;
2030 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
2031 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2032 	    PCCRX_COAL_PKT_MASK;
2033 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2034 
2035 	/* Configure shadow status block but don't enable posting. */
2036 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2037 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2038 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2039 
2040 	/* Disable Timer 1 and Timer 2. */
2041 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2042 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2043 
2044 	/* Configure retry transmit period, retry limit value. */
2045 	CSR_WRITE_4(sc, JME_TXTRHD,
2046 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2047 	    TXTRHD_RT_PERIOD_MASK) |
2048 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));
2050 
2051 	/* Disable RSS. */
2052 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2053 
2054 	/* Initialize the interrupt mask. */
2055 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
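	/* Writing all 1s acknowledges any pending interrupt status. */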
2056 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2057 
2058 	/*
2059 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
2061 	 */
2062 	sc->jme_flags &= ~JME_FLAG_LINK;
2063 
2064 	/* Set the current media. */
2065 	mii = &sc->sc_miibus;
2066 	mii_mediachg(mii);
2067 
2068 	timeout_add_sec(&sc->jme_tick_ch, 1);
2069 
2070 	ifp->if_flags |= IFF_RUNNING;
2071 	ifp->if_flags &= ~IFF_OACTIVE;
2072 
2073 	return (0);
2074 }
2075 
2076 void
2077 jme_stop(struct jme_softc *sc)
2078 {
2079 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2080 	struct jme_txdesc *txd;
2081 	struct jme_rxdesc *rxd;
2082 	int i;
2083 
2084 	/*
2085 	 * Mark the interface down and cancel the watchdog timer.
2086 	 */
2087 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2088 	ifp->if_timer = 0;
2089 
2090 	timeout_del(&sc->jme_tick_ch);
2091 	sc->jme_flags &= ~JME_FLAG_LINK;
2092 
2093 	/*
2094 	 * Disable interrupts.
2095 	 */
2096 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2097 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2098 
2099 	/* Disable updating shadow status block. */
2100 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2101 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2102 
2103 	/* Stop receiver, transmitter. */
2104 	jme_stop_rx(sc);
2105 	jme_stop_tx(sc);
2106 
2107 #ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
2109 	jme_rxeof(sc);
2110 	if (sc->jme_cdata.jme_rxhead != NULL)
2111 		m_freem(sc->jme_cdata.jme_rxhead);
2112 	JME_RXCHAIN_RESET(sc);
2113 	jme_txeof(sc);
2114 #endif
2115 
2116 	/*
	 * Free the partially assembled Rx mbuf chain, if any.
2118 	 */
2119 	if (sc->jme_cdata.jme_rxhead != NULL)
2120 		m_freem(sc->jme_cdata.jme_rxhead);
2121 	JME_RXCHAIN_RESET(sc);
2122 
2123 	/*
2124 	 * Free RX and TX mbufs still in the queues.
2125 	 */
2126 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2127 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2128 		if (rxd->rx_m != NULL) {
2129 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2130 			m_freem(rxd->rx_m);
2131 			rxd->rx_m = NULL;
2132 		}
2133         }
2134 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2135 		txd = &sc->jme_cdata.jme_txdesc[i];
2136 		if (txd->tx_m != NULL) {
2137 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2138 			m_freem(txd->tx_m);
2139 			txd->tx_m = NULL;
2140 			txd->tx_ndesc = 0;
2141 		}
2142         }
2143 }
2144 
2145 void
2146 jme_stop_tx(struct jme_softc *sc)
2147 {
2148 	uint32_t reg;
2149 	int i;
2150 
2151 	reg = CSR_READ_4(sc, JME_TXCSR);
2152 	if ((reg & TXCSR_TX_ENB) == 0)
2153 		return;
2154 	reg &= ~TXCSR_TX_ENB;
2155 	CSR_WRITE_4(sc, JME_TXCSR, reg);
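	/* Poll (in 1us steps) until the transmitter reports idle. */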
2156 	for (i = JME_TIMEOUT; i > 0; i--) {
2157 		DELAY(1);
2158 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2159 			break;
2160 	}
2161 	if (i == 0)
2162 		printf("%s: stopping transmitter timeout!\n",
2163 		    sc->sc_dev.dv_xname);
2164 }
2165 
2166 void
2167 jme_stop_rx(struct jme_softc *sc)
2168 {
2169 	uint32_t reg;
2170 	int i;
2171 
2172 	reg = CSR_READ_4(sc, JME_RXCSR);
2173 	if ((reg & RXCSR_RX_ENB) == 0)
2174 		return;
2175 	reg &= ~RXCSR_RX_ENB;
2176 	CSR_WRITE_4(sc, JME_RXCSR, reg);
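	/* Poll (in 1us steps) until the receiver reports idle. */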
2177 	for (i = JME_TIMEOUT; i > 0; i--) {
2178 		DELAY(1);
2179 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2180 			break;
2181 	}
2182 	if (i == 0)
2183 		printf("%s: stopping recevier timeout!\n", sc->sc_dev.dv_xname);
2184 }
2185 
2186 void
2187 jme_init_tx_ring(struct jme_softc *sc)
2188 {
2189 	struct jme_ring_data *rd;
2190 	struct jme_txdesc *txd;
2191 	int i;
2192 
2193 	sc->jme_cdata.jme_tx_prod = 0;
2194 	sc->jme_cdata.jme_tx_cons = 0;
2195 	sc->jme_cdata.jme_tx_cnt = 0;
2196 
2197 	rd = &sc->jme_rdata;
2198 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2199 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2200 		txd = &sc->jme_cdata.jme_txdesc[i];
2201 		txd->tx_m = NULL;
2202 		txd->tx_desc = &rd->jme_tx_ring[i];
2203 		txd->tx_ndesc = 0;
2204 	}
2205 
2206 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
2207 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2208 }
2209 
2210 void
2211 jme_init_ssb(struct jme_softc *sc)
2212 {
2213 	struct jme_ring_data *rd;
2214 
2215 	rd = &sc->jme_rdata;
2216 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2217 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
2218 	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2219 }
2220 
2221 int
2222 jme_init_rx_ring(struct jme_softc *sc)
2223 {
2224 	struct jme_ring_data *rd;
2225 	struct jme_rxdesc *rxd;
2226 	int i;
2227 
2228 	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2229 		 sc->jme_cdata.jme_rxtail == NULL &&
2230 		 sc->jme_cdata.jme_rxlen == 0);
2231 	sc->jme_cdata.jme_rx_cons = 0;
2232 
2233 	rd = &sc->jme_rdata;
2234 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2235 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2236 		int error;
2237 
2238 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2239 		rxd->rx_m = NULL;
2240 		rxd->rx_desc = &rd->jme_rx_ring[i];
2241 		error = jme_newbuf(sc, rxd, 1);
2242 		if (error)
2243 			return (error);
2244 	}
2245 
2246 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
2247 	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2248 
2249 	return (0);
2250 }
2251 
2252 int
2253 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
2254 {
2255 	struct jme_desc *desc;
2256 	struct mbuf *m;
2257 	bus_dmamap_t map;
2258 	int error;
2259 
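	/*
	 * During initialization we may sleep waiting for an mbuf;
	 * once running (interrupt context) we must not.
	 */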
2260 	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2261 	if (m == NULL)
2262 		return (ENOBUFS);
2263 	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2264 	if (!(m->m_flags & M_EXT)) {
2265 		m_freem(m);
2266 		return (ENOBUFS);
2267 	}
2268 
2269 	/*
	 * The JMC250 has a 64bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10 byte padding
	 * feature in order not to copy the entire frame just to
	 * align the IP header on a 32bit boundary.
2274 	 */
2275 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2276 
	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		/*
		 * bus_dmamap_load_mbuf(9) itself returns EFBIG when the
		 * chain needs more segments than the map allows, so no
		 * separate segment count check is required here.
		 */
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}
2294 
2295 	if (rxd->rx_m != NULL) {
2296 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2297 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2298 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2299 	}
2300 	map = rxd->rx_dmamap;
2301 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2302 	sc->jme_cdata.jme_rx_sparemap = map;
2303 	rxd->rx_m = m;
2304 
2305 	desc = rxd->rx_desc;
2306 	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
2307 	desc->addr_lo =
2308 	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
2309 	desc->addr_hi =
2310 	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
2311 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2312 
2313 	return (0);
2314 }
2315 
2316 void
2317 jme_set_vlan(struct jme_softc *sc)
2318 {
2319 //	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2320 	uint32_t reg;
2321 
2322 	reg = CSR_READ_4(sc, JME_RXMAC);
2323 	reg &= ~RXMAC_VLAN_ENB;
2324 #if 0
2325 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2326 		reg |= RXMAC_VLAN_ENB;
2327 #endif
2328 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2329 }
2330 
2331 void
2332 jme_set_filter(struct jme_softc *sc)
2333 {
2334 	struct arpcom *ac = &sc->sc_arpcom;
2335 	struct ifnet *ifp = &ac->ac_if;
2336 	struct ether_multi *enm;
2337 	struct ether_multistep step;
2338 	uint32_t crc;
2339 	uint32_t mchash[2];
2340 	uint32_t rxcfg;
2341 
2342 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2343 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2344 	    RXMAC_ALLMULTI);
2345 
2346 	/*
2347 	 * Always accept frames destined to our station address.
2348 	 * Always accept broadcast frames.
2349 	 */
2350 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2351 
2352 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2353 		if (ifp->if_flags & IFF_PROMISC)
2354 			rxcfg |= RXMAC_PROMISC;
2355 		if (ifp->if_flags & IFF_ALLMULTI)
2356 			rxcfg |= RXMAC_ALLMULTI;
2357 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2358 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2359 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2360 		return;
2361 	}
2362 
2363 	/*
2364 	 * Set up the multicast address filter by passing all multicast
2365 	 * addresses through a CRC generator, and then using the low-order
2366 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2367 	 * high order bits select the register, while the rest of the bits
2368 	 * select the bit within the register.
2369 	 */
2370 	rxcfg |= RXMAC_MULTICAST;
2371 	bzero(mchash, sizeof(mchash));
2372 
2373 	ETHER_FIRST_MULTI(step, ac, enm);
2374 	while (enm != NULL) {
2375 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2376 		    enm->enm_addrlo), ETHER_ADDR_LEN);
2377 
2378 		/* Just want the 6 least significant bits. */
2379 		crc &= 0x3f;
2380 
2381 		/* Set the corresponding bit in the hash table. */
2382 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2383 
2384 		ETHER_NEXT_MULTI(step, enm);
2385 	}
2386 
2387 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2388 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2389 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2390 }
2391