/*	$OpenBSD: if_jme.c,v 1.49 2017/01/22 10:17:38 dlg Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>
#include <dev/mii/jmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_jmereg.h>
#include <dev/pci/if_jmevar.h>
/* Define the following to enable printing of Rx errors. */
#undef	JME_SHOW_ERRORS

int	jme_match(struct device *, void *, void *);
void	jme_map_intr_vector(struct jme_softc *);
void	jme_attach(struct device *, struct device *, void *);
int	jme_detach(struct device *, int);

int	jme_miibus_readreg(struct device *, int, int);
void	jme_miibus_writereg(struct device *, int, int, int);
void	jme_miibus_statchg(struct device *);

int	jme_init(struct ifnet *);
int	jme_ioctl(struct ifnet *, u_long, caddr_t);

void	jme_start(struct ifnet *);
void	jme_watchdog(struct ifnet *);
void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
int	jme_mediachange(struct ifnet *);

int	jme_intr(void *);
void	jme_txeof(struct jme_softc *);
void	jme_rxeof(struct jme_softc *);

int	jme_dma_alloc(struct jme_softc *);
void	jme_dma_free(struct jme_softc *);
int	jme_init_rx_ring(struct jme_softc *);
void	jme_init_tx_ring(struct jme_softc *);
void	jme_init_ssb(struct jme_softc *);
int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
int	jme_encap(struct jme_softc *, struct mbuf *);
void	jme_rxpkt(struct jme_softc *);

void	jme_tick(void *);
void	jme_stop(struct jme_softc *);
void	jme_reset(struct jme_softc *);
void	jme_set_vlan(struct jme_softc *);
void	jme_iff(struct jme_softc *);
void	jme_stop_tx(struct jme_softc *);
void	jme_stop_rx(struct jme_softc *);
void	jme_mac_config(struct jme_softc *);
void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
void	jme_discard_rxbufs(struct jme_softc *, int, int);
#ifdef notyet
void	jme_setwol(struct jme_softc *);
void	jme_setlinkspeed(struct jme_softc *);
#endif

/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)
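/*
 * DPRINTF() expands "printf x" textually, so call sites need doubled
 * parentheses around the format string and its arguments, e.g.:
 *
 *	DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
 */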

/*
 *	Read a PHY register on the MII of the JMC250.
 */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the JMC250.
 */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return;

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 *	Callback from MII layer when media changes.
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has the side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first Tx/Rx descriptor
	 * address, so the driver should reset its internal
	 * producer/consumer pointers and reclaim any allocated resources.
	 * Note that just saving the JME_TXNDA and JME_RXNDA register
	 * values before stopping the MAC and restoring them afterwards is
	 * not sufficient to guarantee a correct MAC state, because
	 * stopping MAC operation can take a while and the hardware may
	 * have updated JME_TXNDA/JME_RXNDA during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset the
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 *	Get the current interface media status.
 */
void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
jme_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
	    sizeof (jme_devices) / sizeof (jme_devices[0]));
}

int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

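	/*
	 * The EEPROM appears to hold a list of JME_EEPROM_DESC_BYTES-sized
	 * descriptors: a function/page byte ("fup"), a register offset and
	 * a data byte.  A descriptor whose register offset falls within
	 * JME_PAR0 .. JME_PAR0 + ETHER_ADDR_LEN - 1 contributes one byte
	 * of the station address; the walk below stops at
	 * JME_EEPROM_DESC_END or once all six bytes have been collected.
	 */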
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

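	/*
	 * The station address is packed little-endian across PAR0/PAR1;
	 * for example, PAR0 = 0x44332211 and PAR1 = 0x00006655 yield the
	 * address 11:22:33:44:55:66.
	 */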
	eaddr[0] = (par0 >> 0) & 0xFF;
	eaddr[1] = (par0 >> 8) & 0xFF;
	eaddr[2] = (par0 >> 16) & 0xFF;
	eaddr[3] = (par0 >> 24) & 0xFF;
	eaddr[4] = (par1 >> 0) & 0xFF;
	eaddr[5] = (par1 >> 8) & 0xFF;
}

void
jme_map_intr_vector(struct jme_softc *sc)
{
	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];

	bzero(map, sizeof(map));

	/* Map Tx interrupt sources to MSI/MSIX vector 2. */
	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);

	/* Map Rx interrupt sources to MSI/MSIX vector 1. */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);

	/* All other interrupt sources stay mapped to MSI/MSIX vector 0. */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}

void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;
	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access would require the use of a
	 * different BAR, it's a waste of time to use I/O register space
	 * access.  JMC250 uses 16K to map the entire memory space.
	 */

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) == 0)
		jme_map_intr_vector(sc);
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (jmedebug)) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 |
	    IFCAP_CSUM_UDPv6;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff,
	    sc->jme_caps & JME_CAP_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Save PHYADDR for FPGA mode; PHY not handled, not production hw.
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}

int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA stuffs for TX ring
	 */

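	/*
	 * Each DMA area below follows the usual four-step bus_dma(9)
	 * sequence: bus_dmamap_create() to obtain a map, bus_dmamem_alloc()
	 * for the physical memory, bus_dmamem_map() for a kernel virtual
	 * address, and bus_dmamap_load() to get the bus address that is
	 * later programmed into the chip.
	 */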
	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/*  Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;

#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create DMA stuffs for shadow status block
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}

	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}

void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed as we have no
 * control after powering off.  If the renegotiation fails, WOL may not
 * work.  Running at 1Gbps draws more power than the 375mA at 3.3V
 * specified in the PCI specification, and that would result in
 * completely shutting down power to the ethernet controller.
 *
 * TODO
 *  Save current negotiated media speed/duplex/flow-control
 *  to softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in suspend method in phy driver.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) get a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

int
jme_encap(struct jme_softc *sc, struct mbuf *m)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
					     m, BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

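	/*
	 * Build the descriptor chain: the head descriptor carries the
	 * checksum/VLAN flags and the total packet length (in addr_hi),
	 * followed by one descriptor per DMA segment with JME_TD_OWN set.
	 * Ownership of the head descriptor is flipped last, below, so the
	 * hardware never sees a partially built chain.
	 */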
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the first
	 * descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	     sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);

  drop:
	m_freem(m);
	return (error);
}

void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq = 0;

	/* Reclaim transmitted frames. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	for (;;) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
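		/*
		 * jme_encap() charges each packet tx_dmamap->dm_nsegs +
		 * JME_TXD_RSVD descriptors (the reserved count presumably
		 * covering the head descriptor), so this check declares
		 * the ring full while 2 * JME_TXD_RSVD descriptors are
		 * still free.
		 */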
		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		jme_init(ifp);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n", sc->sc_dev.dv_xname);
		jme_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	jme_init(ifp);
	jme_start(ifp);
}

int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			jme_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				jme_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			jme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

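	/* Pulse the global reset, as in jme_reset(), before reprogramming. */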
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* set clock sources for tx mac and offload engine */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware of the availability of new Rx
			 * buffers.  Reading RXCSR takes a very long time
			 * under heavy load, so cache the RXCSR value and
			 * write the ORed value with the kick command to
			 * RXCSR. This saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}

void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);

		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip over
		 * the chained buffers of the transmitted frame. In other
		 * words, the JME_TD_OWN bit is valid only in the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}

/* Receive a frame. */
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s: receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
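			/*
			 * Worked example, assuming MCLBYTES is 2048 and
			 * JME_RX_PAD_BYTES is 10: a 3000-byte frame spread
			 * over two segments yields a first mbuf of
			 * 2048 - 10 = 2038 bytes and a last mbuf of
			 * 3000 - 2038 = 962 bytes.
			 */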

			/*
			 * Account for the 10 bytes of auto padding that is
			 * used to align the IP header on a 32-bit boundary.
			 * Also note that the CRC bytes are automatically
			 * removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & (JME_RD_IPV4|JME_RD_IPV6)) {
				if ((flags & JME_RD_IPV4) &&
				    (flags & JME_RD_IPCSUM))
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	if_input(ifp, &ml);

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against the received byte
		 * count.  A mismatch would indicate that the hardware is
		 * still trying to update the Rx descriptors. I'm not sure
		 * whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			printf("%s: RX fragment count (%d) "
			    "and packet size (%d) mismatch\n",
			     sc->sc_dev.dv_xname, nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->jme_tick_ch, 1);
	splx(s);
}

void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

int
jme_init(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (error);
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 *  Tx priority queue weight value : 0
	 *  Tx FIFO threshold for processing next packet : 16QW
	 *  Maximum Tx DMA length : 512
	 *  Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 *  FIFO full threshold for transmitting Tx pause packet : 128T
	 *  FIFO threshold for processing next packet : 128QW
	 *  Rx queue 0 select
	 *  Max Rx DMA length : 128
	 *  Rx descriptor retry : 32
	 *  Rx descriptor retry time gap : 256ns
	 *  Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;

1863 	/*
1864 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
1865 	 * than 4K bytes will suffer from Rx FIFO overruns. So
1866 	 * decrease FIFO threshold to reduce the FIFO overruns for
1867 	 * frames larger than 4000 bytes.
1868 	 * For best performance with standard MTU-sized frames, use
1869 	 * the maximum allowable FIFO threshold, which is 32QW for
1870 	 * chips with a full mask >= 2 and 128QW otherwise.  FIFO
1871 	 * thresholds of 64QW and 128QW are not valid for chips
1872 	 * with a full mask >= 2.
1873 	 */
1874 	if (sc->jme_revfm >= 2)
1875 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1876 	else {
1877 		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1878 		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
1879 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1880 		else
1881 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
1882 	}
1883 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
1884 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
1885 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
1886 	/* XXX TODO DROP_BAD */
1887 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
1888 
1889 	/* Set Rx descriptor counter. */
1890 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
1891 
1892 	/* Set Rx ring address to the hardware. */
1893 	paddr = JME_RX_RING_ADDR(sc, 0);
1894 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
1895 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
1896 
1897 	/* Clear receive filter. */
1898 	CSR_WRITE_4(sc, JME_RXMAC, 0);
1899 
1900 	/* Set up the receive filter. */
1901 	jme_iff(sc);
1902 
1903 	jme_set_vlan(sc);
1904 
1905 	/*
1906 	 * Disable all WOL bits, since WOL can interfere with normal
1907 	 * Rx operation.  Also clear the WOL detection status bits.
1908 	 */
1909 	reg = CSR_READ_4(sc, JME_PMCS);
1910 	reg &= ~PMCS_WOL_ENB_MASK;
1911 	CSR_WRITE_4(sc, JME_PMCS, reg);
1912 
1913 	/*
1914 	 * Pad 10 bytes right before the received frame.  This greatly
1915 	 * helps Rx performance on strict-alignment architectures, as
1916 	 * the frame no longer has to be copied to align the payload.
1917 	 */
1918 	reg = CSR_READ_4(sc, JME_RXMAC);
1919 	reg |= RXMAC_PAD_10BYTES;
1920 	reg |= RXMAC_CSUM_ENB;
1921 	CSR_WRITE_4(sc, JME_RXMAC, reg);
1922 
1923 	/* Configure general purpose reg0 */
1924 	reg = CSR_READ_4(sc, JME_GPREG0);
1925 	reg &= ~GPREG0_PCC_UNIT_MASK;
1926 	/* Set PCC timer resolution to micro-seconds unit. */
1927 	reg |= GPREG0_PCC_UNIT_US;
1928 	/*
1929 	 * Disable all shadow register posting, as we have to read the
1930 	 * JME_INTR_STATUS register in jme_intr anyway.  Shadow posting
1931 	 * also makes it hard to keep the interrupt status synchronized
1932 	 * between hardware and software, given the requirements of
1933 	 * bus_dmamap_sync(9).
1934 	 */
1935 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
1936 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
1937 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
1938 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
1939 	/* Disable posting of DW0. */
1940 	reg &= ~GPREG0_POST_DW0_ENB;
1941 	/* Clear PME message. */
1942 	reg &= ~GPREG0_PME_ENB;
1943 	/* Set PHY address. */
1944 	reg &= ~GPREG0_PHY_ADDR_MASK;
1945 	reg |= sc->jme_phyaddr;
1946 	CSR_WRITE_4(sc, JME_GPREG0, reg);
1947 
1948 	/* Configure Tx queue 0 packet completion coalescing. */
1949 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1950 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
1951 	    PCCTX_COAL_TO_MASK;
1952 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1953 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
1954 	    PCCTX_COAL_PKT_MASK;
1955 	reg |= PCCTX_COAL_TXQ0;
1956 	CSR_WRITE_4(sc, JME_PCCTX, reg);
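	/*
	 * With GPREG0_PCC_UNIT_US selected above, the Tx and Rx
	 * coalescing timeouts are interpreted in microseconds.
	 */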
1957 
1958 	/* Configure Rx queue 0 packet completion coalescing. */
1959 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1960 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
1961 	    PCCRX_COAL_TO_MASK;
1962 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1963 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
1964 	    PCCRX_COAL_PKT_MASK;
1965 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
1966 
1967 	/* Configure shadow status block but don't enable posting. */
1968 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
1969 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
1970 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
1971 
1972 	/* Disable Timer 1 and Timer 2. */
1973 	CSR_WRITE_4(sc, JME_TIMER1, 0);
1974 	CSR_WRITE_4(sc, JME_TIMER2, 0);
1975 
1976 	/* Configure retry transmit period, retry limit value. */
1977 	CSR_WRITE_4(sc, JME_TXTRHD,
1978 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
1979 	    TXTRHD_RT_PERIOD_MASK) |
1980 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
1981 	    TXTRHD_RT_LIMIT_MASK));
1982 
1983 	/* Disable RSS. */
1984 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
1985 
1986 	/* Initialize the interrupt mask. */
1987 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
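	/* Writing ones acks any stale events; the status register is
	 * write-one-to-clear. */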
1988 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
1989 
1990 	/*
1991 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
1992 	 * done after detection of valid link in jme_miibus_statchg.
1993 	 */
1994 	sc->jme_flags &= ~JME_FLAG_LINK;
1995 
1996 	/* Set the current media. */
1997 	mii = &sc->sc_miibus;
1998 	mii_mediachg(mii);
1999 
2000 	timeout_add_sec(&sc->jme_tick_ch, 1);
2001 
2002 	ifp->if_flags |= IFF_RUNNING;
2003 	ifq_clr_oactive(&ifp->if_snd);
2004 
2005 	return (0);
2006 }
2007 
2008 void
2009 jme_stop(struct jme_softc *sc)
2010 {
2011 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2012 	struct jme_txdesc *txd;
2013 	struct jme_rxdesc *rxd;
2014 	int i;
2015 
2016 	/*
2017 	 * Mark the interface down and cancel the watchdog timer.
2018 	 */
2019 	ifp->if_flags &= ~IFF_RUNNING;
2020 	ifq_clr_oactive(&ifp->if_snd);
2021 	ifp->if_timer = 0;
2022 
2023 	timeout_del(&sc->jme_tick_ch);
2024 	sc->jme_flags &= ~JME_FLAG_LINK;
2025 
2026 	/*
2027 	 * Disable interrupts.
2028 	 */
2029 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2030 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2031 
2032 	/* Disable updating shadow status block. */
2033 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2034 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2035 
2036 	/* Stop receiver, transmitter. */
2037 	jme_stop_rx(sc);
2038 	jme_stop_tx(sc);
2039 
2040 #ifdef foo
2041 	/* Reclaim Rx/Tx buffers that have been completed. */
2042 	jme_rxeof(sc);
2043 	m_freem(sc->jme_cdata.jme_rxhead);
2044 	JME_RXCHAIN_RESET(sc);
2045 	jme_txeof(sc);
2046 #endif
2047 
2048 	/*
2049 	 * Free partially finished Rx segments.
2050 	 */
2051 	m_freem(sc->jme_cdata.jme_rxhead);
2052 	JME_RXCHAIN_RESET(sc);
2053 
2054 	/*
2055 	 * Free RX and TX mbufs still in the queues.
2056 	 */
2057 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2058 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2059 		if (rxd->rx_m != NULL) {
2060 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2061 			m_freem(rxd->rx_m);
2062 			rxd->rx_m = NULL;
2063 		}
2064 	}
2065 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2066 		txd = &sc->jme_cdata.jme_txdesc[i];
2067 		if (txd->tx_m != NULL) {
2068 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2069 			m_freem(txd->tx_m);
2070 			txd->tx_m = NULL;
2071 			txd->tx_ndesc = 0;
2072 		}
2073 	}
2074 }
2075 
2076 void
2077 jme_stop_tx(struct jme_softc *sc)
2078 {
2079 	uint32_t reg;
2080 	int i;
2081 
2082 	reg = CSR_READ_4(sc, JME_TXCSR);
2083 	if ((reg & TXCSR_TX_ENB) == 0)
2084 		return;
2085 	reg &= ~TXCSR_TX_ENB;
2086 	CSR_WRITE_4(sc, JME_TXCSR, reg);
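	/* Busy-wait, polling once per microsecond, for the transmitter
	 * to stop. */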
2087 	for (i = JME_TIMEOUT; i > 0; i--) {
2088 		DELAY(1);
2089 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2090 			break;
2091 	}
2092 	if (i == 0)
2093 		printf("%s: stopping transmitter timeout!\n",
2094 		    sc->sc_dev.dv_xname);
2095 }
2096 
2097 void
2098 jme_stop_rx(struct jme_softc *sc)
2099 {
2100 	uint32_t reg;
2101 	int i;
2102 
2103 	reg = CSR_READ_4(sc, JME_RXCSR);
2104 	if ((reg & RXCSR_RX_ENB) == 0)
2105 		return;
2106 	reg &= ~RXCSR_RX_ENB;
2107 	CSR_WRITE_4(sc, JME_RXCSR, reg);
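	/* Busy-wait, polling once per microsecond, for the receiver
	 * to stop. */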
2108 	for (i = JME_TIMEOUT; i > 0; i--) {
2109 		DELAY(1);
2110 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2111 			break;
2112 	}
2113 	if (i == 0)
2114 		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
2115 }
2116 
2117 void
2118 jme_init_tx_ring(struct jme_softc *sc)
2119 {
2120 	struct jme_ring_data *rd;
2121 	struct jme_txdesc *txd;
2122 	int i;
2123 
2124 	sc->jme_cdata.jme_tx_prod = 0;
2125 	sc->jme_cdata.jme_tx_cons = 0;
2126 	sc->jme_cdata.jme_tx_cnt = 0;
2127 
2128 	rd = &sc->jme_rdata;
2129 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2130 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2131 		txd = &sc->jme_cdata.jme_txdesc[i];
2132 		txd->tx_m = NULL;
2133 		txd->tx_desc = &rd->jme_tx_ring[i];
2134 		txd->tx_ndesc = 0;
2135 	}
2136 
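	/* Flush the cleared ring to memory before the chip reads it. */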
2137 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
2138 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2139 }
2140 
2141 void
2142 jme_init_ssb(struct jme_softc *sc)
2143 {
2144 	struct jme_ring_data *rd;
2145 
2146 	rd = &sc->jme_rdata;
2147 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2148 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
2149 	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2150 }
2151 
2152 int
2153 jme_init_rx_ring(struct jme_softc *sc)
2154 {
2155 	struct jme_ring_data *rd;
2156 	struct jme_rxdesc *rxd;
2157 	int i;
2158 
2159 	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2160 	    sc->jme_cdata.jme_rxtail == NULL &&
2161 	    sc->jme_cdata.jme_rxlen == 0);
2162 	sc->jme_cdata.jme_rx_cons = 0;
2163 
2164 	rd = &sc->jme_rdata;
2165 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2166 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2167 		int error;
2168 
2169 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2170 		rxd->rx_m = NULL;
2171 		rxd->rx_desc = &rd->jme_rx_ring[i];
2172 		error = jme_newbuf(sc, rxd);
2173 		if (error)
2174 			return (error);
2175 	}
2176 
2177 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
2178 	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2179 
2180 	return (0);
2181 }
2182 
2183 int
2184 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
2185 {
2186 	struct jme_desc *desc;
2187 	struct mbuf *m;
2188 	bus_dmamap_t map;
2189 	int error;
2190 
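	/* Allocate a packet header mbuf with a cluster; do not wait. */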
2191 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2192 	if (m == NULL)
2193 		return (ENOBUFS);
2194 	MCLGET(m, M_DONTWAIT);
2195 	if (!(m->m_flags & M_EXT)) {
2196 		m_freem(m);
2197 		return (ENOBUFS);
2198 	}
2199 
2200 	/*
2201 	 * The JMC250 has a 64-bit boundary alignment limitation, so
2202 	 * jme(4) relies on the hardware's 10-byte padding feature to
2203 	 * avoid copying the entire frame just to align the IP header
2204 	 * on a 32-bit boundary.
2205 	 */
2206 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2207 
2208 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2209 	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);
2210 
2211 	if (error != 0) {
2212 		m_freem(m);
2213 		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
2214 		return (error);
2215 	}
2216 
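	/*
	 * The load into the spare map succeeded; unload the old map,
	 * if any, and recycle it as the new spare.
	 */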
2217 	if (rxd->rx_m != NULL) {
2218 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2219 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2220 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2221 	}
2222 	map = rxd->rx_dmamap;
2223 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2224 	sc->jme_cdata.jme_rx_sparemap = map;
2225 	rxd->rx_m = m;
2226 
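	/*
	 * Rewrite the descriptor and hand it back to the chip by
	 * setting JME_RD_OWN.
	 */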
2227 	desc = rxd->rx_desc;
2228 	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
2229 	desc->addr_lo =
2230 	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
2231 	desc->addr_hi =
2232 	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
2233 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2234 
2235 	return (0);
2236 }
2237 
2238 void
2239 jme_set_vlan(struct jme_softc *sc)
2240 {
2241 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2242 	uint32_t reg;
2243 
2244 	reg = CSR_READ_4(sc, JME_RXMAC);
2245 	reg &= ~RXMAC_VLAN_ENB;
2246 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
2247 		reg |= RXMAC_VLAN_ENB;
2248 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2249 }
2250 
2251 void
2252 jme_iff(struct jme_softc *sc)
2253 {
2254 	struct arpcom *ac = &sc->sc_arpcom;
2255 	struct ifnet *ifp = &ac->ac_if;
2256 	struct ether_multi *enm;
2257 	struct ether_multistep step;
2258 	uint32_t crc;
2259 	uint32_t mchash[2];
2260 	uint32_t rxcfg;
2261 
2262 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2263 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2264 	    RXMAC_ALLMULTI);
2265 	ifp->if_flags &= ~IFF_ALLMULTI;
2266 
2267 	/*
2268 	 * Always accept frames destined to our station address.
2269 	 * Always accept broadcast frames.
2270 	 */
2271 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2272 
2273 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2274 		ifp->if_flags |= IFF_ALLMULTI;
2275 		if (ifp->if_flags & IFF_PROMISC)
2276 			rxcfg |= RXMAC_PROMISC;
2277 		else
2278 			rxcfg |= RXMAC_ALLMULTI;
2279 		mchash[0] = mchash[1] = 0xFFFFFFFF;
2280 	} else {
2281 		/*
2282 		 * Set up the multicast address filter by passing all
2283 		 * multicast addresses through a CRC generator, and then
2284 		 * using the low-order 6 bits as an index into the 64-bit
2285 		 * multicast hash table.  The high-order bits select the
2286 		 * register, while the rest of the bits select the bit
2287 		 * within the register.
2288 		 */
2289 		rxcfg |= RXMAC_MULTICAST;
2290 		bzero(mchash, sizeof(mchash));
2291 
2292 		ETHER_FIRST_MULTI(step, ac, enm);
2293 		while (enm != NULL) {
2294 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2295 
2296 			/* Just want the 6 least significant bits. */
2297 			crc &= 0x3f;
2298 
2299 			/* Set the corresponding bit in the hash table. */
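			/* crc >> 5 selects MAR0/MAR1; crc & 0x1f selects
			 * the bit within the register. */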
2300 			mchash[crc >> 5] |= 1 << (crc & 0x1f);
2301 
2302 			ETHER_NEXT_MULTI(step, enm);
2303 		}
2304 	}
2305 
2306 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2307 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2308 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2309 }
2310