/*	$OpenBSD: if_jme.c,v 1.54 2020/07/10 13:26:38 patrick Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>
#include <dev/mii/jmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_jmereg.h>
#include <dev/pci/if_jmevar.h>

/* Define the following to enable printing of Rx errors. */
#undef	JME_SHOW_ERRORS

int	jme_match(struct device *, void *, void *);
void	jme_map_intr_vector(struct jme_softc *);
void	jme_attach(struct device *, struct device *, void *);
int	jme_detach(struct device *, int);

int	jme_miibus_readreg(struct device *, int, int);
void	jme_miibus_writereg(struct device *, int, int, int);
void	jme_miibus_statchg(struct device *);

int	jme_init(struct ifnet *);
int	jme_ioctl(struct ifnet *, u_long, caddr_t);

void	jme_start(struct ifnet *);
void	jme_watchdog(struct ifnet *);
void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
int	jme_mediachange(struct ifnet *);

int	jme_intr(void *);
void	jme_txeof(struct jme_softc *);
void	jme_rxeof(struct jme_softc *);

int	jme_dma_alloc(struct jme_softc *);
void	jme_dma_free(struct jme_softc *);
int	jme_init_rx_ring(struct jme_softc *);
void	jme_init_tx_ring(struct jme_softc *);
void	jme_init_ssb(struct jme_softc *);
int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
int	jme_encap(struct jme_softc *, struct mbuf *);
void	jme_rxpkt(struct jme_softc *);

void	jme_tick(void *);
void	jme_stop(struct jme_softc *);
void	jme_reset(struct jme_softc *);
void	jme_set_vlan(struct jme_softc *);
void	jme_iff(struct jme_softc *);
void	jme_stop_tx(struct jme_softc *);
void	jme_stop_rx(struct jme_softc *);
void	jme_mac_config(struct jme_softc *);
void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
void	jme_discard_rxbufs(struct jme_softc *, int, int);
#ifdef notyet
void	jme_setwol(struct jme_softc *);
void	jme_setlinkspeed(struct jme_softc *);
#endif

/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)

/*
 *	Read a PHY register on the MII of the JMC250.
 */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
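
/*
 * The SMI handshake above is the standard MII access pattern: write the
 * opcode, PHY address and register number to JME_SMI with SMI_OP_EXECUTE
 * set, then busy-wait in 1us steps (up to JME_PHY_TIMEOUT) for the
 * hardware to clear SMI_OP_EXECUTE, at which point the read data can be
 * pulled out of the same register.  A minimal sketch of a shared wait
 * helper built on the same idea (hypothetical, not part of this driver):
 *
 *	int
 *	jme_smi_wait(struct jme_softc *sc)
 *	{
 *		int i;
 *
 *		for (i = JME_PHY_TIMEOUT; i > 0; i--) {
 *			DELAY(1);
 *			if ((CSR_READ_4(sc, JME_SMI) & SMI_OP_EXECUTE) == 0)
 *				return (0);
 *		}
 *		return (ETIMEDOUT);
 *	}
 */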

/*
 *	Write a PHY register on the MII of the JMC250.
 */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return;

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 *	Callback from MII layer when media changes.
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has the side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first Tx/Rx descriptor
	 * address, so the driver has to reset its internal
	 * producer/consumer pointers and reclaim any allocated
	 * resources.  Note that just saving the JME_TXNDA and JME_RXNDA
	 * registers before stopping the MACs and restoring them
	 * afterwards is not sufficient to guarantee correct MAC state,
	 * because stopping the MACs can take a while and the hardware
	 * may update JME_TXNDA/JME_RXNDA during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 *	Get the current interface media status.
 */
void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
jme_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
	    sizeof (jme_devices) / sizeof (jme_devices[0]));
}

int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of the EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try the next EEPROM descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
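
/*
 * As the loop above reads it, each EEPROM descriptor is a three-byte
 * record: a function/page byte (fup, which also carries the
 * JME_EEPROM_DESC_END flag), a register offset, and a data byte, with
 * JME_EEPROM_DESC_BYTES giving the stride to the next record.  A
 * record targeting register JME_PAR0 + n supplies byte n of the
 * station address, so six matching records yield the complete MAC
 * address.
 */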

void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

	eaddr[0] = (par0 >> 0) & 0xFF;
	eaddr[1] = (par0 >> 8) & 0xFF;
	eaddr[2] = (par0 >> 16) & 0xFF;
	eaddr[3] = (par0 >> 24) & 0xFF;
	eaddr[4] = (par1 >> 0) & 0xFF;
	eaddr[5] = (par1 >> 8) & 0xFF;
}
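
/*
 * The station address is packed little-endian across the two PAR
 * registers: for 00:11:22:33:44:55 the hardware returns PAR0 ==
 * 0x33221100 and (PAR1 & 0xFFFF) == 0x5544.  jme_init() performs the
 * inverse packing when it reprograms the address.
 */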

void
jme_map_intr_vector(struct jme_softc *sc)
{
	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];

	bzero(map, sizeof(map));

	/* Map Tx interrupts source to MSI/MSIX vector 2. */
	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);

	/* Map Rx interrupts source to MSI/MSIX vector 1. */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);

	/* Map all other interrupts source to MSI/MSIX vector 0. */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}
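
/*
 * Each 32-bit JME_MSINUM register packs the vector assignments for a
 * group of interrupt sources: MSINUM_REG_INDEX() selects the register
 * holding a source's field and MSINUM_INTR_SOURCE() builds that field.
 * Sources left at zero by the bzero() above implicitly stay on vector
 * 0, which is why only the Tx and Rx sources need explicit entries.
 */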

void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate IO memory
	 *
	 * The JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O access would have to use several BARs to
	 * reach all registers, it's a waste of time to use I/O register
	 * space.  The JMC250 maps its entire register space into 16K of
	 * memory space.
	 */

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) == 0)
		jme_map_intr_vector(sc);
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (jmedebug)) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, JME_TX_RING_CNT - 1);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 |
	    IFCAP_CSUM_UDPv6;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff,
	    sc->jme_caps & JME_CAP_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
	/*
	 * FPGA-mode PHY handling (saving PHYADDR) is not implemented;
	 * FPGA parts are not production hardware.
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}

int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA stuffs for TX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;

#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create DMA stuffs for shadow status block
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}
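
/*
 * Each DMA object above follows the standard four-step bus_dma(9)
 * sequence: bus_dmamap_create() to obtain a map, bus_dmamem_alloc()
 * for the raw segments, bus_dmamem_map() to get a kernel virtual
 * address, and bus_dmamap_load() to resolve the bus address
 * (dm_segs[0].ds_addr) that is later programmed into the chip.
 * jme_dma_free() below runs the corresponding unload/free/destroy
 * calls.
 */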

void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires an explicit
 * reset of the link speed to 10/100Mbps, as a gigabit link consumes
 * more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed as we have no
 * control after powering off.  If the renegotiation fails, WOL may not
 * work.  Running at 1Gbps draws more power than the 375mA at 3.3V
 * specified by the PCI specification, and that would result in power
 * to the ethernet controller being shut down completely.
 *
 * TODO
 *  Save the current negotiated media speed/duplex/flow-control
 *  to the softc and restore the same link again after resuming.
 *  PHY handling such as powering down/resetting to 100Mbps
 *  may be better handled in the suspend method of the phy driver.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

int
jme_encap(struct jme_softc *sc, struct mbuf *m)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
		    m, BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the first
	 * descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);

drop:
	m_freem(m);
	return (error);
}
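
/*
 * Descriptor layout used by jme_encap() above: the head descriptor
 * carries the checksum/VLAN flags and the total packet length (stashed
 * in addr_hi, with buflen and addr_lo zero), and each following
 * descriptor carries one DMA segment with JME_TD_OWN already set.
 * JME_TD_OWN on the head descriptor is only flipped last, together
 * with JME_TD_INTR, so the hardware never sees a partially built
 * chain.  A two-segment mbuf chain therefore consumes three
 * descriptors; tx_ndesc counts the segments plus JME_TXD_RSVD, which
 * presumably accounts for the head descriptor.
 */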

void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq = 0;

	/* Reclaim transmitted frames. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0)
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	for (;;) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
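
/*
 * Note the symmetric watermark between jme_start() and jme_txeof():
 * queueing stops once the ring holds more than
 * JME_TX_RING_CNT - 2 * JME_TXD_RSVD descriptors (jme_tx_cnt +
 * JME_TXD_RSVD may not exceed JME_TX_RING_CNT - JME_TXD_RSVD), and
 * jme_txeof() clears OACTIVE with the mirror-image comparison, so the
 * ring never fills completely and the two sides agree on when to
 * resume transmission.
 */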

void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		jme_init(ifp);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		jme_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	jme_init(ifp);
	jme_start(ifp);
}

int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			jme_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				jme_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			jme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use the extended FIFO depth to work around CRC errors
		 * emitted by chips before the JMC250B.
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* set clock sources for tx mac and offload engine */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify the hardware of the availability of new
			 * Rx buffers.  Reading RXCSR takes a very long
			 * time under heavy load so cache the RXCSR value
			 * and write the ORed value with the kick command
			 * to RXCSR.  This saves one register access
			 * cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}
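
/*
 * Interrupt acknowledgment above works by write-back: the raw
 * completion (COMP) bits are first dropped from the status, then
 * re-added only for the directions whose coalescing event (COAL or
 * COAL_TO) fired, and the result is written back to JME_INTR_STATUS.
 * As the comment in the function says, this acks the interrupt and
 * resets the PCC counter/timer; the apparent intent is that
 * completions are only acknowledged together with their coalescing
 * event.
 */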

void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);

		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chain of buffers for the transmitted frame.  In
		 * other words, the JME_TD_OWN bit is valid only in the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}

/* Receive a frame. */
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s : receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * The receive processor can handle a maximum
			 * frame size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			/*
			 * Account for the 10 bytes of auto padding used
			 * to align the IP header on a 32bit boundary.
			 * Also note the CRC bytes are automatically
			 * removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & (JME_RD_IPV4|JME_RD_IPV6)) {
				if ((flags & JME_RD_IPV4) &&
				    (flags & JME_RD_IPCSUM))
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	if_input(ifp, &ml);

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}
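
/*
 * A worked example of the length fix-up above, assuming 2KB clusters
 * (MCLBYTES == 2048) and JME_RX_PAD_BYTES == 10: a frame with
 * jme_rxlen == 4000 arrives in nsegs == 2 segments.  The first mbuf is
 * trimmed to MCLBYTES - 10 == 2038 bytes (the pad itself is skipped by
 * advancing m_data), the last mbuf gets 4000 - 2038 == 1962 bytes, and
 * any middle mbufs keep the full MCLBYTES set in the loop.
 */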

void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update Rx descriptors.
		 * I'm not sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			printf("%s: RX fragment count(%d) "
			    "and packet size(%d) mismatch\n",
			    sc->sc_dev.dv_xname, nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}
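
/*
 * For the sanity check above, assuming 2KB clusters (MCLBYTES ==
 * 2048): a descriptor reporting 3000 bytes must span
 * howmany(3000, 2048) == 2 segments.  If the reported segment count
 * disagrees, the hardware is presumably still updating the descriptor,
 * so the loop stops and the frame is picked up on a later pass.
 */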

void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->jme_tick_ch, 1);
	splx(s);
}

void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

int
jme_init(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (error);
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 *  Tx priority queue weight value : 0
	 *  Tx FIFO threshold for processing next packet : 16QW
	 *  Maximum Tx DMA length : 512
	 *  Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 *  FIFO full threshold for transmitting Tx pause packet : 128T
	 *  FIFO threshold for processing next packet : 128QW
	 *  Rx queue 0 select
	 *  Max Rx DMA length : 128
	 *  Rx descriptor retry : 32
	 *  Rx descriptor retry time gap : 256ns
	 *  Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
1862 	/*
1863 	 * Since the Rx FIFO is 4K bytes, frames larger than 4K
1864 	 * bytes will suffer from Rx FIFO overruns, so decrease
1865 	 * the FIFO threshold to reduce overruns for frames larger
1866 	 * than 4000 bytes.
1867 	 * For best performance with standard MTU-sized frames, use
1868 	 * the maximum allowable FIFO threshold, which is 32QW for
1869 	 * chips with a full mask >= 2 and 128QW otherwise.  FIFO
1870 	 * thresholds of 64QW and 128QW are not valid for chips
1871 	 * with a full mask >= 2.
1872 	 */
1873 	if (sc->jme_revfm >= 2)
1874 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1875 	else {
1876 		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1877 		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
1878 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1879 		else
1880 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
1881 	}
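	/*
	 * With the standard 1500 byte MTU the largest frame is
	 * 1500 + 14 (header) + 4 (CRC) + 4 (VLAN) = 1522 bytes,
	 * well below the 4K FIFO, so the 128QW threshold applies.
	 */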
1882 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
1883 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
1884 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
1885 	/* XXX TODO DROP_BAD */
1886 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
1887 
1888 	/* Set Rx descriptor counter. */
1889 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
1890 
1891 	/* Set Rx ring address to the hardware. */
1892 	paddr = JME_RX_RING_ADDR(sc, 0);
1893 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
1894 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
1895 
1896 	/* Clear receive filter. */
1897 	CSR_WRITE_4(sc, JME_RXMAC, 0);
1898 
1899 	/* Set up the receive filter. */
1900 	jme_iff(sc);
1901 
1902 	jme_set_vlan(sc);
1903 
1904 	/*
1905 	 * Disable all WOL bits, as WOL can interfere with normal Rx
1906 	 * operation.  Also clear the WOL detection status bits.
1907 	 */
1908 	reg = CSR_READ_4(sc, JME_PMCS);
1909 	reg &= ~PMCS_WOL_ENB_MASK;
1910 	CSR_WRITE_4(sc, JME_PMCS, reg);
1911 
1912 	/*
1913 	 * Pad 10 bytes right before the received frame.  This greatly
1914 	 * helps Rx performance on strict-alignment architectures, as
1915 	 * no copy is needed to align the payload.
1916 	 */
1917 	reg = CSR_READ_4(sc, JME_RXMAC);
1918 	reg |= RXMAC_PAD_10BYTES;
1919 	reg |= RXMAC_CSUM_ENB;
1920 	CSR_WRITE_4(sc, JME_RXMAC, reg);
1921 
1922 	/* Configure general purpose reg0 */
1923 	reg = CSR_READ_4(sc, JME_GPREG0);
1924 	reg &= ~GPREG0_PCC_UNIT_MASK;
1925 	/* Set PCC timer resolution to micro-seconds unit. */
1926 	reg |= GPREG0_PCC_UNIT_US;
1927 	/*
1928 	 * Disable all shadow register posting, as we have to read the
1929 	 * JME_INTR_STATUS register in jme_intr.  Also, it seems hard
1930 	 * to synchronize the interrupt status between hardware and
1931 	 * software with shadow posting, due to the requirements of
1932 	 * bus_dmamap_sync(9).
1933 	 */
1934 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
1935 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
1936 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
1937 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
1938 	/* Disable posting of DW0. */
1939 	reg &= ~GPREG0_POST_DW0_ENB;
1940 	/* Clear PME message. */
1941 	reg &= ~GPREG0_PME_ENB;
1942 	/* Set PHY address. */
1943 	reg &= ~GPREG0_PHY_ADDR_MASK;
1944 	reg |= sc->jme_phyaddr;
1945 	CSR_WRITE_4(sc, JME_GPREG0, reg);
1946 
1947 	/* Configure Tx queue 0 packet completion coalescing. */
1948 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1949 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
1950 	    PCCTX_COAL_TO_MASK;
1951 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1952 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
1953 	    PCCTX_COAL_PKT_MASK;
1954 	reg |= PCCTX_COAL_TXQ0;
1955 	CSR_WRITE_4(sc, JME_PCCTX, reg);
1956 
1957 	/* Configure Rx queue 0 packet completion coalescing. */
1958 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1959 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
1960 	    PCCRX_COAL_TO_MASK;
1961 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1962 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
1963 	    PCCRX_COAL_PKT_MASK;
1964 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
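	/*
	 * Each PCC register packs a timeout (in the microsecond unit
	 * selected via GPREG0 above) and a packet count; a completion
	 * interrupt is raised once either threshold is reached.
	 */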
1965 
1966 	/* Configure shadow status block but don't enable posting. */
1967 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
1968 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
1969 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
1970 
1971 	/* Disable Timer 1 and Timer 2. */
1972 	CSR_WRITE_4(sc, JME_TIMER1, 0);
1973 	CSR_WRITE_4(sc, JME_TIMER2, 0);
1974 
1975 	/* Configure retry transmit period, retry limit value. */
1976 	CSR_WRITE_4(sc, JME_TXTRHD,
1977 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
1978 	    TXTRHD_RT_PERIOD_MASK) |
1979 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
1980 	    TXTRHD_RT_LIMIT_MASK));
1981 
1982 	/* Disable RSS. */
1983 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
1984 
1985 	/* Initialize the interrupt mask. */
1986 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1987 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
1988 
1989 	/*
1990 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
1991 	 * done after detection of valid link in jme_miibus_statchg.
1992 	 */
1993 	sc->jme_flags &= ~JME_FLAG_LINK;
1994 
1995 	/* Set the current media. */
1996 	mii = &sc->sc_miibus;
1997 	mii_mediachg(mii);
1998 
1999 	timeout_add_sec(&sc->jme_tick_ch, 1);
2000 
2001 	ifp->if_flags |= IFF_RUNNING;
2002 	ifq_clr_oactive(&ifp->if_snd);
2003 
2004 	return (0);
2005 }
2006 
2007 void
2008 jme_stop(struct jme_softc *sc)
2009 {
2010 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2011 	struct jme_txdesc *txd;
2012 	struct jme_rxdesc *rxd;
2013 	int i;
2014 
2015 	/*
2016 	 * Mark the interface down and cancel the watchdog timer.
2017 	 */
2018 	ifp->if_flags &= ~IFF_RUNNING;
2019 	ifq_clr_oactive(&ifp->if_snd);
2020 	ifp->if_timer = 0;
2021 
2022 	timeout_del(&sc->jme_tick_ch);
2023 	sc->jme_flags &= ~JME_FLAG_LINK;
2024 
2025 	/*
2026 	 * Disable interrupts.
2027 	 */
2028 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2029 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2030 
2031 	/* Disable updating shadow status block. */
2032 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2033 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2034 
2035 	/* Stop receiver, transmitter. */
2036 	jme_stop_rx(sc);
2037 	jme_stop_tx(sc);
2038 
2039 #ifdef foo
2040 	/* Reclaim Rx/Tx buffers that have been completed. */
2041 	jme_rxeof(sc);
2042 	m_freem(sc->jme_cdata.jme_rxhead);
2043 	JME_RXCHAIN_RESET(sc);
2044 	jme_txeof(sc);
2045 #endif
2046 
2047 	/*
2048 	 * Free partially finished Rx segments.
2049 	 */
2050 	m_freem(sc->jme_cdata.jme_rxhead);
2051 	JME_RXCHAIN_RESET(sc);
2052 
2053 	/*
2054 	 * Free RX and TX mbufs still in the queues.
2055 	 */
2056 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2057 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2058 		if (rxd->rx_m != NULL) {
2059 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2060 			m_freem(rxd->rx_m);
2061 			rxd->rx_m = NULL;
2062 		}
2063 	}
2064 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2065 		txd = &sc->jme_cdata.jme_txdesc[i];
2066 		if (txd->tx_m != NULL) {
2067 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2068 			m_freem(txd->tx_m);
2069 			txd->tx_m = NULL;
2070 			txd->tx_ndesc = 0;
2071 		}
2072 	}
2073 }
2074 
2075 void
2076 jme_stop_tx(struct jme_softc *sc)
2077 {
2078 	uint32_t reg;
2079 	int i;
2080 
2081 	reg = CSR_READ_4(sc, JME_TXCSR);
2082 	if ((reg & TXCSR_TX_ENB) == 0)
2083 		return;
2084 	reg &= ~TXCSR_TX_ENB;
2085 	CSR_WRITE_4(sc, JME_TXCSR, reg);
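	/*
	 * Busy-wait (1us per iteration, up to JME_TIMEOUT iterations)
	 * for the transmitter to go idle.
	 */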
2086 	for (i = JME_TIMEOUT; i > 0; i--) {
2087 		DELAY(1);
2088 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2089 			break;
2090 	}
2091 	if (i == 0)
2092 		printf("%s: stopping transmitter timeout!\n",
2093 		    sc->sc_dev.dv_xname);
2094 }
2095 
2096 void
2097 jme_stop_rx(struct jme_softc *sc)
2098 {
2099 	uint32_t reg;
2100 	int i;
2101 
2102 	reg = CSR_READ_4(sc, JME_RXCSR);
2103 	if ((reg & RXCSR_RX_ENB) == 0)
2104 		return;
2105 	reg &= ~RXCSR_RX_ENB;
2106 	CSR_WRITE_4(sc, JME_RXCSR, reg);
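	/*
	 * Busy-wait (1us per iteration, up to JME_TIMEOUT iterations)
	 * for the receiver to go idle.
	 */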
2107 	for (i = JME_TIMEOUT; i > 0; i--) {
2108 		DELAY(1);
2109 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2110 			break;
2111 	}
2112 	if (i == 0)
2113 		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
2114 }
2115 
2116 void
2117 jme_init_tx_ring(struct jme_softc *sc)
2118 {
2119 	struct jme_ring_data *rd;
2120 	struct jme_txdesc *txd;
2121 	int i;
2122 
2123 	sc->jme_cdata.jme_tx_prod = 0;
2124 	sc->jme_cdata.jme_tx_cons = 0;
2125 	sc->jme_cdata.jme_tx_cnt = 0;
2126 
2127 	rd = &sc->jme_rdata;
2128 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2129 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2130 		txd = &sc->jme_cdata.jme_txdesc[i];
2131 		txd->tx_m = NULL;
2132 		txd->tx_desc = &rd->jme_tx_ring[i];
2133 		txd->tx_ndesc = 0;
2134 	}
2135 
2136 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
2137 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2138 }
2139 
2140 void
2141 jme_init_ssb(struct jme_softc *sc)
2142 {
2143 	struct jme_ring_data *rd;
2144 
2145 	rd = &sc->jme_rdata;
2146 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2147 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
2148 	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2149 }
2150 
2151 int
2152 jme_init_rx_ring(struct jme_softc *sc)
2153 {
2154 	struct jme_ring_data *rd;
2155 	struct jme_rxdesc *rxd;
2156 	int i;
2157 
2158 	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2159 		 sc->jme_cdata.jme_rxtail == NULL &&
2160 		 sc->jme_cdata.jme_rxlen == 0);
2161 	sc->jme_cdata.jme_rx_cons = 0;
2162 
2163 	rd = &sc->jme_rdata;
2164 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2165 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2166 		int error;
2167 
2168 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2169 		rxd->rx_m = NULL;
2170 		rxd->rx_desc = &rd->jme_rx_ring[i];
2171 		error = jme_newbuf(sc, rxd);
2172 		if (error)
2173 			return (error);
2174 	}
2175 
2176 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
2177 	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2178 
2179 	return (0);
2180 }
2181 
2182 int
2183 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
2184 {
2185 	struct jme_desc *desc;
2186 	struct mbuf *m;
2187 	bus_dmamap_t map;
2188 	int error;
2189 
2190 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2191 	if (m == NULL)
2192 		return (ENOBUFS);
2193 	MCLGET(m, M_DONTWAIT);
2194 	if (!(m->m_flags & M_EXT)) {
2195 		m_freem(m);
2196 		return (ENOBUFS);
2197 	}
2198 
2199 	/*
2200 	 * The JMC250 has a 64 bit boundary alignment limitation, so
2201 	 * jme(4) takes advantage of the hardware's 10 byte padding
2202 	 * feature in order not to copy the entire frame just to
2203 	 * align the IP header on a 32 bit boundary.
2204 	 */
2205 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2206 
2207 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2208 	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);
2209 
2210 	if (error != 0) {
2211 		m_freem(m);
2212 		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
2213 		return (error);
2214 	}
2215 
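	/*
	 * The new mbuf was loaded into the spare map, so a failed
	 * load above leaves the old buffer and its mapping intact.
	 * Release the old mapping, then swap the spare map with the
	 * descriptor's map.
	 */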
2216 	if (rxd->rx_m != NULL) {
2217 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2218 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2219 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2220 	}
2221 	map = rxd->rx_dmamap;
2222 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2223 	sc->jme_cdata.jme_rx_sparemap = map;
2224 	rxd->rx_m = m;
2225 
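	/*
	 * Rewrite the descriptor with the new buffer's DMA address
	 * and length, then set JME_RD_OWN to hand it back to the
	 * hardware.
	 */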
2226 	desc = rxd->rx_desc;
2227 	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
2228 	desc->addr_lo =
2229 	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
2230 	desc->addr_hi =
2231 	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
2232 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2233 
2234 	return (0);
2235 }
2236 
2237 void
2238 jme_set_vlan(struct jme_softc *sc)
2239 {
2240 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2241 	uint32_t reg;
2242 
2243 	reg = CSR_READ_4(sc, JME_RXMAC);
2244 	reg &= ~RXMAC_VLAN_ENB;
2245 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
2246 		reg |= RXMAC_VLAN_ENB;
2247 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2248 }
2249 
2250 void
2251 jme_iff(struct jme_softc *sc)
2252 {
2253 	struct arpcom *ac = &sc->sc_arpcom;
2254 	struct ifnet *ifp = &ac->ac_if;
2255 	struct ether_multi *enm;
2256 	struct ether_multistep step;
2257 	uint32_t crc;
2258 	uint32_t mchash[2];
2259 	uint32_t rxcfg;
2260 
2261 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2262 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2263 	    RXMAC_ALLMULTI);
2264 	ifp->if_flags &= ~IFF_ALLMULTI;
2265 
2266 	/*
2267 	 * Always accept frames destined to our station address.
2268 	 * Always accept broadcast frames.
2269 	 */
2270 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2271 
2272 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2273 		ifp->if_flags |= IFF_ALLMULTI;
2274 		if (ifp->if_flags & IFF_PROMISC)
2275 			rxcfg |= RXMAC_PROMISC;
2276 		else
2277 			rxcfg |= RXMAC_ALLMULTI;
2278 		mchash[0] = mchash[1] = 0xFFFFFFFF;
2279 	} else {
2280 		/*
2281 		 * Set up the multicast address filter by passing all
2282 		 * multicast addresses through a CRC generator and then
2283 		 * using the low-order 6 bits as an index into the 64-bit
2284 		 * multicast hash table.  The high-order bit selects the
2285 		 * register, while the low-order 5 bits select the bit
2286 		 * within the register.
2287 		 */
2288 		rxcfg |= RXMAC_MULTICAST;
2289 		bzero(mchash, sizeof(mchash));
2290 
2291 		ETHER_FIRST_MULTI(step, ac, enm);
2292 		while (enm != NULL) {
2293 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2294 
2295 			/* Just want the 6 least significant bits. */
2296 			crc &= 0x3f;
2297 
2298 			/* Set the corresponding bit in the hash table. */
2299 			mchash[crc >> 5] |= 1 << (crc & 0x1f);
2300 
2301 			ETHER_NEXT_MULTI(step, enm);
2302 		}
2303 	}
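	/*
	 * For example, a CRC whose low 6 bits are 0x27 sets bit 7 of
	 * mchash[1]: 0x27 >> 5 == 1 and 0x27 & 0x1f == 7.
	 */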
2304 
2305 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2306 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2307 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2308 }
2309