xref: /openbsd-src/sys/dev/pci/if_jme.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: if_jme.c,v 1.35 2014/01/27 12:04:46 brad Exp $	*/
2 /*-
3  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
29  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
30  */
31 
32 #include "bpfilter.h"
33 #include "vlan.h"
34 
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/sockio.h>
40 #include <sys/mbuf.h>
41 #include <sys/queue.h>
42 #include <sys/kernel.h>
43 #include <sys/device.h>
44 #include <sys/timeout.h>
45 #include <sys/socket.h>
46 
47 #include <machine/bus.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/ip.h>
57 #include <netinet/if_ether.h>
58 #endif
59 
60 #include <net/if_types.h>
61 #include <net/if_vlan_var.h>
62 
63 #if NBPFILTER > 0
64 #include <net/bpf.h>
65 #endif
66 
67 #include <dev/mii/mii.h>
68 #include <dev/mii/miivar.h>
69 #include <dev/mii/jmphyreg.h>
70 
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
73 #include <dev/pci/pcidevs.h>
74 
75 #include <dev/pci/if_jmereg.h>
76 #include <dev/pci/if_jmevar.h>
77 
78 /* Define the following to enable printing of Rx errors. */
79 #undef	JME_SHOW_ERRORS
80 
81 int	jme_match(struct device *, void *, void *);
82 void	jme_map_intr_vector(struct jme_softc *);
83 void	jme_attach(struct device *, struct device *, void *);
84 int	jme_detach(struct device *, int);
85 
86 int	jme_miibus_readreg(struct device *, int, int);
87 void	jme_miibus_writereg(struct device *, int, int, int);
88 void	jme_miibus_statchg(struct device *);
89 
90 int	jme_init(struct ifnet *);
91 int	jme_ioctl(struct ifnet *, u_long, caddr_t);
92 
93 void	jme_start(struct ifnet *);
94 void	jme_watchdog(struct ifnet *);
95 void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
96 int	jme_mediachange(struct ifnet *);
97 
98 int	jme_intr(void *);
99 void	jme_txeof(struct jme_softc *);
100 void	jme_rxeof(struct jme_softc *);
101 
102 int	jme_dma_alloc(struct jme_softc *);
103 void	jme_dma_free(struct jme_softc *);
104 int	jme_init_rx_ring(struct jme_softc *);
105 void	jme_init_tx_ring(struct jme_softc *);
106 void	jme_init_ssb(struct jme_softc *);
107 int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
108 int	jme_encap(struct jme_softc *, struct mbuf **);
109 void	jme_rxpkt(struct jme_softc *);
110 
111 void	jme_tick(void *);
112 void	jme_stop(struct jme_softc *);
113 void	jme_reset(struct jme_softc *);
114 void	jme_set_vlan(struct jme_softc *);
115 void	jme_iff(struct jme_softc *);
116 void	jme_stop_tx(struct jme_softc *);
117 void	jme_stop_rx(struct jme_softc *);
118 void	jme_mac_config(struct jme_softc *);
119 void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
120 int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
121 int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
122 void	jme_discard_rxbufs(struct jme_softc *, int, int);
123 #ifdef notyet
124 void	jme_setwol(struct jme_softc *);
125 void	jme_setlinkspeed(struct jme_softc *);
126 #endif
127 
128 /*
129  * Devices supported by this driver.
130  */
131 const struct pci_matchid jme_devices[] = {
132 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
133 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
134 };
135 
136 struct cfattach jme_ca = {
137 	sizeof (struct jme_softc), jme_match, jme_attach
138 };
139 
140 struct cfdriver jme_cd = {
141 	NULL, "jme", DV_IFNET
142 };
143 
144 int jmedebug = 0;
145 #define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)
146 
147 /*
148  *	Read a PHY register on the MII of the JMC250.
149  */
150 int
151 jme_miibus_readreg(struct device *dev, int phy, int reg)
152 {
153 	struct jme_softc *sc = (struct jme_softc *)dev;
154 	uint32_t val;
155 	int i;
156 
157 	/* For FPGA version, PHY address 0 should be ignored. */
158 	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
159 		return (0);
160 
161 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
162 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
163 
164 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
165 		DELAY(1);
166 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
167 			break;
168 	}
169 	if (i == 0) {
170 		printf("%s: phy read timeout: phy %d, reg %d\n",
171 		    sc->sc_dev.dv_xname, phy, reg);
172 		return (0);
173 	}
174 
175 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
176 }
177 
178 /*
179  *	Write a PHY register on the MII of the JMC250.
180  */
181 void
182 jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
183 {
184 	struct jme_softc *sc = (struct jme_softc *)dev;
185 	int i;
186 
187 	/* For FPGA version, PHY address 0 should be ignored. */
188 	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
189 		return;
190 
191 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
192 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
193 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
194 
195 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
196 		DELAY(1);
197 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
198 			break;
199 	}
200 	if (i == 0) {
201 		printf("%s: phy write timeout: phy %d, reg %d\n",
202 		    sc->sc_dev.dv_xname, phy, reg);
203 	}
204 }
205 
206 /*
207  *	Callback from MII layer when media changes.
208  */
209 void
210 jme_miibus_statchg(struct device *dev)
211 {
212 	struct jme_softc *sc = (struct jme_softc *)dev;
213 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
214 	struct mii_data *mii;
215 	struct jme_txdesc *txd;
216 	bus_addr_t paddr;
217 	int i;
218 
219 	if ((ifp->if_flags & IFF_RUNNING) == 0)
220 		return;
221 
222 	mii = &sc->sc_miibus;
223 
224 	sc->jme_flags &= ~JME_FLAG_LINK;
225 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
226 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
227 		case IFM_10_T:
228 		case IFM_100_TX:
229 			sc->jme_flags |= JME_FLAG_LINK;
230 			break;
231 		case IFM_1000_T:
232 			if (sc->jme_caps & JME_CAP_FASTETH)
233 				break;
234 			sc->jme_flags |= JME_FLAG_LINK;
235 			break;
236 		default:
237 			break;
238 		}
239 	}
240 
241 	/*
242 	 * Disabling the Rx/Tx MACs has the side effect of resetting
243 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
244 	 * the Tx/Rx descriptor rings, so the driver has to reset its
245 	 * internal producer/consumer pointers and reclaim any
246 	 * allocated resources.  Note that just saving the values of
247 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
248 	 * MAC and restoring them afterwards is not sufficient to
249 	 * guarantee a correct MAC state, because stopping the MAC
250 	 * can take a while and the hardware may have updated the
251 	 * JME_TXNDA/JME_RXNDA registers again during the stop
252 	 * operation.
253 	 */
254 
255 	/* Disable interrupts */
256 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
257 
258 	/* Stop driver */
259 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
260 	ifp->if_timer = 0;
261 	timeout_del(&sc->jme_tick_ch);
262 
263 	/* Stop receiver/transmitter. */
264 	jme_stop_rx(sc);
265 	jme_stop_tx(sc);
266 
267 	jme_rxeof(sc);
268 	if (sc->jme_cdata.jme_rxhead != NULL)
269 		m_freem(sc->jme_cdata.jme_rxhead);
270 	JME_RXCHAIN_RESET(sc);
271 
272 	jme_txeof(sc);
273 	if (sc->jme_cdata.jme_tx_cnt != 0) {
274 		/* Remove queued packets for transmit. */
275 		for (i = 0; i < JME_TX_RING_CNT; i++) {
276 			txd = &sc->jme_cdata.jme_txdesc[i];
277 			if (txd->tx_m != NULL) {
278 				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
279 				m_freem(txd->tx_m);
280 				txd->tx_m = NULL;
281 				txd->tx_ndesc = 0;
282 				ifp->if_oerrors++;
283 			}
284 		}
285 	}
286 
287 	/*
288 	 * Reuse configured Rx descriptors and reset
289 	 * producer/consumer index.
290 	 */
291 	sc->jme_cdata.jme_rx_cons = 0;
292 
293 	jme_init_tx_ring(sc);
294 
295 	/* Initialize shadow status block. */
296 	jme_init_ssb(sc);
297 
298 	/* Program MAC with resolved speed/duplex/flow-control. */
299 	if (sc->jme_flags & JME_FLAG_LINK) {
300 		jme_mac_config(sc);
301 
302 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
303 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
304 
305 		/* Set Tx ring address to the hardware. */
306 		paddr = JME_TX_RING_ADDR(sc, 0);
307 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
308 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
309 
310 		/* Set Rx ring address to the hardware. */
311 		paddr = JME_RX_RING_ADDR(sc, 0);
312 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
313 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
314 
315 		/* Restart receiver/transmitter. */
316 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
317 		    RXCSR_RXQ_START);
318 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
319 	}
320 
321 	ifp->if_flags |= IFF_RUNNING;
322 	ifp->if_flags &= ~IFF_OACTIVE;
323 	timeout_add_sec(&sc->jme_tick_ch, 1);
324 
325 	/* Reenable interrupts. */
326 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
327 }
328 
329 /*
330  *	Get the current interface media status.
331  */
332 void
333 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
334 {
335 	struct jme_softc *sc = ifp->if_softc;
336 	struct mii_data *mii = &sc->sc_miibus;
337 
338 	mii_pollstat(mii);
339 	ifmr->ifm_status = mii->mii_media_status;
340 	ifmr->ifm_active = mii->mii_media_active;
341 }
342 
343 /*
344  *	Set hardware to newly-selected media.
345  */
346 int
347 jme_mediachange(struct ifnet *ifp)
348 {
349 	struct jme_softc *sc = ifp->if_softc;
350 	struct mii_data *mii = &sc->sc_miibus;
351 	int error;
352 
353 	if (mii->mii_instance != 0) {
354 		struct mii_softc *miisc;
355 
356 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
357 			mii_phy_reset(miisc);
358 	}
359 	error = mii_mediachg(mii);
360 
361 	return (error);
362 }
363 
364 int
365 jme_match(struct device *dev, void *match, void *aux)
366 {
367 	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
368 	    sizeof (jme_devices) / sizeof (jme_devices[0]));
369 }
370 
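/*
 *	Read one byte from the EEPROM through the SMB interface: wait for
 *	the controller to go idle, trigger a read at the given address and
 *	wait for the command to complete before extracting the data.
 */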
371 int
372 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
373 {
374 	uint32_t reg;
375 	int i;
376 
377 	*val = 0;
378 	for (i = JME_TIMEOUT; i > 0; i--) {
379 		reg = CSR_READ_4(sc, JME_SMBCSR);
380 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
381 			break;
382 		DELAY(1);
383 	}
384 
385 	if (i == 0) {
386 		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
387 		return (ETIMEDOUT);
388 	}
389 
390 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
391 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
392 	for (i = JME_TIMEOUT; i > 0; i--) {
393 		DELAY(1);
394 		reg = CSR_READ_4(sc, JME_SMBINTF);
395 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
396 			break;
397 	}
398 
399 	if (i == 0) {
400 		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
401 		return (ETIMEDOUT);
402 	}
403 
404 	reg = CSR_READ_4(sc, JME_SMBINTF);
405 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
406 
407 	return (0);
408 }
409 
410 int
411 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
412 {
413 	uint8_t fup, reg, val;
414 	uint32_t offset;
415 	int match;
416 
417 	offset = 0;
418 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
419 	    fup != JME_EEPROM_SIG0)
420 		return (ENOENT);
421 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
422 	    fup != JME_EEPROM_SIG1)
423 		return (ENOENT);
424 	match = 0;
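	/*
	 * Walk the EEPROM descriptors; each one is JME_EEPROM_DESC_BYTES
	 * long and consists of a function/page byte, a register offset
	 * and a data byte.  Collect the bytes written to the PAR0..PAR0+5
	 * station address registers until the full MAC address has been
	 * assembled or the end-of-descriptors marker is seen.
	 */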
425 	do {
426 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
427 			break;
428 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
429 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
430 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
431 				break;
432 			if (reg >= JME_PAR0 &&
433 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
434 				if (jme_eeprom_read_byte(sc, offset + 2,
435 				    &val) != 0)
436 					break;
437 				eaddr[reg - JME_PAR0] = val;
438 				match++;
439 			}
440 		}
441 		/* Check for the end of EEPROM descriptor. */
442 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
443 			break;
444 		/* Try next eeprom descriptor. */
445 		offset += JME_EEPROM_DESC_BYTES;
446 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
447 
448 	if (match == ETHER_ADDR_LEN)
449 		return (0);
450 
451 	return (ENOENT);
452 }
453 
454 void
455 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
456 {
457 	uint32_t par0, par1;
458 
459 	/* Read station address. */
460 	par0 = CSR_READ_4(sc, JME_PAR0);
461 	par1 = CSR_READ_4(sc, JME_PAR1);
462 	par1 &= 0xFFFF;
463 
464 	eaddr[0] = (par0 >> 0) & 0xFF;
465 	eaddr[1] = (par0 >> 8) & 0xFF;
466 	eaddr[2] = (par0 >> 16) & 0xFF;
467 	eaddr[3] = (par0 >> 24) & 0xFF;
468 	eaddr[4] = (par1 >> 0) & 0xFF;
469 	eaddr[5] = (par1 >> 8) & 0xFF;
470 }
471 
472 void
473 jme_map_intr_vector(struct jme_softc *sc)
474 {
475 	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
476 
477 	bzero(map, sizeof(map));
478 
479 	/* Map Tx interrupt sources to MSI/MSIX vector 2. */
480 	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
481 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
482 	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
483 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
484 	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
485 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
486 	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
487 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
488 	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
489 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
490 	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
491 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
492 	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
493 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
494 	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
495 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
496 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
497 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
498 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
499 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
500 
501 	/* Map Rx interrupt sources to MSI/MSIX vector 1. */
502 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
503 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
504 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
505 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
506 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
507 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
508 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
509 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
510 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
511 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
512 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
513 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
514 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
515 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
516 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
517 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
518 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
519 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
520 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
521 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
522 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
523 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
524 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
525 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
526 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
527 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
528 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
529 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
530 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
531 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
532 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
533 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
534 
535 	/* All other interrupt sources stay on MSI/MSIX vector 0; program the map. */
536 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
537 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
538 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
539 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
540 }
541 
542 void
543 jme_attach(struct device *parent, struct device *self, void *aux)
544 {
545 	struct jme_softc *sc = (struct jme_softc *)self;
546 	struct pci_attach_args *pa = aux;
547 	pci_chipset_tag_t pc = pa->pa_pc;
548 	pci_intr_handle_t ih;
549 	const char *intrstr;
550 	pcireg_t memtype;
551 
552 	struct ifnet *ifp;
553 	uint32_t reg;
554 	int error = 0;
555 
556 	/*
557 	 * Allocate IO memory
558 	 *
559 	 * The JMC250 supports both memory-mapped and I/O register space
560 	 * access.  Since I/O access would require using different BARs
561 	 * to reach the registers, it is a waste of time to use it; the
562 	 * JMC250 maps its entire register space in 16K of memory
563 	 * space.
564 	 */
565 
566 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
567 	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
568 	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
569 		printf(": can't map mem space\n");
570 		return;
571 	}
572 
573 	if (pci_intr_map_msi(pa, &ih) == 0)
574 		jme_map_intr_vector(sc);
575 	else if (pci_intr_map(pa, &ih) != 0) {
576 		printf(": can't map interrupt\n");
577 		return;
578 	}
579 
580 	/*
581 	 * Allocate IRQ
582 	 */
583 	intrstr = pci_intr_string(pc, ih);
584 	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
585 	    sc->sc_dev.dv_xname);
586 	if (sc->sc_irq_handle == NULL) {
587 		printf(": could not establish interrupt");
588 		if (intrstr != NULL)
589 			printf(" at %s", intrstr);
590 		printf("\n");
591 		return;
592 	}
593 	printf(": %s", intrstr);
594 
595 	sc->sc_dmat = pa->pa_dmat;
596 	sc->jme_pct = pa->pa_pc;
597 	sc->jme_pcitag = pa->pa_tag;
598 
599 	/*
600 	 * Extract FPGA revision
601 	 */
602 	reg = CSR_READ_4(sc, JME_CHIPMODE);
603 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
604 	    CHIPMODE_NOT_FPGA) {
605 		sc->jme_caps |= JME_CAP_FPGA;
606 
607 		if (jmedebug) {
608 			printf("%s: FPGA revision : 0x%04x\n",
609 			    sc->sc_dev.dv_xname,
610 			    (reg & CHIPMODE_FPGA_REV_MASK) >>
611 			    CHIPMODE_FPGA_REV_SHIFT);
612 		}
613 	}
614 
615 	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;
616 
617 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
618 	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
619 		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;
620 
621 	/* Reset the ethernet controller. */
622 	jme_reset(sc);
623 
624 	/* Get station address. */
625 	reg = CSR_READ_4(sc, JME_SMBCSR);
626 	if (reg & SMBCSR_EEPROM_PRESENT)
627 		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
628 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
629 		if (error != 0 && (jmedebug)) {
630 			printf("%s: ethernet hardware address "
631 			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
632 		}
633 		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
634 	}
635 
636 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
637 
638 	/*
639 	 * Save PHY address.
640 	 * The integrated JR0211 has a fixed PHY address, whereas the FPGA
641 	 * version requires PHY probing to get the correct PHY address.
642 	 */
643 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
644 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
645 		    GPREG0_PHY_ADDR_MASK;
646 		if (jmedebug) {
647 			printf("%s: PHY is at address %d.\n",
648 			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
649 		}
650 	} else {
651 		sc->jme_phyaddr = 0;
652 	}
653 
654 	/* Set max allowable DMA size. */
655 	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
656 	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
657 
658 #ifdef notyet
659 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
660 		sc->jme_caps |= JME_CAP_PMCAP;
661 #endif
662 
663 	/* Allocate DMA stuffs */
664 	error = jme_dma_alloc(sc);
665 	if (error)
666 		goto fail;
667 
668 	ifp = &sc->sc_arpcom.ac_if;
669 	ifp->if_softc = sc;
670 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
671 	ifp->if_ioctl = jme_ioctl;
672 	ifp->if_start = jme_start;
673 	ifp->if_watchdog = jme_watchdog;
674 	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
675 	IFQ_SET_READY(&ifp->if_snd);
676 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
677 
678 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
679 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 |
680 	    IFCAP_CSUM_UDPv6;
681 
682 #if NVLAN > 0
683 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
684 #endif
685 
686 	/* Set up MII bus. */
687 	sc->sc_miibus.mii_ifp = ifp;
688 	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
689 	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
690 	sc->sc_miibus.mii_statchg = jme_miibus_statchg;
691 
692 	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
693 	    jme_mediastatus);
694 	mii_attach(self, &sc->sc_miibus, 0xffffffff,
695 	    sc->jme_caps & JME_CAP_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
696 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
697 
698 	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
699 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
700 		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
701 		    0, NULL);
702 		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
703 	} else
704 		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
705 
706 	/*
707 	 * Saving the PHY address for the FPGA-mode PHY is not handled; not production hw
708 	 */
709 
710 	if_attach(ifp);
711 	ether_ifattach(ifp);
712 
713 	timeout_set(&sc->jme_tick_ch, jme_tick, sc);
714 
715 	return;
716 fail:
717 	jme_detach(&sc->sc_dev, 0);
718 }
719 
720 int
721 jme_detach(struct device *self, int flags)
722 {
723 	struct jme_softc *sc = (struct jme_softc *)self;
724 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
725 	int s;
726 
727 	s = splnet();
728 	jme_stop(sc);
729 	splx(s);
730 
731 	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
732 
733 	/* Delete all remaining media. */
734 	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
735 
736 	ether_ifdetach(ifp);
737 	if_detach(ifp);
738 	jme_dma_free(sc);
739 
740 	if (sc->sc_irq_handle != NULL) {
741 		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
742 		sc->sc_irq_handle = NULL;
743 	}
744 
745 	return (0);
746 }
747 
748 int
749 jme_dma_alloc(struct jme_softc *sc)
750 {
751 	struct jme_txdesc *txd;
752 	struct jme_rxdesc *rxd;
753 	int error, i, nsegs;
754 
755 	/*
756 	 * Create DMA stuffs for TX ring
757 	 */
758 
759 	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
760 	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
761 	    &sc->jme_cdata.jme_tx_ring_map);
762 	if (error)
763 		return (ENOBUFS);
764 
765 	/* Allocate DMA'able memory for TX ring */
766 	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
767 	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
768 	    BUS_DMA_WAITOK);
769 /* XXX zero */
770 	if (error) {
771 		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
772 		    sc->sc_dev.dv_xname);
773 		return error;
774 	}
775 
776 	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
777 	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
778 	    BUS_DMA_NOWAIT);
779 	if (error)
780 		return (ENOBUFS);
781 
782 	/*  Load the DMA map for Tx ring. */
783 	error = bus_dmamap_load(sc->sc_dmat,
784 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
785 	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
786 	if (error) {
787 		printf("%s: could not load DMA'able memory for Tx ring.\n",
788 		    sc->sc_dev.dv_xname);
789 		bus_dmamem_free(sc->sc_dmat,
790 		    &sc->jme_rdata.jme_tx_ring_seg, 1);
791 		return error;
792 	}
793 	sc->jme_rdata.jme_tx_ring_paddr =
794 	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;
795 
796 	/*
797 	 * Create DMA stuffs for RX ring
798 	 */
799 
800 	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
801 	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
802 	    &sc->jme_cdata.jme_rx_ring_map);
803 	if (error)
804 		return (ENOBUFS);
805 
806 	/* Allocate DMA'able memory for RX ring */
807 	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
808 	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
809 	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
810 /* XXX zero */
811 	if (error) {
812 		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
813 		    sc->sc_dev.dv_xname);
814 		return error;
815 	}
816 
817 	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
818 	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
819 	    BUS_DMA_NOWAIT);
820 	if (error)
821 		return (ENOBUFS);
822 
823 	/* Load the DMA map for Rx ring. */
824 	error = bus_dmamap_load(sc->sc_dmat,
825 	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
826 	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
827 	if (error) {
828 		printf("%s: could not load DMA'able memory for Rx ring.\n",
829 		    sc->sc_dev.dv_xname);
830 		bus_dmamem_free(sc->sc_dmat,
831 		    &sc->jme_rdata.jme_rx_ring_seg, 1);
832 		return error;
833 	}
834 	sc->jme_rdata.jme_rx_ring_paddr =
835 	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;
836 
837 #if 0
838 	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
839 	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
840 	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
841 	if ((JME_ADDR_HI(tx_ring_end) !=
842 	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
843 	    (JME_ADDR_HI(rx_ring_end) !=
844 	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
845 		printf("%s: 4GB boundary crossed, switching to 32bit "
846 		    "DMA address mode.\n", sc->sc_dev.dv_xname);
847 		jme_dma_free(sc);
848 		/* Limit DMA address space to 32bit and try again. */
849 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
850 		goto again;
851 	}
852 #endif
853 
854 	/*
855 	 * Create DMA stuffs for shadow status block
856 	 */
857 
858 	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
859 	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
860 	if (error)
861 		return (ENOBUFS);
862 
863 	/* Allocate DMA'able memory for shared status block. */
864 	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
865 	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
866 	if (error) {
867 		printf("%s: could not allocate DMA'able "
868 		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
869 		return error;
870 	}
871 
872 	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
873 	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
874 	    BUS_DMA_NOWAIT);
875 	if (error)
876 		return (ENOBUFS);
877 
878 	/* Load the DMA map for shared status block */
879 	error = bus_dmamap_load(sc->sc_dmat,
880 	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
881 	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
882 	if (error) {
883 		printf("%s: could not load DMA'able memory "
884 		    "for shared status block.\n", sc->sc_dev.dv_xname);
885 		bus_dmamem_free(sc->sc_dmat,
886 		    &sc->jme_rdata.jme_ssb_block_seg, 1);
887 		return error;
888 	}
889 	sc->jme_rdata.jme_ssb_block_paddr =
890 	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;
891 
892 	/*
893 	 * Create DMA stuffs for TX buffers
894 	 */
895 
896 	/* Create DMA maps for Tx buffers. */
897 	for (i = 0; i < JME_TX_RING_CNT; i++) {
898 		txd = &sc->jme_cdata.jme_txdesc[i];
899 		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
900 		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
901 		    &txd->tx_dmamap);
902 		if (error) {
903 			int j;
904 
905 			printf("%s: could not create %dth Tx dmamap.\n",
906 			    sc->sc_dev.dv_xname, i);
907 
908 			for (j = 0; j < i; ++j) {
909 				txd = &sc->jme_cdata.jme_txdesc[j];
910 				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
911 			}
912 			return error;
913 		}
914 
915 	}
916 
917 	/*
918 	 * Create DMA stuffs for RX buffers
919 	 */
920 
921 	/* Create DMA maps for Rx buffers. */
922 	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
923 	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
924 	if (error) {
925 		printf("%s: could not create spare Rx dmamap.\n",
926 		    sc->sc_dev.dv_xname);
927 		return error;
928 	}
929 	for (i = 0; i < JME_RX_RING_CNT; i++) {
930 		rxd = &sc->jme_cdata.jme_rxdesc[i];
931 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
932 		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
933 		if (error) {
934 			int j;
935 
936 			printf("%s: could not create %dth Rx dmamap.\n",
937 			    sc->sc_dev.dv_xname, i);
938 
939 			for (j = 0; j < i; ++j) {
940 				rxd = &sc->jme_cdata.jme_rxdesc[j];
941 				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
942 			}
943 			bus_dmamap_destroy(sc->sc_dmat,
944 			    sc->jme_cdata.jme_rx_sparemap);
945 			sc->jme_cdata.jme_rx_tag = NULL;
946 			return error;
947 		}
948 	}
949 
950 	return 0;
951 }
952 
953 void
954 jme_dma_free(struct jme_softc *sc)
955 {
956 	struct jme_txdesc *txd;
957 	struct jme_rxdesc *rxd;
958 	int i;
959 
960 	/* Tx ring */
961 	bus_dmamap_unload(sc->sc_dmat,
962 	    sc->jme_cdata.jme_tx_ring_map);
963 	bus_dmamem_free(sc->sc_dmat,
964 	    &sc->jme_rdata.jme_tx_ring_seg, 1);
965 
966 	/* Rx ring */
967 	bus_dmamap_unload(sc->sc_dmat,
968 	    sc->jme_cdata.jme_rx_ring_map);
969 	bus_dmamem_free(sc->sc_dmat,
970 	    &sc->jme_rdata.jme_rx_ring_seg, 1);
971 
972 	/* Tx buffers */
973 	for (i = 0; i < JME_TX_RING_CNT; i++) {
974 		txd = &sc->jme_cdata.jme_txdesc[i];
975 		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
976 	}
977 
978 	/* Rx buffers */
979 	for (i = 0; i < JME_RX_RING_CNT; i++) {
980 		rxd = &sc->jme_cdata.jme_rxdesc[i];
981 		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
982 	}
983 	bus_dmamap_destroy(sc->sc_dmat,
984 	    sc->jme_cdata.jme_rx_sparemap);
985 
986 	/* Shadow status block. */
987 	bus_dmamap_unload(sc->sc_dmat,
988 	    sc->jme_cdata.jme_ssb_map);
989 	bus_dmamem_free(sc->sc_dmat,
990 	    &sc->jme_rdata.jme_ssb_block_seg, 1);
991 }
992 
993 #ifdef notyet
994 /*
995  * Unlike other ethernet controllers, the JMC250 requires the link
996  * speed to be explicitly reset to 10/100Mbps, as a gigabit link
997  * will consume more than 375mA.
998  * Note that we reset the link speed to 10/100Mbps with
999  * auto-negotiation, but we don't know whether that operation will
1000  * succeed as we have no control after powering off.  If the
1001  * renegotiation fails, WOL may not work.  Running at 1Gbps draws
1002  * more power than the 375mA at 3.3V specified by the PCI
1003  * specification, and that would result in power to the ethernet
1004  * controller being shut down completely.
1005  *
1006  * TODO
1007  *  Save current negotiated media speed/duplex/flow-control
1008  *  to softc and restore the same link again after resuming.
1009  *  PHY handling such as power down/resetting to 100Mbps
1010  *  may be better handled in suspend method in phy driver.
1011  */
1012 void
1013 jme_setlinkspeed(struct jme_softc *sc)
1014 {
1015 	struct mii_data *mii;
1016 	int aneg, i;
1017 
1018 	JME_LOCK_ASSERT(sc);
1019 
1020 	mii = &sc->sc_miibus;
1021 	mii_pollstat(mii);
1022 	aneg = 0;
1023 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1024 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1025 		case IFM_10_T:
1026 		case IFM_100_TX:
1027 			return;
1028 		case IFM_1000_T:
1029 			aneg++;
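			/* FALLTHROUGH */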
1030 		default:
1031 			break;
1032 		}
1033 	}
1034 	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1035 	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
1036 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1037 	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
1038 	    BMCR_AUTOEN | BMCR_STARTNEG);
1039 	DELAY(1000);
1040 	if (aneg != 0) {
1041 		/* Poll link state until jme(4) gets a 10/100 link. */
1042 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1043 			mii_pollstat(mii);
1044 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1045 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1046 				case IFM_10_T:
1047 				case IFM_100_TX:
1048 					jme_mac_config(sc);
1049 					return;
1050 				default:
1051 					break;
1052 				}
1053 			}
1054 			JME_UNLOCK(sc);
1055 			pause("jmelnk", hz);
1056 			JME_LOCK(sc);
1057 		}
1058 		if (i == MII_ANEGTICKS_GIGE)
1059 			printf("%s: establishing link failed, "
1060 			    "WOL may not work!\n", sc->sc_dev.dv_xname);
1061 	}
1062 	/*
1063 	 * No link, force MAC to have 100Mbps, full-duplex link.
1064 	 * This is the last resort and may/may not work.
1065 	 */
1066 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1067 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1068 	jme_mac_config(sc);
1069 }
1070 
1071 void
1072 jme_setwol(struct jme_softc *sc)
1073 {
1074 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1075 	uint32_t gpr, pmcs;
1076 	uint16_t pmstat;
1077 	int pmc;
1078 
1079 	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
1080 		/* No PME capability, PHY power down. */
1081 		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1082 		    MII_BMCR, BMCR_PDOWN);
1083 		return;
1084 	}
1085 
1086 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1087 	pmcs = CSR_READ_4(sc, JME_PMCS);
1088 	pmcs &= ~PMCS_WOL_ENB_MASK;
1089 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1090 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1091 		/* Enable PME message. */
1092 		gpr |= GPREG0_PME_ENB;
1093 		/* For gigabit controllers, reset link speed to 10/100. */
1094 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1095 			jme_setlinkspeed(sc);
1096 	}
1097 
1098 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1099 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1100 
1101 	/* Request PME. */
1102 	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
1103 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1104 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1105 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1106 	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1107 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1108 		/* No WOL, PHY power down. */
1109 		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1110 		    MII_BMCR, BMCR_PDOWN);
1111 	}
1112 }
1113 #endif
1114 
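/*
 *	Queue one frame for transmission: map the mbuf chain for DMA
 *	(defragmenting once if it has too many segments), build a leading
 *	command descriptor followed by one data descriptor per DMA segment,
 *	and finally hand the chain to the hardware by setting JME_TD_OWN on
 *	the first descriptor.
 */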
1115 int
1116 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1117 {
1118 	struct jme_txdesc *txd;
1119 	struct jme_desc *desc;
1120 	struct mbuf *m;
1121 	int error, i, prod;
1122 	uint32_t cflags;
1123 
1124 	prod = sc->jme_cdata.jme_tx_prod;
1125 	txd = &sc->jme_cdata.jme_txdesc[prod];
1126 
1127 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
1128 				     *m_head, BUS_DMA_NOWAIT);
1129 	if (error != 0 && error != EFBIG)
1130 		goto drop;
1131 	if (error != 0) {
1132 		if (m_defrag(*m_head, M_DONTWAIT)) {
1133 			error = ENOBUFS;
1134 			goto drop;
1135 		}
1136 		error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
1137 					     *m_head, BUS_DMA_NOWAIT);
1138 		if (error != 0)
1139 			goto drop;
1140 	}
1141 
1142 	/*
1143 	 * Check for descriptor overrun, leaving one free descriptor.
1144 	 * Since we always use 64bit address mode for transmitting,
1145 	 * each Tx request requires one more dummy descriptor.
1146 	 */
1147 	if (sc->jme_cdata.jme_tx_cnt + txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD >
1148 	    JME_TX_RING_CNT - JME_TXD_RSVD) {
1149 		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1150 		return (ENOBUFS);
1151 	}
1152 
1153 	m = *m_head;
1154 	cflags = 0;
1155 
1156 	/* Configure checksum offload. */
1157 	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1158 		cflags |= JME_TD_IPCSUM;
1159 	if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1160 		cflags |= JME_TD_TCPCSUM;
1161 	if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1162 		cflags |= JME_TD_UDPCSUM;
1163 
1164 #if NVLAN > 0
1165 	/* Configure VLAN. */
1166 	if (m->m_flags & M_VLANTAG) {
1167 		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1168 		cflags |= JME_TD_VLAN_TAG;
1169 	}
1170 #endif
1171 
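	/*
	 * The first descriptor is a command descriptor: it carries the
	 * checksum/VLAN flags and the total packet length (in the addr_hi
	 * field); the data segments follow in the descriptors below.
	 */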
1172 	desc = &sc->jme_rdata.jme_tx_ring[prod];
1173 	desc->flags = htole32(cflags);
1174 	desc->buflen = 0;
1175 	desc->addr_hi = htole32(m->m_pkthdr.len);
1176 	desc->addr_lo = 0;
1177 	sc->jme_cdata.jme_tx_cnt++;
1178 	JME_DESC_INC(prod, JME_TX_RING_CNT);
1179 	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
1180 		desc = &sc->jme_rdata.jme_tx_ring[prod];
1181 		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1182 		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
1183 		desc->addr_hi =
1184 		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
1185 		desc->addr_lo =
1186 		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));
1187 		sc->jme_cdata.jme_tx_cnt++;
1188 		JME_DESC_INC(prod, JME_TX_RING_CNT);
1189 	}
1190 
1191 	/* Update producer index. */
1192 	sc->jme_cdata.jme_tx_prod = prod;
1193 	/*
1194 	 * Finally, request an interrupt and give ownership of the
1195 	 * first descriptor to the hardware.
1196 	 */
1197 	desc = txd->tx_desc;
1198 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1199 
1200 	txd->tx_m = m;
1201 	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD;
1202 
1203 	/* Sync descriptors. */
1204 	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
1205 	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1206 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
1207 	     sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1208 
1209 	return (0);
1210 
1211   drop:
1212 	m_freem(*m_head);
1213 	*m_head = NULL;
1214 	return (error);
1215 }
1216 
1217 void
1218 jme_start(struct ifnet *ifp)
1219 {
1220 	struct jme_softc *sc = ifp->if_softc;
1221 	struct mbuf *m_head;
1222 	int enq = 0;
1223 
1224 	/* Reclaim transmitted frames. */
1225 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1226 		jme_txeof(sc);
1227 
1228 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1229 		return;
1230 	if ((sc->jme_flags & JME_FLAG_LINK) == 0)
1231 		return;
1232 	if (IFQ_IS_EMPTY(&ifp->if_snd))
1233 		return;
1234 
1235 	for (;;) {
1236 		/*
1237 		 * Check number of available TX descs, always
1238 		 * leave JME_TXD_RSVD free TX descs.
1239 		 */
1240 		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD >
1241 		    JME_TX_RING_CNT - JME_TXD_RSVD) {
1242 			ifp->if_flags |= IFF_OACTIVE;
1243 			break;
1244 		}
1245 
1246 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1247 		if (m_head == NULL)
1248 			break;
1249 
1250 		/*
1251 		 * Pack the data into the transmit ring. If we
1252 		 * don't have room, set the OACTIVE flag and wait
1253 		 * for the NIC to drain the ring.
1254 		 */
1255 		if (jme_encap(sc, &m_head)) {
1256 			if (m_head == NULL)
1257 				ifp->if_oerrors++;
1258 			else {
1259 				IF_PREPEND(&ifp->if_snd, m_head);
1260 				ifp->if_flags |= IFF_OACTIVE;
1261 			}
1262 			break;
1263 		}
1264 
1265 		enq++;
1266 
1267 #if NBPFILTER > 0
1268 		/*
1269 		 * If there's a BPF listener, bounce a copy of this frame
1270 		 * to him.
1271 		 */
1272 		if (ifp->if_bpf != NULL)
1273 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1274 #endif
1275 	}
1276 
1277 	if (enq > 0) {
1278 		/*
1279 		 * Reading TXCSR takes a very long time under heavy load,
1280 		 * so cache the TXCSR value and write it ORed with the
1281 		 * kick command to TXCSR.  This saves one register access
1282 		 * cycle.
1283 		 */
1284 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1285 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1286 		/* Set a timeout in case the chip goes out to lunch. */
1287 		ifp->if_timer = JME_TX_TIMEOUT;
1288 	}
1289 }
1290 
1291 void
1292 jme_watchdog(struct ifnet *ifp)
1293 {
1294 	struct jme_softc *sc = ifp->if_softc;
1295 
1296 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1297 		printf("%s: watchdog timeout (missed link)\n",
1298 		    sc->sc_dev.dv_xname);
1299 		ifp->if_oerrors++;
1300 		jme_init(ifp);
1301 		return;
1302 	}
1303 
1304 	jme_txeof(sc);
1305 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1306 		printf("%s: watchdog timeout (missed Tx interrupts) "
1307 			  "-- recovering\n", sc->sc_dev.dv_xname);
1308 		jme_start(ifp);
1309 		return;
1310 	}
1311 
1312 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1313 	ifp->if_oerrors++;
1314 	jme_init(ifp);
1315 	jme_start(ifp);
1316 }
1317 
1318 int
1319 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1320 {
1321 	struct jme_softc *sc = ifp->if_softc;
1322 	struct mii_data *mii = &sc->sc_miibus;
1323 	struct ifaddr *ifa = (struct ifaddr *)data;
1324 	struct ifreq *ifr = (struct ifreq *)data;
1325 	int error = 0, s;
1326 
1327 	s = splnet();
1328 
1329 	switch (cmd) {
1330 	case SIOCSIFADDR:
1331 		ifp->if_flags |= IFF_UP;
1332 		if (!(ifp->if_flags & IFF_RUNNING))
1333 			jme_init(ifp);
1334 #ifdef INET
1335 		if (ifa->ifa_addr->sa_family == AF_INET)
1336 			arp_ifinit(&sc->sc_arpcom, ifa);
1337 #endif
1338 		break;
1339 
1340 	case SIOCSIFFLAGS:
1341 		if (ifp->if_flags & IFF_UP) {
1342 			if (ifp->if_flags & IFF_RUNNING)
1343 				error = ENETRESET;
1344 			else
1345 				jme_init(ifp);
1346 		} else {
1347 			if (ifp->if_flags & IFF_RUNNING)
1348 				jme_stop(sc);
1349 		}
1350 		break;
1351 
1352 	case SIOCSIFMEDIA:
1353 	case SIOCGIFMEDIA:
1354 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1355 		break;
1356 
1357 	default:
1358 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1359 	}
1360 
1361 	if (error == ENETRESET) {
1362 		if (ifp->if_flags & IFF_RUNNING)
1363 			jme_iff(sc);
1364 		error = 0;
1365 	}
1366 
1367 	splx(s);
1368 	return (error);
1369 }
1370 
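/*
 *	Reprogram the MAC for the speed/duplex/flow-control settings
 *	resolved by the PHY: collision handling and pause frames according
 *	to the duplex mode, per-speed GHC and clock source bits, and the
 *	chip-specific CRC error and packet loss workarounds.
 */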
1371 void
1372 jme_mac_config(struct jme_softc *sc)
1373 {
1374 	struct mii_data *mii;
1375 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1376 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1377 
1378 	mii = &sc->sc_miibus;
1379 
1380 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1381 	DELAY(10);
1382 	CSR_WRITE_4(sc, JME_GHC, 0);
1383 	ghc = 0;
1384 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1385 	rxmac &= ~RXMAC_FC_ENB;
1386 	txmac = CSR_READ_4(sc, JME_TXMAC);
1387 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1388 	txpause = CSR_READ_4(sc, JME_TXPFC);
1389 	txpause &= ~TXPFC_PAUSE_ENB;
1390 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1391 		ghc |= GHC_FULL_DUPLEX;
1392 		rxmac &= ~RXMAC_COLL_DET_ENB;
1393 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1394 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1395 		    TXMAC_FRAME_BURST);
1396 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1397 			txpause |= TXPFC_PAUSE_ENB;
1398 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1399 			rxmac |= RXMAC_FC_ENB;
1400 		/* Disable retry transmit timer/retry limit. */
1401 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1402 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1403 	} else {
1404 		rxmac |= RXMAC_COLL_DET_ENB;
1405 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1406 		/* Enable retry transmit timer/retry limit. */
1407 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1408 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1409 	}
1410 
1411 	/*
1412 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1413 	 */
1414 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1415 	gp1 &= ~GPREG1_HALF_PATCH;
1416 
1417 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1418 		hdx = 1;
1419 
1420 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1421 	case IFM_10_T:
1422 		ghc |= GHC_SPEED_10;
1423 		if (hdx)
1424 			gp1 |= GPREG1_HALF_PATCH;
1425 		break;
1426 
1427 	case IFM_100_TX:
1428 		ghc |= GHC_SPEED_100;
1429 		if (hdx)
1430 			gp1 |= GPREG1_HALF_PATCH;
1431 
1432 		/*
1433 		 * Use extended FIFO depth to workaround CRC errors
1434 		 * emitted by chips before JMC250B
1435 		 */
1436 		phyconf = JMPHY_CONF_EXTFIFO;
1437 		break;
1438 
1439 	case IFM_1000_T:
1440 		if (sc->jme_caps & JME_CAP_FASTETH)
1441 			break;
1442 
1443 		ghc |= GHC_SPEED_1000;
1444 		if (hdx)
1445 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1446 		break;
1447 
1448 	default:
1449 		break;
1450 	}
1451 
1452 	if (sc->jme_revfm >= 2) {
1453 		/* set clock sources for tx mac and offload engine */
1454 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
1455 			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
1456 		else
1457 			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
1458 	}
1459 
1460 	CSR_WRITE_4(sc, JME_GHC, ghc);
1461 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1462 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1463 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1464 
1465 	if (sc->jme_workaround & JME_WA_CRCERRORS) {
1466 		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1467 				    JMPHY_CONF, phyconf);
1468 	}
1469 	if (sc->jme_workaround & JME_WA_PACKETLOSS)
1470 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1471 }
1472 
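/*
 *	Interrupt handler: mask all interrupt sources, acknowledge the
 *	coalescing events, service Rx/Tx completions, kick the receiver if
 *	it reported empty descriptors and finally unmask the interrupts.
 */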
1473 int
1474 jme_intr(void *xsc)
1475 {
1476 	struct jme_softc *sc = xsc;
1477 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1478 	uint32_t status;
1479 	int claimed = 0;
1480 
1481 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1482 	if (status == 0 || status == 0xFFFFFFFF)
1483 		return (0);
1484 
1485 	/* Disable interrupts. */
1486 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1487 
1488 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1489 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1490 		goto back;
1491 
1492 	/* Reset PCC counter/timer and Ack interrupts. */
1493 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1494 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1495 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1496 	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1497 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
1498 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1499 
1500 	if (ifp->if_flags & IFF_RUNNING) {
1501 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1502 			jme_rxeof(sc);
1503 
1504 		if (status & INTR_RXQ_DESC_EMPTY) {
1505 			/*
1506 			 * Notify the hardware that new Rx buffers are
1507 			 * available.  Reading RXCSR takes a very long time
1508 			 * under heavy load, so cache the RXCSR value and
1509 			 * write it ORed with the kick command to RXCSR.
1510 			 * This saves one register access cycle.
1511 			 */
1512 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1513 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1514 		}
1515 
1516 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1517 			jme_txeof(sc);
1518 			jme_start(ifp);
1519 		}
1520 	}
1521 	claimed = 1;
1522 back:
1523 	/* Reenable interrupts. */
1524 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1525 
1526 	return (claimed);
1527 }
1528 
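/*
 *	Reclaim transmitted frames: walk the Tx ring from the consumer
 *	index, free the mbufs of frames whose first descriptor no longer
 *	has JME_TD_OWN set and update the active descriptor count.
 */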
1529 void
1530 jme_txeof(struct jme_softc *sc)
1531 {
1532 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1533 	struct jme_txdesc *txd;
1534 	uint32_t status;
1535 	int cons, nsegs;
1536 
1537 	cons = sc->jme_cdata.jme_tx_cons;
1538 	if (cons == sc->jme_cdata.jme_tx_prod)
1539 		return;
1540 
1541 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
1542 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1543 
1544 	/*
1545 	 * Go through our Tx list and free mbufs for those
1546 	 * frames which have been transmitted.
1547 	 */
1548 	while (cons != sc->jme_cdata.jme_tx_prod) {
1549 		txd = &sc->jme_cdata.jme_txdesc[cons];
1550 
1551 		if (txd->tx_m == NULL)
1552 			panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);
1553 
1554 		status = letoh32(txd->tx_desc->flags);
1555 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1556 			break;
1557 
1558 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1559 			ifp->if_oerrors++;
1560 		} else {
1561 			ifp->if_opackets++;
1562 			if (status & JME_TD_COLLISION) {
1563 				ifp->if_collisions +=
1564 				    letoh32(txd->tx_desc->buflen) &
1565 				    JME_TD_BUF_LEN_MASK;
1566 			}
1567 		}
1568 
1569 		/*
1570 		 * Only the first descriptor of a multi-descriptor
1571 		 * transmission is updated, so the driver has to skip the
1572 		 * entire chain of buffers for the transmitted frame.  In
1573 		 * other words, the JME_TD_OWN bit is valid only in the
1574 		 * first descriptor of a multi-descriptor transmission.
1575 		 */
1576 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1577 			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
1578 			JME_DESC_INC(cons, JME_TX_RING_CNT);
1579 		}
1580 
1581 		/* Reclaim transferred mbufs. */
1582 		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1583 		m_freem(txd->tx_m);
1584 		txd->tx_m = NULL;
1585 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1586 		if (sc->jme_cdata.jme_tx_cnt < 0)
1587 			panic("%s: Active Tx desc counter was garbled",
1588 			    sc->sc_dev.dv_xname);
1589 		txd->tx_ndesc = 0;
1590 	}
1591 	sc->jme_cdata.jme_tx_cons = cons;
1592 
1593 	if (sc->jme_cdata.jme_tx_cnt == 0)
1594 		ifp->if_timer = 0;
1595 
1596 	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD <=
1597 	    JME_TX_RING_CNT - JME_TXD_RSVD)
1598 		ifp->if_flags &= ~IFF_OACTIVE;
1599 
1600 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
1601 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1602 }
1603 
1604 void
1605 jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
1606 {
1607 	int i;
1608 
1609 	for (i = 0; i < count; ++i) {
1610 		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];
1611 
1612 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
1613 		desc->buflen = htole32(MCLBYTES);
1614 		JME_DESC_INC(cons, JME_RX_RING_CNT);
1615 	}
1616 }
1617 
1618 /* Receive a frame. */
1619 void
1620 jme_rxpkt(struct jme_softc *sc)
1621 {
1622 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1623 	struct jme_desc *desc;
1624 	struct jme_rxdesc *rxd;
1625 	struct mbuf *mp, *m;
1626 	uint32_t flags, status;
1627 	int cons, count, nsegs;
1628 
1629 	cons = sc->jme_cdata.jme_rx_cons;
1630 	desc = &sc->jme_rdata.jme_rx_ring[cons];
1631 	flags = letoh32(desc->flags);
1632 	status = letoh32(desc->buflen);
1633 	nsegs = JME_RX_NSEGS(status);
1634 
1635 	if (status & JME_RX_ERR_STAT) {
1636 		ifp->if_ierrors++;
1637 		jme_discard_rxbufs(sc, cons, nsegs);
1638 #ifdef JME_SHOW_ERRORS
1639 		printf("%s : receive error = 0x%b\n",
1640 		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
1641 #endif
1642 		sc->jme_cdata.jme_rx_cons += nsegs;
1643 		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
1644 		return;
1645 	}
1646 
1647 	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
1648 	for (count = 0; count < nsegs; count++,
1649 	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
1650 		rxd = &sc->jme_cdata.jme_rxdesc[cons];
1651 		mp = rxd->rx_m;
1652 
1653 		/* Add a new receive buffer to the ring. */
1654 		if (jme_newbuf(sc, rxd) != 0) {
1655 			ifp->if_iqdrops++;
1656 			/* Reuse buffer. */
1657 			jme_discard_rxbufs(sc, cons, nsegs - count);
1658 			if (sc->jme_cdata.jme_rxhead != NULL) {
1659 				m_freem(sc->jme_cdata.jme_rxhead);
1660 				JME_RXCHAIN_RESET(sc);
1661 			}
1662 			break;
1663 		}
1664 
1665 		/*
1666 		 * Assume we've received a full-sized frame.  The actual
1667 		 * size is fixed up when we encounter the end of a
1668 		 * multi-segment frame.
1669 		 */
1670 		mp->m_len = MCLBYTES;
1671 
1672 		/* Chain received mbufs. */
1673 		if (sc->jme_cdata.jme_rxhead == NULL) {
1674 			sc->jme_cdata.jme_rxhead = mp;
1675 			sc->jme_cdata.jme_rxtail = mp;
1676 		} else {
1677 			/*
1678 			 * Receive processor can receive a maximum frame
1679 			 * size of 65535 bytes.
1680 			 */
1681 			mp->m_flags &= ~M_PKTHDR;
1682 			sc->jme_cdata.jme_rxtail->m_next = mp;
1683 			sc->jme_cdata.jme_rxtail = mp;
1684 		}
1685 
1686 		if (count == nsegs - 1) {
1687 			/* Last desc. for this frame. */
1688 			m = sc->jme_cdata.jme_rxhead;
1689 			/* XXX assert PKTHDR? */
1690 			m->m_flags |= M_PKTHDR;
1691 			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
1692 			if (nsegs > 1) {
1693 				/* Set first mbuf size. */
1694 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
1695 				/* Set last mbuf size. */
1696 				mp->m_len = sc->jme_cdata.jme_rxlen -
1697 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
1698 				    (MCLBYTES * (nsegs - 2)));
1699 			} else {
1700 				m->m_len = sc->jme_cdata.jme_rxlen;
1701 			}
1702 			m->m_pkthdr.rcvif = ifp;
1703 
1704 			/*
1705 			 * Account for the 10 bytes of auto padding used to
1706 			 * align the IP header on a 32-bit boundary.  Also
1707 			 * note that the CRC bytes are automatically removed
1708 			 * by the hardware.
1709 			 */
1710 			m->m_data += JME_RX_PAD_BYTES;
1711 
1712 			/* Set checksum information. */
1713 			if (flags & (JME_RD_IPV4|JME_RD_IPV6)) {
1714 				if ((flags & JME_RD_IPV4) &&
1715 				    (flags & JME_RD_IPCSUM))
1716 					m->m_pkthdr.csum_flags |=
1717 					    M_IPV4_CSUM_IN_OK;
1718 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
1719 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
1720 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
1721 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
1722 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
1723 					m->m_pkthdr.csum_flags |=
1724 					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1725 				}
1726 			}
1727 
1728 #if NVLAN > 0
1729 			/* Check for VLAN tagged packets. */
1730 			if (flags & JME_RD_VLAN_TAG) {
1731 				m->m_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK;
1732 				m->m_flags |= M_VLANTAG;
1733 			}
1734 #endif
1735 
1736 #if NBPFILTER > 0
1737 			if (ifp->if_bpf)
1738 				bpf_mtap_ether(ifp->if_bpf, m,
1739 				    BPF_DIRECTION_IN);
1740 #endif
1741 
1742 			ifp->if_ipackets++;
1743 			/* Pass it on. */
1744 			ether_input_mbuf(ifp, m);
1745 
1746 			/* Reset mbuf chains. */
1747 			JME_RXCHAIN_RESET(sc);
1748 		}
1749 	}
1750 
1751 	sc->jme_cdata.jme_rx_cons += nsegs;
1752 	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
1753 }
1754 
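/*
 *	Drain completed Rx descriptors: stop at the first descriptor still
 *	owned by the hardware or not yet marked valid, sanity check the
 *	segment count against the reported length and hand each frame to
 *	jme_rxpkt().
 */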
1755 void
1756 jme_rxeof(struct jme_softc *sc)
1757 {
1758 	struct jme_desc *desc;
1759 	int nsegs, prog, pktlen;
1760 
1761 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
1762 	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1763 
1764 	prog = 0;
1765 	for (;;) {
1766 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
1767 		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
1768 			break;
1769 		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
1770 			break;
1771 
1772 		/*
1773 		 * Check the number of segments against the received byte
1774 		 * count.  A mismatch would indicate that the hardware is
1775 		 * still trying to update the Rx descriptors.  I'm not
1776 		 * sure whether this check is needed.
1777 		 */
1778 		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
1779 		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
1780 		if (nsegs != howmany(pktlen, MCLBYTES)) {
1781 			printf("%s: RX fragment count(%d) "
1782 			    "and packet size(%d) mismatch\n",
1783 			     sc->sc_dev.dv_xname, nsegs, pktlen);
1784 			break;
1785 		}
1786 
1787 		/* Received a frame. */
1788 		jme_rxpkt(sc);
1789 		prog++;
1790 	}
1791 
1792 	if (prog > 0) {
1793 		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
1794 		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1795 	}
1796 }
1797 
1798 void
1799 jme_tick(void *xsc)
1800 {
1801 	struct jme_softc *sc = xsc;
1802 	struct mii_data *mii = &sc->sc_miibus;
1803 	int s;
1804 
1805 	s = splnet();
1806 	mii_tick(mii);
1807 	timeout_add_sec(&sc->jme_tick_ch, 1);
1808 	splx(s);
1809 }
1810 
1811 void
1812 jme_reset(struct jme_softc *sc)
1813 {
1814 #ifdef foo
1815 	/* Stop receiver, transmitter. */
1816 	jme_stop_rx(sc);
1817 	jme_stop_tx(sc);
1818 #endif
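	/* Assert the global software reset, wait briefly, then release it. */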
1819 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1820 	DELAY(10);
1821 	CSR_WRITE_4(sc, JME_GHC, 0);
1822 }
1823 
1824 int
1825 jme_init(struct ifnet *ifp)
1826 {
1827 	struct jme_softc *sc = ifp->if_softc;
1828 	struct mii_data *mii;
1829 	uint8_t eaddr[ETHER_ADDR_LEN];
1830 	bus_addr_t paddr;
1831 	uint32_t reg;
1832 	int error;
1833 
1834 	/*
1835 	 * Cancel any pending I/O.
1836 	 */
1837 	jme_stop(sc);
1838 
1839 	/*
1840 	 * Reset the chip to a known state.
1841 	 */
1842 	jme_reset(sc);
1843 
1844 	/* Init descriptors. */
1845 	error = jme_init_rx_ring(sc);
1846 	if (error != 0) {
1847 		printf("%s: initialization failed: no memory for Rx buffers.\n",
1848 		    sc->sc_dev.dv_xname);
1849 		jme_stop(sc);
1850 		return (error);
1851 	}
1852 	jme_init_tx_ring(sc);
1853 
1854 	/* Initialize shadow status block. */
1855 	jme_init_ssb(sc);
1856 
1857 	/* Reprogram the station address. */
1858 	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
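	/* PAR0 holds the low four address bytes, PAR1 the upper two. */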
1859 	CSR_WRITE_4(sc, JME_PAR0,
1860 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
1861 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
1862 
1863 	/*
1864 	 * Configure Tx queue.
1865 	 *  Tx priority queue weight value : 0
1866 	 *  Tx FIFO threshold for processing next packet : 16QW
1867 	 *  Maximum Tx DMA length : 512
1868 	 *  Allow Tx DMA burst.
1869 	 */
1870 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
1871 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
1872 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
1873 	sc->jme_txcsr |= sc->jme_tx_dma_size;
1874 	sc->jme_txcsr |= TXCSR_DMA_BURST;
1875 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
1876 
1877 	/* Set Tx descriptor counter. */
1878 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
1879 
1880 	/* Set Tx ring address to the hardware. */
1881 	paddr = JME_TX_RING_ADDR(sc, 0);
1882 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
1883 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
1884 
1885 	/* Configure TxMAC parameters. */
1886 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
1887 	reg |= TXMAC_THRESH_1_PKT;
1888 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
1889 	CSR_WRITE_4(sc, JME_TXMAC, reg);
1890 
1891 	/*
1892 	 * Configure Rx queue.
1893 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
1894 	 *  FIFO threshold for processing next packet : 128QW
1895 	 *  Rx queue 0 select
1896 	 *  Max Rx DMA length : 128
1897 	 *  Rx descriptor retry : 32
1898 	 *  Rx descriptor retry time gap : 256ns
1899 	 *  Don't receive runt/bad frame.
1900 	 */
1901 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
1902 
1903 	/*
1904 	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
1905 	 * than 4K bytes will suffer from Rx FIFO overruns. So
1906 	 * decrease the FIFO threshold to reduce FIFO overruns for
1907 	 * frames larger than 4000 bytes.
1908 	 * For best performance with standard MTU sized frames, use
1909 	 * the maximum allowable FIFO threshold, which is 32QW for
1910 	 * chips with a full mask revision >= 2, otherwise 128QW.
1911 	 * FIFO thresholds of 64QW and 128QW are not valid for chips
1912 	 * with a full mask revision >= 2.
1913 	 */
1914 	if (sc->jme_revfm >= 2)
1915 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1916 	else {
1917 		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1918 		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
1919 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1920 		else
1921 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
1922 	}
1923 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
1924 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
1925 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
1926 	/* XXX TODO DROP_BAD */
1927 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
1928 
1929 	/* Set Rx descriptor counter. */
1930 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
1931 
1932 	/* Set Rx ring address to the hardware. */
1933 	paddr = JME_RX_RING_ADDR(sc, 0);
1934 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
1935 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
1936 
1937 	/* Clear receive filter. */
1938 	CSR_WRITE_4(sc, JME_RXMAC, 0);
1939 
1940 	/* Set up the receive filter. */
1941 	jme_iff(sc);
1942 
1943 	jme_set_vlan(sc);
1944 
1945 	/*
1946 	 * Disable all WOL bits as WOL can interfere with normal Rx
1947 	 * operation. Also clear the WOL detection status bits.
1948 	 */
1949 	reg = CSR_READ_4(sc, JME_PMCS);
1950 	reg &= ~PMCS_WOL_ENB_MASK;
1951 	CSR_WRITE_4(sc, JME_PMCS, reg);
1952 
1953 	/*
1954 	 * Pad 10 bytes right before the received frame. This greatly
1955 	 * helps Rx performance on strict-alignment architectures as
1956 	 * the driver does not need to copy the frame to align the payload.
1957 	 */
1958 	reg = CSR_READ_4(sc, JME_RXMAC);
1959 	reg |= RXMAC_PAD_10BYTES;
1960 	reg |= RXMAC_CSUM_ENB;
1961 	CSR_WRITE_4(sc, JME_RXMAC, reg);
1962 
1963 	/* Configure general purpose reg0 */
1964 	reg = CSR_READ_4(sc, JME_GPREG0);
1965 	reg &= ~GPREG0_PCC_UNIT_MASK;
1966 	/* Set PCC timer resolution to micro-seconds unit. */
1967 	reg |= GPREG0_PCC_UNIT_US;
1968 	/*
1969 	 * Disable all shadow register posting as we have to read the
1970 	 * JME_INTR_STATUS register in jme_intr. It also seems hard
1971 	 * to synchronize the interrupt status between hardware and
1972 	 * software with shadow posting due to the requirements of
1973 	 * bus_dmamap_sync(9).
1974 	 */
1975 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
1976 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
1977 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
1978 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
1979 	/* Disable posting of DW0. */
1980 	reg &= ~GPREG0_POST_DW0_ENB;
1981 	/* Clear PME message. */
1982 	reg &= ~GPREG0_PME_ENB;
1983 	/* Set PHY address. */
1984 	reg &= ~GPREG0_PHY_ADDR_MASK;
1985 	reg |= sc->jme_phyaddr;
1986 	CSR_WRITE_4(sc, JME_GPREG0, reg);
1987 
1988 	/* Configure Tx queue 0 packet completion coalescing. */
1989 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1990 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
1991 	    PCCTX_COAL_TO_MASK;
1992 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1993 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
1994 	    PCCTX_COAL_PKT_MASK;
1995 	reg |= PCCTX_COAL_TXQ0;
1996 	CSR_WRITE_4(sc, JME_PCCTX, reg);
1997 
1998 	/* Configure Rx queue 0 packet completion coalescing. */
1999 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
2000 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2001 	    PCCRX_COAL_TO_MASK;
2002 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
2003 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2004 	    PCCRX_COAL_PKT_MASK;
2005 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2006 
2007 	/* Configure shadow status block but don't enable posting. */
2008 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2009 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2010 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2011 
2012 	/* Disable Timer 1 and Timer 2. */
2013 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2014 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2015 
2016 	/* Configure retry transmit period, retry limit value. */
2017 	CSR_WRITE_4(sc, JME_TXTRHD,
2018 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2019 	    TXTRHD_RT_PERIOD_MASK) |
2020 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2021 	    TXTRHD_RT_LIMIT_MASK));
2022 
2023 	/* Disable RSS. */
2024 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2025 
2026 	/* Initialize the interrupt mask. */
2027 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2028 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2029 
2030 	/*
2031 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2032 	 * done after detection of a valid link in jme_miibus_statchg.
2033 	 */
2034 	sc->jme_flags &= ~JME_FLAG_LINK;
2035 
2036 	/* Set the current media. */
2037 	mii = &sc->sc_miibus;
2038 	mii_mediachg(mii);
2039 
2040 	timeout_add_sec(&sc->jme_tick_ch, 1);
2041 
2042 	ifp->if_flags |= IFF_RUNNING;
2043 	ifp->if_flags &= ~IFF_OACTIVE;
2044 
2045 	return (0);
2046 }
2047 
2048 void
2049 jme_stop(struct jme_softc *sc)
2050 {
2051 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2052 	struct jme_txdesc *txd;
2053 	struct jme_rxdesc *rxd;
2054 	int i;
2055 
2056 	/*
2057 	 * Mark the interface down and cancel the watchdog timer.
2058 	 */
2059 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2060 	ifp->if_timer = 0;
2061 
2062 	timeout_del(&sc->jme_tick_ch);
2063 	sc->jme_flags &= ~JME_FLAG_LINK;
2064 
2065 	/*
2066 	 * Disable interrupts.
2067 	 */
2068 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2069 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2070 
2071 	/* Disable updating shadow status block. */
2072 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2073 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2074 
2075 	/* Stop receiver, transmitter. */
2076 	jme_stop_rx(sc);
2077 	jme_stop_tx(sc);
2078 
2079 #ifdef foo
2080 	/* Reclaim Rx/Tx buffers that have been completed. */
2081 	jme_rxeof(sc);
2082 	if (sc->jme_cdata.jme_rxhead != NULL)
2083 		m_freem(sc->jme_cdata.jme_rxhead);
2084 	JME_RXCHAIN_RESET(sc);
2085 	jme_txeof(sc);
2086 #endif
2087 
2088 	/*
2089 	 * Free partial finished RX segments
2090 	 * Free partially finished Rx segments.
2091 	if (sc->jme_cdata.jme_rxhead != NULL)
2092 		m_freem(sc->jme_cdata.jme_rxhead);
2093 	JME_RXCHAIN_RESET(sc);
2094 
2095 	/*
2096 	 * Free RX and TX mbufs still in the queues.
2097 	 */
2098 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2099 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2100 		if (rxd->rx_m != NULL) {
2101 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2102 			m_freem(rxd->rx_m);
2103 			rxd->rx_m = NULL;
2104 		}
2105 	}
2106 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2107 		txd = &sc->jme_cdata.jme_txdesc[i];
2108 		if (txd->tx_m != NULL) {
2109 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2110 			m_freem(txd->tx_m);
2111 			txd->tx_m = NULL;
2112 			txd->tx_ndesc = 0;
2113 		}
2114 	}
2115 }
2116 
2117 void
2118 jme_stop_tx(struct jme_softc *sc)
2119 {
2120 	uint32_t reg;
2121 	int i;
2122 
2123 	reg = CSR_READ_4(sc, JME_TXCSR);
2124 	if ((reg & TXCSR_TX_ENB) == 0)
2125 		return;
2126 	reg &= ~TXCSR_TX_ENB;
2127 	CSR_WRITE_4(sc, JME_TXCSR, reg);
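	/* Wait up to JME_TIMEOUT microseconds for the transmitter to stop. */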
2128 	for (i = JME_TIMEOUT; i > 0; i--) {
2129 		DELAY(1);
2130 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2131 			break;
2132 	}
2133 	if (i == 0)
2134 		printf("%s: stopping transmitter timeout!\n",
2135 		    sc->sc_dev.dv_xname);
2136 }
2137 
2138 void
2139 jme_stop_rx(struct jme_softc *sc)
2140 {
2141 	uint32_t reg;
2142 	int i;
2143 
2144 	reg = CSR_READ_4(sc, JME_RXCSR);
2145 	if ((reg & RXCSR_RX_ENB) == 0)
2146 		return;
2147 	reg &= ~RXCSR_RX_ENB;
2148 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2149 	for (i = JME_TIMEOUT; i > 0; i--) {
2150 		DELAY(1);
2151 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2152 			break;
2153 	}
2154 	if (i == 0)
2155 		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
2156 }
2157 
2158 void
2159 jme_init_tx_ring(struct jme_softc *sc)
2160 {
2161 	struct jme_ring_data *rd;
2162 	struct jme_txdesc *txd;
2163 	int i;
2164 
2165 	sc->jme_cdata.jme_tx_prod = 0;
2166 	sc->jme_cdata.jme_tx_cons = 0;
2167 	sc->jme_cdata.jme_tx_cnt = 0;
2168 
2169 	rd = &sc->jme_rdata;
2170 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2171 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2172 		txd = &sc->jme_cdata.jme_txdesc[i];
2173 		txd->tx_m = NULL;
2174 		txd->tx_desc = &rd->jme_tx_ring[i];
2175 		txd->tx_ndesc = 0;
2176 	}
2177 
2178 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
2179 	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2180 }
2181 
2182 void
2183 jme_init_ssb(struct jme_softc *sc)
2184 {
2185 	struct jme_ring_data *rd;
2186 
2187 	rd = &sc->jme_rdata;
2188 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2189 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
2190 	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2191 }
2192 
2193 int
2194 jme_init_rx_ring(struct jme_softc *sc)
2195 {
2196 	struct jme_ring_data *rd;
2197 	struct jme_rxdesc *rxd;
2198 	int i;
2199 
2200 	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2201 		 sc->jme_cdata.jme_rxtail == NULL &&
2202 		 sc->jme_cdata.jme_rxlen == 0);
2203 	sc->jme_cdata.jme_rx_cons = 0;
2204 
2205 	rd = &sc->jme_rdata;
2206 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2207 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2208 		int error;
2209 
2210 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2211 		rxd->rx_m = NULL;
2212 		rxd->rx_desc = &rd->jme_rx_ring[i];
2213 		error = jme_newbuf(sc, rxd);
2214 		if (error)
2215 			return (error);
2216 	}
2217 
2218 	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
2219 	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2220 
2221 	return (0);
2222 }
2223 
2224 int
2225 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
2226 {
2227 	struct jme_desc *desc;
2228 	struct mbuf *m;
2229 	bus_dmamap_t map;
2230 	int error;
2231 
2232 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2233 	if (m == NULL)
2234 		return (ENOBUFS);
2235 	MCLGET(m, M_DONTWAIT);
2236 	if (!(m->m_flags & M_EXT)) {
2237 		m_freem(m);
2238 		return (ENOBUFS);
2239 	}
2240 
2241 	/*
2242 	 * The JMC250 has a 64-bit boundary alignment limitation, so
2243 	 * jme(4) takes advantage of the hardware's 10 byte padding
2244 	 * feature to avoid copying the entire frame just to align
2245 	 * the IP header on a 32-bit boundary.
2246 	 */
2247 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2248 
2249 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2250 	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);
2251 
2252 	if (error != 0) {
2253 		m_freem(m);
2254 		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
2255 		return (error);
2256 	}
2257 
2258 	if (rxd->rx_m != NULL) {
2259 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2260 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2261 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2262 	}
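	/*
	 * Swap the descriptor's DMA map with the just-loaded spare map so
	 * a spare is always available for the next replenishment.
	 */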
2263 	map = rxd->rx_dmamap;
2264 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2265 	sc->jme_cdata.jme_rx_sparemap = map;
2266 	rxd->rx_m = m;
2267 
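	/* Rewrite the descriptor and hand ownership back to the hardware. */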
2268 	desc = rxd->rx_desc;
2269 	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
2270 	desc->addr_lo =
2271 	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
2272 	desc->addr_hi =
2273 	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
2274 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2275 
2276 	return (0);
2277 }
2278 
2279 void
2280 jme_set_vlan(struct jme_softc *sc)
2281 {
2282 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2283 	uint32_t reg;
2284 
2285 	reg = CSR_READ_4(sc, JME_RXMAC);
2286 	reg &= ~RXMAC_VLAN_ENB;
2287 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
2288 		reg |= RXMAC_VLAN_ENB;
2289 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2290 }
2291 
2292 void
2293 jme_iff(struct jme_softc *sc)
2294 {
2295 	struct arpcom *ac = &sc->sc_arpcom;
2296 	struct ifnet *ifp = &ac->ac_if;
2297 	struct ether_multi *enm;
2298 	struct ether_multistep step;
2299 	uint32_t crc;
2300 	uint32_t mchash[2];
2301 	uint32_t rxcfg;
2302 
2303 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2304 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2305 	    RXMAC_ALLMULTI);
2306 	ifp->if_flags &= ~IFF_ALLMULTI;
2307 
2308 	/*
2309 	 * Always accept frames destined to our station address.
2310 	 * Always accept frames destined for our station address.
2311 	 */
2312 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2313 
2314 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2315 		ifp->if_flags |= IFF_ALLMULTI;
2316 		if (ifp->if_flags & IFF_PROMISC)
2317 			rxcfg |= RXMAC_PROMISC;
2318 		else
2319 			rxcfg |= RXMAC_ALLMULTI;
2320 		mchash[0] = mchash[1] = 0xFFFFFFFF;
2321 	} else {
2322 		/*
2323 		 * Set up the multicast address filter by passing all
2324 		 * multicast addresses through a CRC generator, and then
2325 		 * using the low-order 6 bits as an index into the 64 bit
2326 		 * multicast hash table.  The high order bits select the
2327 		 * register, while the rest of the bits select the bit
2328 		 * within the register.
2329 		 */
2330 		rxcfg |= RXMAC_MULTICAST;
2331 		bzero(mchash, sizeof(mchash));
2332 
2333 		ETHER_FIRST_MULTI(step, ac, enm);
2334 		while (enm != NULL) {
2335 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2336 
2337 			/* Just want the 6 least significant bits. */
2338 			crc &= 0x3f;
2339 
2340 			/* Set the corresponding bit in the hash table. */
2341 			mchash[crc >> 5] |= 1 << (crc & 0x1f);
2342 
2343 			ETHER_NEXT_MULTI(step, enm);
2344 		}
2345 	}
2346 
2347 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2348 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2349 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2350 }
2351