/*	$OpenBSD: if_jme.c,v 1.58 2024/05/24 06:02:53 jsg Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>
#include <dev/mii/jmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_jmereg.h>
#include <dev/pci/if_jmevar.h>

/* Define the following to disable printing Rx errors. */
#undef JME_SHOW_ERRORS

int	jme_match(struct device *, void *, void *);
void	jme_map_intr_vector(struct jme_softc *);
void	jme_attach(struct device *, struct device *, void *);
int	jme_detach(struct device *, int);

int	jme_miibus_readreg(struct device *, int, int);
void	jme_miibus_writereg(struct device *, int, int, int);
void	jme_miibus_statchg(struct device *);

int	jme_init(struct ifnet *);
int	jme_ioctl(struct ifnet *, u_long, caddr_t);

void	jme_start(struct ifnet *);
void	jme_watchdog(struct ifnet *);
void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
int	jme_mediachange(struct ifnet *);

int	jme_intr(void *);
void	jme_txeof(struct jme_softc *);
void	jme_rxeof(struct jme_softc *);

int	jme_dma_alloc(struct jme_softc *);
void	jme_dma_free(struct jme_softc *);
int	jme_init_rx_ring(struct jme_softc *);
void	jme_init_tx_ring(struct jme_softc *);
void	jme_init_ssb(struct jme_softc *);
int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
int	jme_encap(struct jme_softc *, struct mbuf *);
void	jme_rxpkt(struct jme_softc *);

void	jme_tick(void *);
void	jme_stop(struct jme_softc *);
void	jme_reset(struct jme_softc *);
void	jme_set_vlan(struct jme_softc *);
void	jme_iff(struct jme_softc *);
void	jme_stop_tx(struct jme_softc *);
void	jme_stop_rx(struct jme_softc *);
void	jme_mac_config(struct jme_softc *);
void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
void	jme_discard_rxbufs(struct jme_softc *, int, int);
#ifdef notyet
void	jme_setwol(struct jme_softc *);
void	jme_setlinkspeed(struct jme_softc *);
#endif

/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

const struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)
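/*
 * DPRINTF takes its printf arguments as a single parenthesized list,
 * so callers need double parentheses, e.g.:
 *
 *	DPRINTF(("%s: reset done\n", sc->sc_dev.dv_xname));
 */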

/*
 * Read a PHY register on the MII of the JMC250.
 */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return;

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has the side effect of resetting
	 * the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
	 * descriptor address, so the driver has to reset its internal
	 * producer/consumer pointers and reclaim any allocated
	 * resources.  Note that merely saving the JME_TXNDA and
	 * JME_RXNDA registers before stopping the MAC and restoring
	 * them afterwards is not sufficient to guarantee a correct
	 * MAC state: stopping the MAC can take a while, and the
	 * hardware may update JME_TXNDA/JME_RXNDA during the stop
	 * operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
jme_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
	    sizeof (jme_devices) / sizeof (jme_devices[0]));
}

int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
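	/*
	 * Each EEPROM descriptor is JME_EEPROM_DESC_BYTES long: a flag
	 * byte selecting function/page (and carrying the end-of-list
	 * bit), a register offset byte and a data byte.  Walk the list
	 * and collect the bytes that target the PAR0..PAR0+5 station
	 * address registers.
	 */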
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

	eaddr[0] = (par0 >> 0) & 0xFF;
	eaddr[1] = (par0 >> 8) & 0xFF;
	eaddr[2] = (par0 >> 16) & 0xFF;
	eaddr[3] = (par0 >> 24) & 0xFF;
	eaddr[4] = (par1 >> 0) & 0xFF;
	eaddr[5] = (par1 >> 8) & 0xFF;
}

void
jme_map_intr_vector(struct jme_softc *sc)
{
	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];

	bzero(map, sizeof(map));

	/* Map Tx interrupt sources to MSI/MSIX vector 2. */
	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);

	/* Map Rx interrupt sources to MSI/MSIX vector 1. */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);

	/*
	 * All other interrupt sources stay on MSI/MSIX vector 0
	 * (map[] was zeroed above).  Write the map to the hardware.
	 */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}

void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate I/O memory.
	 *
	 * The JMC250 supports both memory-mapped and I/O register
	 * access.  Since I/O access would need multiple BARs to reach
	 * all registers, it is not worth using; the memory-mapped
	 * space covers the entire register set with a single 16K
	 * mapping.
	 */

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) == 0)
		jme_map_intr_vector(sc);
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && jmedebug) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to find the correct PHY
	 * address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifq_init_maxlen(&ifp->if_snd, JME_TX_RING_CNT - 1);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 |
	    IFCAP_CSUM_UDPv6;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff,
	    sc->jme_caps & JME_CAP_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Saving the PHY address for an FPGA-mode PHY is not handled
	 * here; FPGA parts are not production hardware.
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}

int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA stuffs for TX ring
	 */

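	/*
	 * Each DMA object below follows the usual four-step
	 * bus_dma(9) pattern: bus_dmamap_create() a map,
	 * bus_dmamem_alloc() DMA-safe memory, bus_dmamem_map() it
	 * into kernel VA and bus_dmamap_load() it to learn the bus
	 * address the chip is programmed with.
	 */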
	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	/* XXX zero */
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;

#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create DMA stuffs for shadow status block
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}

void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires an explicit
 * reset of the link speed to 10/100Mbps, as a gigabit link would
 * consume more than 375mA.
 * Note that we reset the link speed to 10/100Mbps via auto-negotiation,
 * but we have no way of knowing whether that negotiation succeeds once
 * power is off.  If the renegotiation fails, WOL may not work.  Running
 * at 1Gbps draws more than the 375mA at 3.3V specified by the PCI
 * specification, and that would result in a complete shutdown of power
 * to the ethernet controller.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the phy driver's suspend method.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

int
jme_encap(struct jme_softc *sc, struct mbuf *m)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
		    m, BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

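	/*
	 * The first descriptor of a frame carries no buffer: it holds
	 * the per-frame flags (checksum/VLAN requests) and, in 64-bit
	 * mode, the total packet length in the addr_hi word.  The
	 * actual DMA segments follow in the subsequent descriptors.
	 */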
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);

drop:
	m_freem(m);
	return (error);
}

void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq = 0;

	/* Reclaim transmitted frames. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0)
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	for (;;) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy
		 * load, so cache the TXCSR value and write it back
		 * ORed with the kick command.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		jme_init(ifp);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		jme_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	jme_init(ifp);
	jme_start(ifp);
}

int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			jme_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				jme_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			jme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before the JMC250B.
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* set clock sources for tx mac and offload engine */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify the hardware that new Rx buffers are
			 * available.  Reading RXCSR takes a very long
			 * time under heavy load, so cache the RXCSR
			 * value and write it back ORed with the kick
			 * command.  This saves one register access
			 * cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}

void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);

		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only in the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, JME_RX_RING_CNT);
	}
}

/* Receive a frame. */
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s: receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
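			/*
			 * Length bookkeeping for a multi-segment
			 * frame: with the usual 2048-byte MCLBYTES and
			 * the 10-byte Rx pad, a frame with 3000 bytes
			 * of payload arrives as nsegs = 2; the first
			 * mbuf holds 2048 - 10 = 2038 bytes and the
			 * last one 3000 - 2038 = 962 bytes.
			 */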
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}

			/*
			 * Account for the 10 bytes of auto padding
			 * used to align the IP header on a 32-bit
			 * boundary.  Also note that the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & (JME_RD_IPV4 | JME_RD_IPV6)) {
				if ((flags & JME_RD_IPV4) &&
				    (flags & JME_RD_IPCSUM))
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK |
					    M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	if_input(ifp, &ml);

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors. I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
		pktlen = JME_RX_BYTES(letoh32(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			printf("%s: RX fragment count(%d) "
			    "and packet size(%d) mismatch\n",
			    sc->sc_dev.dv_xname, nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
		    sc->jme_cdata.jme_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->jme_tick_ch, 1);
	splx(s);
}

void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

int
jme_init(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: initialization failed: no memory for Rx buffers.\n",
		    sc->sc_dev.dv_xname);
		jme_stop(sc);
		return (error);
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
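	/*
	 * The station address is packed little-endian, e.g. for
	 * 00:11:22:33:44:55 the writes above yield PAR0 = 0x33221100
	 * and PAR1 = 0x00005544.
	 */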

	/*
	 * Configure Tx queue.
	 * Tx priority queue weight value : 0
	 * Tx FIFO threshold for processing next packet : 16QW
	 * Maximum Tx DMA length : 512
	 * Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 * FIFO full threshold for transmitting Tx pause packet : 128T
	 * FIFO threshold for processing next packet : 128QW
	 * Rx queue 0 select
	 * Max Rx DMA length : 128
	 * Rx descriptor retry : 32
	 * Rx descriptor retry time gap : 256ns
	 * Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;

	/*
	 * Since Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns. So
	 * decrease FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU sized frames use
	 * maximum allowable FIFO threshold, which is 32QW for
	 * chips with a full mask >= 2 otherwise 128QW. FIFO
	 * thresholds of 64QW and 128QW are not valid for chips
	 * with a full mask >= 2.
	 */
	if (sc->jme_revfm >= 2)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else {
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
		else
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	}
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

	/* Set Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_iff(sc);

	jme_set_vlan(sc);

1902 /*
1903 * Disable all WOL bits as WOL can interfere normal Rx
1904 * operation. Also clear WOL detection status bits.
1905 */
1906 reg = CSR_READ_4(sc, JME_PMCS);
1907 reg &= ~PMCS_WOL_ENB_MASK;
1908 CSR_WRITE_4(sc, JME_PMCS, reg);
1909
1910 /*
1911 * Pad 10bytes right before received frame. This will greatly
1912 * help Rx performance on strict-alignment architectures as
1913 * it does not need to copy the frame to align the payload.
1914 */
1915 reg = CSR_READ_4(sc, JME_RXMAC);
1916 reg |= RXMAC_PAD_10BYTES;
1917 reg |= RXMAC_CSUM_ENB;
1918 CSR_WRITE_4(sc, JME_RXMAC, reg);
1919
1920 /* Configure general purpose reg0 */
1921 reg = CSR_READ_4(sc, JME_GPREG0);
1922 reg &= ~GPREG0_PCC_UNIT_MASK;
1923 /* Set PCC timer resolution to micro-seconds unit. */
1924 reg |= GPREG0_PCC_UNIT_US;
1925 /*
1926 * Disable all shadow register posting as we have to read
1927 * JME_INTR_STATUS register in jme_intr. Also it seems
1928 * that it's hard to synchronize interrupt status between
1929 * hardware and software with shadow posting due to
1930 * requirements of bus_dmamap_sync(9).
1931 */
1932 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
1933 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
1934 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
1935 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
1936 /* Disable posting of DW0. */
1937 reg &= ~GPREG0_POST_DW0_ENB;
1938 /* Clear PME message. */
1939 reg &= ~GPREG0_PME_ENB;
1940 /* Set PHY address. */
1941 reg &= ~GPREG0_PHY_ADDR_MASK;
1942 reg |= sc->jme_phyaddr;
1943 CSR_WRITE_4(sc, JME_GPREG0, reg);
1944
1945 /* Configure Tx queue 0 packet completion coalescing. */
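	/*
	 * With these defaults an interrupt is posted once
	 * jme_tx_coal_pkt packets have completed, or once
	 * jme_tx_coal_to PCC time units (microseconds, per
	 * GPREG0_PCC_UNIT_US above) have elapsed, whichever comes
	 * first.
	 */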
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);

	/* Configure Rx queue 0 packet completion coalescing. */
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
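	/*
	 * The posting enable bit (SHBASE_POST_ENB) lives in the low
	 * address word; it is left clear here, and jme_stop() masks
	 * it off again when the interface is brought down.
	 */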

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->jme_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	timeout_del(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partially finished RX segments.
	 */
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
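	/*
	 * Busy-wait up to JME_TIMEOUT microseconds (DELAY(1) per
	 * iteration) for the hardware to clear TXCSR_TX_ENB,
	 * confirming the transmitter has stopped.
	 */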
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping transmitter timeout!\n",
		    sc->sc_dev.dv_xname);
}

void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
}

void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}
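	/*
	 * Flush the zeroed ring out to memory before the chip is given
	 * its address; BUS_DMASYNC_PREWRITE orders the CPU stores
	 * ahead of the device's DMA reads.
	 */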
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
	    sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
	    sc->jme_cdata.jme_rxtail == NULL &&
	    sc->jme_cdata.jme_rxlen == 0);
	sc->jme_cdata.jme_rx_cons = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		int error;

		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		error = jme_newbuf(sc, rxd);
		if (error)
			return (error);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
	    sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;
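	/*
	 * Allocate an mbuf header and attach a cluster. The allocation
	 * must not sleep (M_DONTWAIT), since Rx buffers are also
	 * replenished from interrupt context via jme_rxeof().
	 */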
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * The JMC250 requires Rx buffers to be aligned on a 64-bit
	 * boundary, so jme(4) relies on the hardware's 10-byte padding
	 * feature (see RXMAC_PAD_10BYTES in jme_init()) to align the
	 * IP header on a 32-bit boundary instead of copying the
	 * entire frame.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}
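	/*
	 * The new cluster was loaded into the spare map, so the old
	 * mapping can now be torn down; the descriptor's old map then
	 * becomes the new spare. Doing it in this order means a failed
	 * load above never costs the ring slot its existing buffer.
	 */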
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

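	/*
	 * Re-arm the descriptor: point it at the new cluster and hand
	 * ownership back to the chip by setting JME_RD_OWN.
	 */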
	desc = rxd->rx_desc;
	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
	desc->addr_lo =
	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->addr_hi =
	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

void
jme_iff(struct jme_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		else
			rxcfg |= RXMAC_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/*
		 * Set up the multicast address filter by passing all
		 * multicast addresses through a CRC generator, and then
		 * using the low-order 6 bits as an index into the 64 bit
		 * multicast hash table. The high order bits select the
		 * register, while the rest of the bits select the bit
		 * within the register.
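		 * For example, a 6-bit index of 0x2a (binary 101010)
		 * has bit 5 set and low bits 01010, so it lands in
		 * mchash[1] at bit 10, i.e. bit 10 of JME_MAR1.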
		 */
		rxcfg |= RXMAC_MULTICAST;
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 least significant bits. */
			crc &= 0x3f;

			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}