1 /* $NetBSD: if_et.c,v 1.36 2023/08/01 20:46:01 andvar Exp $ */
2 /* $OpenBSD: if_et.c,v 1.12 2008/07/11 09:29:02 kevlo $ */
3 /*
4 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa@gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.36 2023/08/01 20:46:01 andvar Exp $");
41
42 #include <sys/param.h>
43 #include <sys/endian.h>
44 #include <sys/systm.h>
45 #include <sys/types.h>
46 #include <sys/sockio.h>
47 #include <sys/mbuf.h>
48 #include <sys/queue.h>
49 #include <sys/kernel.h>
50 #include <sys/device.h>
51 #include <sys/callout.h>
52 #include <sys/socket.h>
53
54 #include <sys/bus.h>
55
56 #include <net/if.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_ether.h>
60 #include <net/if_arp.h>
61
62 #include <net/bpf.h>
63
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 #include <dev/pci/pcidevs.h>
70
71 #include <dev/pci/if_etreg.h>
72
73 static int et_match(device_t, cfdata_t, void *);
74 static void et_attach(device_t, device_t, void *);
75 static int et_detach(device_t, int);
76
77 static int et_miibus_readreg(device_t, int, int, uint16_t *);
78 static int et_miibus_writereg(device_t, int, int, uint16_t);
79 static void et_miibus_statchg(struct ifnet *);
80
81 static int et_init(struct ifnet *);
82 static int et_ioctl(struct ifnet *, u_long, void *);
83 static void et_start(struct ifnet *);
84 static void et_watchdog(struct ifnet *);
85 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
86
87 static int et_intr(void *);
88 static void et_enable_intrs(struct et_softc *, uint32_t);
89 static void et_disable_intrs(struct et_softc *);
90 static void et_rxeof(struct et_softc *);
91 static void et_txeof(struct et_softc *);
92 static void et_txtick(void *);
93
94 static int et_dma_alloc(struct et_softc *);
95 static void et_dma_free(struct et_softc *);
96 static int et_dma_mem_create(struct et_softc *, bus_size_t,
97 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
98 static void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
99 static int et_dma_mbuf_create(struct et_softc *);
100 static void et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
101
102 static int et_init_tx_ring(struct et_softc *);
103 static int et_init_rx_ring(struct et_softc *);
104 static void et_free_tx_ring(struct et_softc *);
105 static void et_free_rx_ring(struct et_softc *);
106 static int et_encap(struct et_softc *, struct mbuf **);
107 static int et_newbuf(struct et_rxbuf_data *, int, int, int);
108 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int);
109 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int);
110
111 static void et_stop(struct et_softc *);
112 static int et_chip_init(struct et_softc *);
113 static void et_chip_attach(struct et_softc *);
114 static void et_init_mac(struct et_softc *);
115 static void et_init_rxmac(struct et_softc *);
116 static void et_init_txmac(struct et_softc *);
117 static int et_init_rxdma(struct et_softc *);
118 static int et_init_txdma(struct et_softc *);
119 static int et_start_rxdma(struct et_softc *);
120 static int et_start_txdma(struct et_softc *);
121 static int et_stop_rxdma(struct et_softc *);
122 static int et_stop_txdma(struct et_softc *);
123 static void et_reset(struct et_softc *);
124 static int et_bus_config(struct et_softc *);
125 static void et_get_eaddr(struct et_softc *, uint8_t[]);
126 static void et_setmulti(struct et_softc *);
127 static void et_tick(void *);
128
129 static int et_rx_intr_npkts = 32;
130 static int et_rx_intr_delay = 20; /* x10 usec */
131 static int et_tx_intr_nsegs = 128;
132 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
133
134 struct et_bsize {
135 int bufsize;
136 et_newbuf_t newbuf;
137 };
138
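/*
 * Ring 0 is refilled with header-sized mbufs (et_newbuf_hdr) and ring 1
 * with cluster mbufs (et_newbuf_cluster).  The bufsize values below are
 * copied into rbd_bufsize by et_init() and later written into the RXDMA
 * ring-size fields by et_start_rxdma().
 */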
139 static const struct et_bsize et_bufsize[ET_RX_NRING] = {
140 { .bufsize = 0, .newbuf = et_newbuf_hdr },
141 { .bufsize = 0, .newbuf = et_newbuf_cluster },
142 };
143
144 static const struct device_compatible_entry compat_data[] = {
145 { .id = PCI_ID_CODE(PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310),
146 .value = 0 },
147
148
149 { .id = PCI_ID_CODE(PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301),
150 .value = ET_FLAG_FASTETHER },
151
152 PCI_COMPAT_EOL
153 };
154
155 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
156 NULL);
157
158 static int
159 et_match(device_t dev, cfdata_t match, void *aux)
160 {
161 struct pci_attach_args *pa = aux;
162
163 return pci_compatible_match(pa, compat_data);
164 }
165
166 static void
167 et_attach(device_t parent, device_t self, void *aux)
168 {
169 struct et_softc *sc = device_private(self);
170 struct pci_attach_args *pa = aux;
171 const struct device_compatible_entry *dce;
172 pci_chipset_tag_t pc = pa->pa_pc;
173 pci_intr_handle_t ih;
174 const char *intrstr;
175 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
176 struct mii_data * const mii = &sc->sc_miibus;
177 uint32_t pmcfg;
178 pcireg_t memtype;
179 int error;
180 char intrbuf[PCI_INTRSTR_LEN];
181
182 pci_aprint_devinfo(pa, "Ethernet controller");
183
184 sc->sc_dev = self;
185
186 /*
187 * Initialize tunables
188 */
189 sc->sc_rx_intr_npkts = et_rx_intr_npkts;
190 sc->sc_rx_intr_delay = et_rx_intr_delay;
191 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
192 sc->sc_timer = et_timer;
193
194 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
195 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
196 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
197 aprint_error_dev(self, "could not map mem space\n");
198 return;
199 }
200
201 if (pci_intr_map(pa, &ih) != 0) {
202 aprint_error_dev(self, "could not map interrupt\n");
203 goto fail;
204 }
205
206 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
207 sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, et_intr,
208 sc, device_xname(self));
209 if (sc->sc_irq_handle == NULL) {
210 aprint_error_dev(self, "could not establish interrupt");
211 if (intrstr != NULL)
212 aprint_error(" at %s", intrstr);
213 aprint_error("\n");
214 goto fail;
215 }
216 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
217
218 sc->sc_pct = pa->pa_pc;
219 sc->sc_pcitag = pa->pa_tag;
220
221 if (pci_dma64_available(pa))
222 sc->sc_dmat = pa->pa_dmat64;
223 else
224 sc->sc_dmat = pa->pa_dmat;
225
226 dce = pci_compatible_lookup(pa, compat_data);
227 KASSERT(dce != NULL);
228 sc->sc_flags = (uint32_t)dce->value;
229
230 error = et_bus_config(sc);
231 if (error)
232 goto fail;
233
234 et_get_eaddr(sc, sc->sc_enaddr);
235
236 aprint_normal_dev(self, "Ethernet address %s\n",
237 ether_sprintf(sc->sc_enaddr));
238
239 /* Take PHY out of COMA and enable clocks. */
240 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
241 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
242 pmcfg |= EM_PM_GIGEPHY_ENB;
243 CSR_WRITE_4(sc, ET_PM, pmcfg);
244
245 et_reset(sc);
246
247 et_disable_intrs(sc);
248
249 error = et_dma_alloc(sc);
250 if (error)
251 goto fail;
252
253 ifp->if_softc = sc;
254 ifp->if_mtu = ETHERMTU;
255 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
256 ifp->if_init = et_init;
257 ifp->if_ioctl = et_ioctl;
258 ifp->if_start = et_start;
259 ifp->if_watchdog = et_watchdog;
260 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
261 IFQ_SET_READY(&ifp->if_snd);
262 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
263
264 et_chip_attach(sc);
265
266 mii->mii_ifp = ifp;
267 mii->mii_readreg = et_miibus_readreg;
268 mii->mii_writereg = et_miibus_writereg;
269 mii->mii_statchg = et_miibus_statchg;
270
271 sc->sc_ethercom.ec_mii = mii;
272 ifmedia_init(&mii->mii_media, 0, ether_mediachange,
273 et_ifmedia_sts);
274 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
275 if (LIST_FIRST(&mii->mii_phys) == NULL) {
276 aprint_error_dev(self, "no PHY found!\n");
277 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
278 0, NULL);
279 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
280 } else
281 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
282
283 if_attach(ifp);
284 if_deferred_start_init(ifp, NULL);
285 ether_ifattach(ifp, sc->sc_enaddr);
286
287 callout_init(&sc->sc_tick, 0);
288 callout_setfunc(&sc->sc_tick, et_tick, sc);
289 callout_init(&sc->sc_txtick, 0);
290 callout_setfunc(&sc->sc_txtick, et_txtick, sc);
291
292 if (pmf_device_register(self, NULL, NULL))
293 pmf_class_network_register(self, ifp);
294 else
295 aprint_error_dev(self, "couldn't establish power handler\n");
296
297 return;
298
299 fail:
300 et_dma_free(sc);
301 if (sc->sc_irq_handle != NULL) {
302 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
303 sc->sc_irq_handle = NULL;
304 }
305 if (sc->sc_mem_size) {
306 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
307 sc->sc_mem_size = 0;
308 }
309 }
310
311 static int
312 et_detach(device_t self, int flags)
313 {
314 struct et_softc *sc = device_private(self);
315 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
316 int s;
317
318 pmf_device_deregister(self);
319 s = splnet();
320 et_stop(sc);
321 splx(s);
322
323 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
324
325 ether_ifdetach(ifp);
326 if_detach(ifp);
327 et_dma_free(sc);
328
329 /* Delete all remaining media. */
330 ifmedia_fini(&sc->sc_miibus.mii_media);
331
332 if (sc->sc_irq_handle != NULL) {
333 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
334 sc->sc_irq_handle = NULL;
335 }
336
337 if (sc->sc_mem_size) {
338 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
339 sc->sc_mem_size = 0;
340 }
341
342 return 0;
343 }
344
345 #if 0 /* XXX XXX XXX UNUSED */
346 static int
347 et_shutdown(device_t self)
348 {
349 struct et_softc *sc = device_private(self);
350 int s;
351
352 s = splnet();
353 et_stop(sc);
354 splx(s);
355
356 return 0;
357 }
358 #endif
359
360 static int
361 et_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
362 {
363 struct et_softc *sc = device_private(dev);
364 uint32_t data;
365 int i, ret;
366
367 /* Stop any pending operations */
368 CSR_WRITE_4(sc, ET_MII_CMD, 0);
369
370 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
371 __SHIFTIN(reg, ET_MII_ADDR_REG);
372 CSR_WRITE_4(sc, ET_MII_ADDR, data);
373
374 /* Start reading */
375 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
376
377 #define NRETRY 50
378
379 for (i = 0; i < NRETRY; ++i) {
380 data = CSR_READ_4(sc, ET_MII_IND);
381 if ((data & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
382 break;
383 DELAY(50);
384 }
385 if (i == NRETRY) {
386 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
387 phy, reg);
388 ret = ETIMEDOUT;
389 goto back;
390 }
391
392 #undef NRETRY
393
394 data = CSR_READ_4(sc, ET_MII_STAT);
395 *val = __SHIFTOUT(data, ET_MII_STAT_VALUE);
396 ret = 0;
397
398 back:
399 /* Make sure that the current operation is stopped */
400 CSR_WRITE_4(sc, ET_MII_CMD, 0);
401 return ret;
402 }
403
404 static int
405 et_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
406 {
407 struct et_softc *sc = device_private(dev);
408 uint32_t data;
409 uint16_t tmp;
410 int rv = 0;
411 int i;
412
413 /* Stop any pending operations */
414 CSR_WRITE_4(sc, ET_MII_CMD, 0);
415
416 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
417 __SHIFTIN(reg, ET_MII_ADDR_REG);
418 CSR_WRITE_4(sc, ET_MII_ADDR, data);
419
420 /* Start writing */
421 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val, ET_MII_CTRL_VALUE));
422
423 #define NRETRY 100
424
425 for (i = 0; i < NRETRY; ++i) {
426 data = CSR_READ_4(sc, ET_MII_IND);
427 if ((data & ET_MII_IND_BUSY) == 0)
428 break;
429 DELAY(50);
430 }
431 if (i == NRETRY) {
432 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
433 phy, reg);
434 et_miibus_readreg(dev, phy, reg, &tmp);
435 rv = ETIMEDOUT;
436 }
437
438 #undef NRETRY
439
440 /* Make sure that the current operation is stopped */
441 CSR_WRITE_4(sc, ET_MII_CMD, 0);
442
443 return rv;
444 }
445
446 static void
447 et_miibus_statchg(struct ifnet *ifp)
448 {
449 struct et_softc *sc = ifp->if_softc;
450 struct mii_data *mii = &sc->sc_miibus;
451 uint32_t cfg1, cfg2, ctrl;
452 int i;
453
454 sc->sc_flags &= ~ET_FLAG_LINK;
455 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
456 (IFM_ACTIVE | IFM_AVALID)) {
457 switch (IFM_SUBTYPE(mii->mii_media_active)) {
458 case IFM_10_T:
459 case IFM_100_TX:
460 sc->sc_flags |= ET_FLAG_LINK;
461 break;
462 case IFM_1000_T:
463 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
464 sc->sc_flags |= ET_FLAG_LINK;
465 break;
466 }
467 }
468
469 /* XXX Stop TX/RX MAC? */
470 if ((sc->sc_flags & ET_FLAG_LINK) == 0)
471 return;
472
473 /* Program MACs with resolved speed/duplex/flow-control. */
474 ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
475 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
476 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
477 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
478 ET_MAC_CFG1_LOOPBACK);
479 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
480 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
481 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
482 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
483 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
484
485
486 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
487 cfg2 |= ET_MAC_CFG2_MODE_GMII;
488 else {
489 cfg2 |= ET_MAC_CFG2_MODE_MII;
490 ctrl |= ET_MAC_CTRL_MODE_MII;
491 }
492
493 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
494 cfg2 |= ET_MAC_CFG2_FDX;
495 /*
496 * Controller lacks automatic TX pause frame
497 * generation so it should be handled by driver.
498 * Even though driver can send pause frame with
499 * arbitrary pause time, controller does not
500 * provide a way that tells how many free RX
501 * buffers are available in controller. This
502 * limitation makes it hard to generate XON frame
503 * in time on driver side so don't enable TX flow
504 * control.
505 */
506 #ifdef notyet
507 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
508 cfg1 |= ET_MAC_CFG1_TXFLOW;
509 #endif
510 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
511 cfg1 |= ET_MAC_CFG1_RXFLOW;
512 } else
513 ctrl |= ET_MAC_CTRL_GHDX;
514
515 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
516 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
517 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
518 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
519
520 #define NRETRY 100
521
522 for (i = 0; i < NRETRY; ++i) {
523 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
524 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
525 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
526 break;
527
528 DELAY(10);
529 }
530 /* Note: Timeout always happens when cable is not plugged in. */
531
532 sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
533
534 #undef NRETRY
535 }
536
537 static void
538 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
539 {
540 struct et_softc *sc;
541 struct mii_data *mii;
542
543 sc = ifp->if_softc;
544 mii = &sc->sc_miibus;
545 mii_pollstat(mii);
546 ifmr->ifm_active = mii->mii_media_active;
547 ifmr->ifm_status = mii->mii_media_status;
548 }
549
550 static void
551 et_stop(struct et_softc *sc)
552 {
553 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
554
555 callout_stop(&sc->sc_tick);
556 callout_stop(&sc->sc_txtick);
557
558 et_stop_rxdma(sc);
559 et_stop_txdma(sc);
560
561 et_disable_intrs(sc);
562
563 et_free_tx_ring(sc);
564 et_free_rx_ring(sc);
565
566 et_reset(sc);
567
568 sc->sc_tx = 0;
569 sc->sc_tx_intr = 0;
570 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
571
572 ifp->if_timer = 0;
573 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
574 }
575
576 static int
577 et_bus_config(struct et_softc *sc)
578 {
579 uint32_t val; //, max_plsz;
580 // uint16_t ack_latency, replay_timer;
581
582 /*
583 * Test whether EEPROM is valid
584 * NOTE: Read twice to get the correct value
585 */
586 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
587 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
588
589 if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
590 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
591 return ENXIO;
592 }
593
594 /* TODO: LED */
595 #if 0
596 /*
597 * Configure ACK latency and replay timer according to
598 * max payload size
599 */
600 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
601 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
602
603 switch (max_plsz) {
604 case ET_PCIV_DEVICE_CAPS_PLSZ_128:
605 ack_latency = ET_PCIV_ACK_LATENCY_128;
606 replay_timer = ET_PCIV_REPLAY_TIMER_128;
607 break;
608
609 case ET_PCIV_DEVICE_CAPS_PLSZ_256:
610 ack_latency = ET_PCIV_ACK_LATENCY_256;
611 replay_timer = ET_PCIV_REPLAY_TIMER_256;
612 break;
613
614 default:
615 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
616 ET_PCIR_ACK_LATENCY) >> 16;
617 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
618 ET_PCIR_REPLAY_TIMER) >> 16;
619 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
620 ack_latency, replay_timer);
621 break;
622 }
623 if (ack_latency != 0) {
624 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
625 ET_PCIR_ACK_LATENCY, ack_latency << 16);
626 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
627 ET_PCIR_REPLAY_TIMER, replay_timer << 16);
628 }
629
630 /*
631 * Set L0s and L1 latency timer to 2us
632 */
633 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
634 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
635 val << 24);
636
637 /*
638 * Set max read request size to 2048 bytes
639 */
640 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
641 ET_PCIR_DEVICE_CTRL) >> 16;
642 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
643 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
644 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
645 val << 16);
646 #endif
647
648 return 0;
649 }
650
651 static void
652 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
653 {
654 uint32_t r;
655
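	/*
	 * The station address is read back from two PCI configuration
	 * registers: MACADDR_LO carries bytes 0-3 and MACADDR_HI carries
	 * bytes 4-5 in its low 16 bits.
	 */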
656 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
657 eaddr[0] = r & 0xff;
658 eaddr[1] = (r >> 8) & 0xff;
659 eaddr[2] = (r >> 16) & 0xff;
660 eaddr[3] = (r >> 24) & 0xff;
661 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
662 eaddr[4] = r & 0xff;
663 eaddr[5] = (r >> 8) & 0xff;
664 }
665
666 static void
667 et_reset(struct et_softc *sc)
668 {
669
670 CSR_WRITE_4(sc, ET_MAC_CFG1,
671 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
672 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
673 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
674
675 CSR_WRITE_4(sc, ET_SWRST,
676 ET_SWRST_TXDMA | ET_SWRST_RXDMA |
677 ET_SWRST_TXMAC | ET_SWRST_RXMAC |
678 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
679
680 CSR_WRITE_4(sc, ET_MAC_CFG1,
681 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
682 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
683 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
684 }
685
686 static void
687 et_disable_intrs(struct et_softc *sc)
688 {
689 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
690 }
691
692 static void
693 et_enable_intrs(struct et_softc *sc, uint32_t intrs)
694 {
695 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
696 }
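/*
 * Note on the two helpers above: ET_INTR_MASK is an active-high mask
 * register, so a 1 bit blocks the corresponding interrupt.  Disabling
 * therefore writes all ones, while enabling writes the complement of
 * the desired interrupt set.
 */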
697
698 static int
699 et_dma_alloc(struct et_softc *sc)
700 {
701 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
702 struct et_txstatus_data *txsd = &sc->sc_tx_status;
703 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
704 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
705 int i, error;
706
707 /*
708 * Create TX ring DMA stuffs
709 */
710 error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
711 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
712 &tx_ring->tr_seg);
713 if (error) {
714 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
715 return error;
716 }
717
718 /*
719 * Create TX status DMA stuffs
720 */
721 error = et_dma_mem_create(sc, sizeof(uint32_t),
722 (void **)&txsd->txsd_status,
723 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
724 if (error) {
725 aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
726 return error;
727 }
728
729 /*
730 * Create DMA stuffs for RX rings
731 */
732 for (i = 0; i < ET_RX_NRING; ++i) {
733 static const uint32_t rx_ring_posreg[ET_RX_NRING] =
734 { ET_RX_RING0_POS, ET_RX_RING1_POS };
735
736 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
737
738 error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
739 (void **)&rx_ring->rr_desc,
740 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
741 if (error) {
742 aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
743 "the %d RX ring\n", i);
744 return error;
745 }
746 rx_ring->rr_posreg = rx_ring_posreg[i];
747 }
748
749 /*
750 * Create RX stat ring DMA stuffs
751 */
752 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
753 (void **)&rxst_ring->rsr_stat,
754 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
755 if (error) {
756 aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
757 return error;
758 }
759
760 /*
761 * Create RX status DMA stuffs
762 */
763 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
764 (void **)&rxsd->rxsd_status,
765 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
766 if (error) {
767 aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
768 return error;
769 }
770
771 /*
772 * Create mbuf DMA stuffs
773 */
774 error = et_dma_mbuf_create(sc);
775 if (error)
776 return error;
777
778 return 0;
779 }
780
781 static void
782 et_dma_free(struct et_softc *sc)
783 {
784 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
785 struct et_txstatus_data *txsd = &sc->sc_tx_status;
786 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
787 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
788 int i, rx_done[ET_RX_NRING];
789
790 /*
791 * Destroy TX ring DMA stuffs
792 */
793 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);
794
795 /*
796 * Destroy TX status DMA stuffs
797 */
798 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);
799
800 /*
801 * Destroy DMA stuffs for RX rings
802 */
803 for (i = 0; i < ET_RX_NRING; ++i) {
804 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
805
806 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
807 }
808
809 /*
810 * Destroy RX stat ring DMA stuffs
811 */
812 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);
813
814 /*
815 * Destroy RX status DMA stuffs
816 */
817 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);
818
819 /*
820 * Destroy mbuf DMA stuffs
821 */
822 for (i = 0; i < ET_RX_NRING; ++i)
823 rx_done[i] = ET_RX_NDESC;
824 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
825 }
826
827 static int
828 et_dma_mbuf_create(struct et_softc *sc)
829 {
830 struct et_txbuf_data *tbd = &sc->sc_tx_data;
831 int i, error, rx_done[ET_RX_NRING];
832
833 /*
834 * Create spare DMA map for RX mbufs
835 */
836 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
837 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
838 if (error) {
839 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
840 return error;
841 }
842
843 /*
844 * Create DMA maps for RX mbufs
845 */
846 bzero(rx_done, sizeof(rx_done));
847 for (i = 0; i < ET_RX_NRING; ++i) {
848 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
849 int j;
850
851 for (j = 0; j < ET_RX_NDESC; ++j) {
852 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
853 MCLBYTES, 0, BUS_DMA_NOWAIT,
854 &rbd->rbd_buf[j].rb_dmap);
855 if (error) {
856 aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
857 "for %d RX ring\n", j, i);
858 rx_done[i] = j;
859 et_dma_mbuf_destroy(sc, 0, rx_done);
860 return error;
861 }
862 }
863 rx_done[i] = ET_RX_NDESC;
864
865 rbd->rbd_softc = sc;
866 rbd->rbd_ring = &sc->sc_rx_ring[i];
867 }
868
869 /*
870 * Create DMA maps for TX mbufs
871 */
872 for (i = 0; i < ET_TX_NDESC; ++i) {
873 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
874 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
875 if (error) {
876 aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
877 "DMA map\n", i);
878 et_dma_mbuf_destroy(sc, i, rx_done);
879 return error;
880 }
881 }
882
883 return 0;
884 }
885
886 static void
887 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
888 {
889 struct et_txbuf_data *tbd = &sc->sc_tx_data;
890 int i;
891
892 /*
893 * Destroy DMA maps for RX mbufs
894 */
895 for (i = 0; i < ET_RX_NRING; ++i) {
896 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
897 int j;
898
899 for (j = 0; j < rx_done[i]; ++j) {
900 struct et_rxbuf *rb = &rbd->rbd_buf[j];
901
902 KASSERTMSG(rb->rb_mbuf == NULL,
903 "RX mbuf in %d RX ring is not freed yet\n", i);
904 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
905 }
906 }
907
908 /*
909 * Destroy DMA maps for TX mbufs
910 */
911 for (i = 0; i < tx_done; ++i) {
912 struct et_txbuf *tb = &tbd->tbd_buf[i];
913
914 KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
915 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
916 }
917
918 /*
919 * Destroy spare mbuf DMA map
920 */
921 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
922 }
923
924 static int
925 et_dma_mem_create(struct et_softc *sc, bus_size_t size,
926 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
927 {
928 int error, nsegs;
929
930 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
931 dmap);
932 if (error) {
933 aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
934 return error;
935 }
936
937 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
938 1, &nsegs, BUS_DMA_WAITOK);
939 if (error) {
940 aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
941 return error;
942 }
943
944 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
945 size, (void **)addr, BUS_DMA_NOWAIT);
946 if (error) {
947 aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
948 return (error);
949 }
950
951 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
952 BUS_DMA_WAITOK);
953 if (error) {
954 aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
955 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
956 return error;
957 }
958
959 memset(*addr, 0, size);
960
961 *paddr = (*dmap)->dm_segs[0].ds_addr;
962
963 return 0;
964 }
965
966 static void
967 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
968 {
969 bus_dmamap_unload(sc->sc_dmat, dmap);
970 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
971 }
972
973 static void
974 et_chip_attach(struct et_softc *sc)
975 {
976 uint32_t val;
977
978 /*
979 * Perform minimal initialization
980 */
981
982 /* Disable loopback */
983 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
984
985 /* Reset MAC */
986 CSR_WRITE_4(sc, ET_MAC_CFG1,
987 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
988 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
989 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
990
991 /*
992 * Setup half duplex mode
993 */
994 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
995 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
996 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
997 ET_MAC_HDX_EXC_DEFER;
998 CSR_WRITE_4(sc, ET_MAC_HDX, val);
999
1000 /* Clear MAC control */
1001 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1002
1003 /* Reset MII */
1004 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1005
1006 /* Bring MAC out of reset state */
1007 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1008
1009 /* Enable memory controllers */
1010 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1011 }
1012
1013 static int
1014 et_intr(void *xsc)
1015 {
1016 struct et_softc *sc = xsc;
1017 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1018 uint32_t intrs;
1019
1020 if ((ifp->if_flags & IFF_RUNNING) == 0)
1021 return (0);
1022
1023 intrs = CSR_READ_4(sc, ET_INTR_STATUS);
1024 if (intrs == 0 || intrs == 0xffffffff)
1025 return (0);
1026
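	/*
	 * Mask all interrupts while the handlers below run; the full
	 * ET_INTRS set is re-enabled at the end of the routine.
	 */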
1027 et_disable_intrs(sc);
1028 intrs &= ET_INTRS;
1029 if (intrs == 0) /* Not interested */
1030 goto back;
1031
1032 if (intrs & ET_INTR_RXEOF)
1033 et_rxeof(sc);
1034 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
1035 et_txeof(sc);
1036 if (intrs & ET_INTR_TIMER)
1037 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1038 back:
1039 et_enable_intrs(sc, ET_INTRS);
1040
1041 return (1);
1042 }
1043
1044 static int
1045 et_init(struct ifnet *ifp)
1046 {
1047 struct et_softc *sc = ifp->if_softc;
1048 int error, i, s;
1049
1050 if (ifp->if_flags & IFF_RUNNING)
1051 return 0;
1052
1053 s = splnet();
1054
1055 et_stop(sc);
1056 et_reset(sc);
1057
1058 for (i = 0; i < ET_RX_NRING; ++i) {
1059 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
1060 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
1061 }
1062
1063 error = et_init_tx_ring(sc);
1064 if (error)
1065 goto back;
1066
1067 error = et_init_rx_ring(sc);
1068 if (error)
1069 goto back;
1070
1071 error = et_chip_init(sc);
1072 if (error)
1073 goto back;
1074
1075 error = et_start_rxdma(sc);
1076 if (error)
1077 goto back;
1078
1079 error = et_start_txdma(sc);
1080 if (error)
1081 goto back;
1082
1083 /* Enable interrupts. */
1084 et_enable_intrs(sc, ET_INTRS);
1085
1086 callout_schedule(&sc->sc_tick, hz);
1087
1088 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1089
1090 ifp->if_flags |= IFF_RUNNING;
1091 ifp->if_flags &= ~IFF_OACTIVE;
1092
1093 sc->sc_flags &= ~ET_FLAG_LINK;
1094 ether_mediachange(ifp);
1095 back:
1096 if (error)
1097 et_stop(sc);
1098
1099 splx(s);
1100
1101 return error;
1102 }
1103
1104 static int
1105 et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1106 {
1107 struct et_softc *sc = ifp->if_softc;
1108 int s, error = 0;
1109
1110 s = splnet();
1111
1112 switch (cmd) {
1113 case SIOCSIFFLAGS:
1114 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1115 break;
1116 if (ifp->if_flags & IFF_UP) {
1117 /*
1118 * If only the PROMISC or ALLMULTI flag changes, then
1119 * don't do a full re-init of the chip, just update
1120 * the Rx filter.
1121 */
1122 if ((ifp->if_flags & IFF_RUNNING) &&
1123 ((ifp->if_flags ^ sc->sc_if_flags) &
1124 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1125 et_setmulti(sc);
1126 } else {
1127 if (!(ifp->if_flags & IFF_RUNNING))
1128 et_init(ifp);
1129 }
1130 } else {
1131 if (ifp->if_flags & IFF_RUNNING)
1132 et_stop(sc);
1133 }
1134 sc->sc_if_flags = ifp->if_flags;
1135 break;
1136 default:
1137 error = ether_ioctl(ifp, cmd, data);
1138 if (error == ENETRESET) {
1139 if (ifp->if_flags & IFF_RUNNING)
1140 et_setmulti(sc);
1141 error = 0;
1142 }
1143 break;
1144 }
1145
1146 splx(s);
1147
1148 return error;
1149 }
1150
1151 static void
1152 et_start(struct ifnet *ifp)
1153 {
1154 struct et_softc *sc = ifp->if_softc;
1155 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1156 int trans;
1157 struct mbuf *m;
1158
1159 if (((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) ||
1160 ((sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1161 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)))
1162 return;
1163
1164 trans = 0;
1165 for (;;) {
1166 IFQ_DEQUEUE(&ifp->if_snd, m);
1167 if (m == NULL)
1168 break;
1169
1170 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
1171 ifp->if_flags |= IFF_OACTIVE;
1172 break;
1173 }
1174
1175 if (et_encap(sc, &m)) {
1176 if_statinc(ifp, if_oerrors);
1177 ifp->if_flags |= IFF_OACTIVE;
1178 break;
1179 }
1180
1181 trans = 1;
1182
1183 bpf_mtap(ifp, m, BPF_D_OUT);
1184 }
1185
1186 if (trans) {
1187 callout_schedule(&sc->sc_txtick, hz);
1188 ifp->if_timer = 5;
1189 }
1190 }
1191
1192 static void
1193 et_watchdog(struct ifnet *ifp)
1194 {
1195 struct et_softc *sc = ifp->if_softc;
1196 aprint_error_dev(sc->sc_dev, "watchdog timed out\n");
1197
1198 ifp->if_flags &= ~IFF_RUNNING;
1199 et_init(ifp);
1200 et_start(ifp);
1201 }
1202
1203 static int
1204 et_stop_rxdma(struct et_softc *sc)
1205 {
1206
1207 CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1208 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1209
1210 DELAY(5);
1211 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1212 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
1213 return ETIMEDOUT;
1214 }
1215 return 0;
1216 }
1217
1218 static int
1219 et_stop_txdma(struct et_softc *sc)
1220 {
1221
1222 CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1223 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1224 return 0;
1225 }
1226
1227 static void
1228 et_free_tx_ring(struct et_softc *sc)
1229 {
1230 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1231 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1232 int i;
1233
1234 for (i = 0; i < ET_TX_NDESC; ++i) {
1235 struct et_txbuf *tb = &tbd->tbd_buf[i];
1236
1237 if (tb->tb_mbuf != NULL) {
1238 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1239 m_freem(tb->tb_mbuf);
1240 tb->tb_mbuf = NULL;
1241 }
1242 }
1243
1244 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1245 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1246 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1247 }
1248
1249 static void
1250 et_free_rx_ring(struct et_softc *sc)
1251 {
1252 int n;
1253
1254 for (n = 0; n < ET_RX_NRING; ++n) {
1255 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1256 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
1257 int i;
1258
1259 for (i = 0; i < ET_RX_NDESC; ++i) {
1260 struct et_rxbuf *rb = &rbd->rbd_buf[i];
1261
1262 if (rb->rb_mbuf != NULL) {
1263 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
1264 m_freem(rb->rb_mbuf);
1265 rb->rb_mbuf = NULL;
1266 }
1267 }
1268
1269 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
1270 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
1271 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1272 }
1273 }
1274
1275 static void
1276 et_setmulti(struct et_softc *sc)
1277 {
1278 struct ethercom *ec = &sc->sc_ethercom;
1279 struct ifnet *ifp = &ec->ec_if;
1280 uint32_t hash[4] = { 0, 0, 0, 0 };
1281 uint32_t rxmac_ctrl, pktfilt;
1282 struct ether_multi *enm;
1283 struct ether_multistep step;
1284 int i, count;
1285
1286 pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1287 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1288
1289 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1290 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1291 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1292 goto back;
1293 }
1294
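	/*
	 * Build the 128-bit multicast hash filter.  Bits 23-29 of the
	 * big-endian CRC of each address select one of 128 bits, spread
	 * across the four 32-bit ET_MULTI_HASH registers.
	 */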
1295 count = 0;
1296 ETHER_LOCK(ec);
1297 ETHER_FIRST_MULTI(step, ec, enm);
1298 while (enm != NULL) {
1299 uint32_t *hp, h;
1300
1301 h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1302 h = (h & 0x3f800000) >> 23;
1303
1304 hp = &hash[0];
1305 if (h >= 32 && h < 64) {
1306 h -= 32;
1307 hp = &hash[1];
1308 } else if (h >= 64 && h < 96) {
1309 h -= 64;
1310 hp = &hash[2];
1311 } else if (h >= 96) {
1312 h -= 96;
1313 hp = &hash[3];
1314 }
1315 *hp |= (1 << h);
1316
1317 ++count;
1318 ETHER_NEXT_MULTI(step, enm);
1319 }
1320 ETHER_UNLOCK(ec);
1321
1322 for (i = 0; i < 4; ++i)
1323 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1324
1325 if (count > 0)
1326 pktfilt |= ET_PKTFILT_MCAST;
1327 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1328 back:
1329 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1330 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1331 }
1332
1333 static int
1334 et_chip_init(struct et_softc *sc)
1335 {
1336 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1337 uint32_t rxq_end;
1338 int error;
1339
1340 /*
1341 * Split internal memory between TX and RX according to MTU
1342 */
1343 if (ifp->if_mtu < 2048)
1344 rxq_end = 0x2bc;
1345 else if (ifp->if_mtu < 8192)
1346 rxq_end = 0x1ff;
1347 else
1348 rxq_end = 0x1b3;
1349 CSR_WRITE_4(sc, ET_RXQ_START, 0);
1350 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
1351 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
1352 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);
1353
1354 /* No loopback */
1355 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1356
1357 /* Clear MSI configure */
1358 CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1359
1360 /* Disable timer */
1361 CSR_WRITE_4(sc, ET_TIMER, 0);
1362
1363 /* Initialize MAC */
1364 et_init_mac(sc);
1365
1366 /* Enable memory controllers */
1367 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1368
1369 /* Initialize RX MAC */
1370 et_init_rxmac(sc);
1371
1372 /* Initialize TX MAC */
1373 et_init_txmac(sc);
1374
1375 /* Initialize RX DMA engine */
1376 error = et_init_rxdma(sc);
1377 if (error)
1378 return error;
1379
1380 /* Initialize TX DMA engine */
1381 error = et_init_txdma(sc);
1382 if (error)
1383 return error;
1384
1385 return 0;
1386 }
1387
1388 static int
1389 et_init_tx_ring(struct et_softc *sc)
1390 {
1391 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1392 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1393 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1394
1395 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1396 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1397 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1398
1399 tbd->tbd_start_index = 0;
1400 tbd->tbd_start_wrap = 0;
1401 tbd->tbd_used = 0;
1402
1403 bzero(txsd->txsd_status, sizeof(uint32_t));
1404 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
1405 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1406 return 0;
1407 }
1408
1409 static int
1410 et_init_rx_ring(struct et_softc *sc)
1411 {
1412 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1413 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1414 int n;
1415
1416 for (n = 0; n < ET_RX_NRING; ++n) {
1417 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1418 int i, error;
1419
1420 for (i = 0; i < ET_RX_NDESC; ++i) {
1421 error = rbd->rbd_newbuf(rbd, i, 1);
1422 if (error) {
1423 aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: "
1424 "%d\n", n, i, error);
1425 return error;
1426 }
1427 }
1428 }
1429
1430 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1431 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1432 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1433
1434 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1435 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1436 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1437
1438 return 0;
1439 }
1440
1441 static int
1442 et_init_rxdma(struct et_softc *sc)
1443 {
1444 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1445 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1446 struct et_rxdesc_ring *rx_ring;
1447 int error;
1448
1449 error = et_stop_rxdma(sc);
1450 if (error) {
1451 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
1452 return error;
1453 }
1454
1455 /*
1456 * Install RX status
1457 */
1458 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1459 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1460
1461 /*
1462 * Install RX stat ring
1463 */
1464 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1465 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1466 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1467 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1468 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1469
1470 /* Match ET_RXSTAT_POS */
1471 rxst_ring->rsr_index = 0;
1472 rxst_ring->rsr_wrap = 0;
1473
1474 /*
1475 * Install the 2nd RX descriptor ring
1476 */
1477 rx_ring = &sc->sc_rx_ring[1];
1478 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1479 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1480 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1481 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1482 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1483
1484 /* Match ET_RX_RING1_POS */
1485 rx_ring->rr_index = 0;
1486 rx_ring->rr_wrap = 1;
1487
1488 /*
1489 * Install the 1st RX descriptor ring
1490 */
1491 rx_ring = &sc->sc_rx_ring[0];
1492 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1493 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1494 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1495 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1496 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1497
1498 /* Match ET_RX_RING0_POS */
1499 rx_ring->rr_index = 0;
1500 rx_ring->rr_wrap = 1;
1501
1502 /*
1503 * RX intr moderation
1504 */
1505 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1506 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1507
1508 return 0;
1509 }
1510
1511 static int
1512 et_init_txdma(struct et_softc *sc)
1513 {
1514 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1515 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1516 int error;
1517
1518 error = et_stop_txdma(sc);
1519 if (error) {
1520 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
1521 return error;
1522 }
1523
1524 /*
1525 * Install TX descriptor ring
1526 */
1527 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1528 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1529 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1530
1531 /*
1532 * Install TX status
1533 */
1534 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1535 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1536
1537 CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1538
1539 /* Match ET_TX_READY_POS */
1540 tx_ring->tr_ready_index = 0;
1541 tx_ring->tr_ready_wrap = 0;
1542
1543 return 0;
1544 }
1545
1546 static void
1547 et_init_mac(struct et_softc *sc)
1548 {
1549 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1550 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1551 uint32_t val;
1552
1553 /* Reset MAC */
1554 CSR_WRITE_4(sc, ET_MAC_CFG1,
1555 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1556 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1557 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1558
1559 /*
1560 * Setup inter packet gap
1561 */
1562 val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
1563 __SHIFTIN(88, ET_IPG_NONB2B_2) |
1564 __SHIFTIN(80, ET_IPG_MINIFG) |
1565 __SHIFTIN(96, ET_IPG_B2B);
1566 CSR_WRITE_4(sc, ET_IPG, val);
1567
1568 /*
1569 * Setup half duplex mode
1570 */
1571 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1572 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1573 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1574 ET_MAC_HDX_EXC_DEFER;
1575 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1576
1577 /* Clear MAC control */
1578 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1579
1580 /* Reset MII */
1581 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1582
1583 /*
1584 * Set MAC address
1585 */
1586 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1587 CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1588 val = (eaddr[0] << 16) | (eaddr[1] << 24);
1589 CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1590
1591 /* Set max frame length */
1592 CSR_WRITE_4(sc, ET_MAX_FRMLEN,
1593 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);
1594
1595 /* Bring MAC out of reset state */
1596 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1597 }
1598
1599 static void
1600 et_init_rxmac(struct et_softc *sc)
1601 {
1602 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1603 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1604 uint32_t val;
1605 int i;
1606
1607 /* Disable RX MAC and WOL */
1608 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1609
1610 /*
1611 * Clear all WOL related registers
1612 */
1613 for (i = 0; i < 3; ++i)
1614 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1615 for (i = 0; i < 20; ++i)
1616 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1617
1618 /*
1619 * Set WOL source address. XXX is this necessary?
1620 */
1621 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1622 CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1623 val = (eaddr[0] << 8) | eaddr[1];
1624 CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1625
1626 /* Clear packet filters */
1627 CSR_WRITE_4(sc, ET_PKTFILT, 0);
1628
1629 /* No ucast filtering */
1630 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1631 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1632 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1633
1634 if (ifp->if_mtu > 8192) {
1635 /*
1636 * In order to transmit jumbo packets greater than 8k,
1637 * the FIFO between RX MAC and RX DMA needs to be reduced
1638 * in size to (16k - MTU). In order to implement this, we
1639 * must use "cut through" mode in the RX MAC, which chops
1640 * packets down into segments which are (max_size * 16).
1641 * In this case we selected 256 bytes, since this is the
1642 * size of the PCI-Express TLP's that the 1310 uses.
1643 */
1644 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
1645 ET_RXMAC_MC_SEGSZ_ENABLE;
1646 } else {
1647 val = 0;
1648 }
1649 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1650
1651 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1652
1653 /* Initialize RX MAC management register */
1654 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1655
1656 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1657
1658 CSR_WRITE_4(sc, ET_RXMAC_MGT,
1659 ET_RXMAC_MGT_PASS_ECRC |
1660 ET_RXMAC_MGT_PASS_ELEN |
1661 ET_RXMAC_MGT_PASS_ETRUNC |
1662 ET_RXMAC_MGT_CHECK_PKT);
1663
1664 /*
1665 * Configure runt filtering (may not work on certain chip generations)
1666 */
1667 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
1668 CSR_WRITE_4(sc, ET_PKTFILT, val);
1669
1670 /* Enable RX MAC but leave WOL disabled */
1671 CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1672 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1673
1674 /*
1675 * Setup multicast hash and allmulti/promisc mode
1676 */
1677 et_setmulti(sc);
1678 }
1679
1680 static void
1681 et_init_txmac(struct et_softc *sc)
1682 {
1683
1684 /* Disable TX MAC and FC(?) */
1685 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1686
1687 /* No flow control yet */
1688 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1689
1690 /* Enable TX MAC but leave FC(?) disabled */
1691 CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1692 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1693 }
1694
1695 static int
1696 et_start_rxdma(struct et_softc *sc)
1697 {
1698 uint32_t val = 0;
1699
1700 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
1701 ET_RXDMA_CTRL_RING0_SIZE) |
1702 ET_RXDMA_CTRL_RING0_ENABLE;
1703 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
1704 ET_RXDMA_CTRL_RING1_SIZE) |
1705 ET_RXDMA_CTRL_RING1_ENABLE;
1706
1707 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1708
1709 DELAY(5);
1710
1711 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1712 aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
1713 return ETIMEDOUT;
1714 }
1715 return 0;
1716 }
1717
1718 static int
1719 et_start_txdma(struct et_softc *sc)
1720 {
1721
1722 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1723 return 0;
1724 }
1725
1726 static void
1727 et_rxeof(struct et_softc *sc)
1728 {
1729 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1730 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1731 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1732 uint32_t rxs_stat_ring;
1733 int rxst_wrap, rxst_index;
1734
1735 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1736 return;
1737
1738 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1739 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1740 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1741 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1742
1743 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
1744 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1745 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
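	/*
	 * The controller advances its position in the RX stat ring and
	 * mirrors it (index plus wrap bit) in the RX status block.  Consume
	 * stat entries until the software index/wrap pair catches up with
	 * the hardware's.
	 */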
1746
1747 while (rxst_index != rxst_ring->rsr_index ||
1748 rxst_wrap != rxst_ring->rsr_wrap) {
1749 struct et_rxbuf_data *rbd;
1750 struct et_rxdesc_ring *rx_ring;
1751 struct et_rxstat *st;
1752 struct et_rxbuf *rb;
1753 struct mbuf *m;
1754 int buflen, buf_idx, ring_idx;
1755 uint32_t rxstat_pos, rxring_pos;
1756
1757 KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
1758 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1759
1760 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
1761 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
1762 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
1763
1764 if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1765 rxst_ring->rsr_index = 0;
1766 rxst_ring->rsr_wrap ^= 1;
1767 }
1768 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
1769 ET_RXSTAT_POS_INDEX);
1770 if (rxst_ring->rsr_wrap)
1771 rxstat_pos |= ET_RXSTAT_POS_WRAP;
1772 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1773
1774 if (ring_idx >= ET_RX_NRING) {
1775 if_statinc(ifp, if_ierrors);
1776 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
1777 ring_idx);
1778 continue;
1779 }
1780 if (buf_idx >= ET_RX_NDESC) {
1781 if_statinc(ifp, if_ierrors);
1782 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
1783 buf_idx);
1784 continue;
1785 }
1786
1787 rbd = &sc->sc_rx_data[ring_idx];
1788 rb = &rbd->rbd_buf[buf_idx];
1789 m = rb->rb_mbuf;
1790 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
1791 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1792
1793 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
1794 if (buflen < ETHER_CRC_LEN) {
1795 m_freem(m);
1796 if_statinc(ifp, if_ierrors);
1797 } else {
1798 m->m_pkthdr.len = m->m_len = buflen -
1799 ETHER_CRC_LEN;
1800 m_set_rcvif(m, ifp);
1801
1802 if_percpuq_enqueue(ifp->if_percpuq, m);
1803 }
1804 } else {
1805 if_statinc(ifp, if_ierrors);
1806 }
1807
1808 rx_ring = &sc->sc_rx_ring[ring_idx];
1809
1810 if (buf_idx != rx_ring->rr_index) {
1811 aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
1812 "buf_idx %d, rr_idx %d\n",
1813 ring_idx, buf_idx, rx_ring->rr_index);
1814 }
1815
1816 KASSERT(rx_ring->rr_index < ET_RX_NDESC);
1817 if (++rx_ring->rr_index == ET_RX_NDESC) {
1818 rx_ring->rr_index = 0;
1819 rx_ring->rr_wrap ^= 1;
1820 }
1821 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
1822 if (rx_ring->rr_wrap)
1823 rxring_pos |= ET_RX_RING_POS_WRAP;
1824 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
1825 }
1826 }
1827
1828 static int
1829 et_encap(struct et_softc *sc, struct mbuf **m0)
1830 {
1831 struct mbuf *m = *m0;
1832 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1833 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1834 struct et_txdesc *td;
1835 bus_dmamap_t map;
1836 int error, maxsegs, first_idx, last_idx, i;
1837 uint32_t tx_ready_pos, last_td_ctrl2;
1838
1839 maxsegs = ET_TX_NDESC - tbd->tbd_used;
1840 if (maxsegs > ET_NSEG_MAX)
1841 maxsegs = ET_NSEG_MAX;
1842 KASSERTMSG(maxsegs >= ET_NSEG_SPARE,
1843 "not enough spare TX desc (%d)\n", maxsegs);
1844
1845 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1846 first_idx = tx_ring->tr_ready_index;
1847 map = tbd->tbd_buf[first_idx].tb_dmap;
1848
1849 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1850 BUS_DMA_NOWAIT);
1851 if (!error && map->dm_nsegs == 0) {
1852 bus_dmamap_unload(sc->sc_dmat, map);
1853 error = EFBIG;
1854 }
1855 if (error && error != EFBIG) {
1856 aprint_error_dev(sc->sc_dev, "can't load TX mbuf");
1857 goto back;
1858 }
1859 if (error) { /* error == EFBIG */
1860 struct mbuf *m_new;
1861
1862 error = 0;
1863
1864 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1865 if (m_new == NULL) {
1866 aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
1867 error = ENOBUFS;
1868 goto back;
1869 }
1870
1871 m_copy_pkthdr(m_new, m);
1872 if (m->m_pkthdr.len > MHLEN) {
1873 MCLGET(m_new, M_DONTWAIT);
1874 if (!(m_new->m_flags & M_EXT)) {
1875 m_freem(m_new);
1876 error = ENOBUFS;
1877 }
1878 }
1879
1880 if (error) {
1881 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
1882 goto back;
1883 }
1884
1885 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
1886 m_freem(m);
1887 m_new->m_len = m_new->m_pkthdr.len;
1888 *m0 = m = m_new;
1889
1890 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1891 BUS_DMA_NOWAIT);
1892 if (error || map->dm_nsegs == 0) {
1893 if (map->dm_nsegs == 0) {
1894 bus_dmamap_unload(sc->sc_dmat, map);
1895 error = EFBIG;
1896 }
1897 aprint_error_dev(sc->sc_dev, "can't load defraged TX mbuf\n");
1898 goto back;
1899 }
1900 }
1901
1902 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1903 BUS_DMASYNC_PREWRITE);
1904
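	/*
	 * TX completion interrupts are moderated: ET_TDCTRL2_INTR is set on
	 * the last fragment only when the running segment count crosses the
	 * next sc_tx_intr_nsegs boundary, so roughly one interrupt is raised
	 * per sc_tx_intr_nsegs descriptors.
	 */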
1905 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
1906 sc->sc_tx += map->dm_nsegs;
1907 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
1908 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
1909 last_td_ctrl2 |= ET_TDCTRL2_INTR;
1910 }
1911
1912 last_idx = -1;
1913 for (i = 0; i < map->dm_nsegs; ++i) {
1914 int idx;
1915
1916 idx = (first_idx + i) % ET_TX_NDESC;
1917 td = &tx_ring->tr_desc[idx];
1918 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
1919 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
1920 td->td_ctrl1 =
1921 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);
1922
1923 if (i == map->dm_nsegs - 1) { /* Last frag */
1924 td->td_ctrl2 = last_td_ctrl2;
1925 last_idx = idx;
1926 }
1927
1928 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1929 if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
1930 tx_ring->tr_ready_index = 0;
1931 tx_ring->tr_ready_wrap ^= 1;
1932 }
1933 }
1934 td = &tx_ring->tr_desc[first_idx];
1935 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */
1936
1937 KASSERT(last_idx >= 0);
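	/*
	 * The loaded DMA map currently sits at first_idx.  Swap it to
	 * last_idx, where the mbuf pointer is stored, so that et_txeof()
	 * unloads the correct map when it frees the mbuf; first_idx inherits
	 * the spare map that used to live at last_idx.
	 */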
1938 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
1939 tbd->tbd_buf[last_idx].tb_dmap = map;
1940 tbd->tbd_buf[last_idx].tb_mbuf = m;
1941
1942 tbd->tbd_used += map->dm_nsegs;
1943 KASSERT(tbd->tbd_used <= ET_TX_NDESC);
1944
1945 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1946 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1947
1948 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
1949 ET_TX_READY_POS_INDEX);
1950 if (tx_ring->tr_ready_wrap)
1951 tx_ready_pos |= ET_TX_READY_POS_WRAP;
1952 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1953
1954 error = 0;
1955 back:
1956 if (error) {
1957 m_freem(m);
1958 *m0 = NULL;
1959 }
1960 return error;
1961 }
1962
1963 static void
1964 et_txeof(struct et_softc *sc)
1965 {
1966 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1967 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1968 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1969 uint32_t tx_done;
1970 int end, wrap;
1971
1972 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1973 return;
1974
1975 if (tbd->tbd_used == 0)
1976 return;
1977
1978 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
1979 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
1980 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
1981
1982 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
1983 struct et_txbuf *tb;
1984
1985 KASSERT(tbd->tbd_start_index < ET_TX_NDESC);
1986 tb = &tbd->tbd_buf[tbd->tbd_start_index];
1987
1988 bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
1989 sizeof(struct et_txdesc));
1990 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1991 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1992
1993 if (tb->tb_mbuf != NULL) {
1994 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1995 m_freem(tb->tb_mbuf);
1996 tb->tb_mbuf = NULL;
1997 if_statinc(ifp, if_opackets);
1998 }
1999
2000 if (++tbd->tbd_start_index == ET_TX_NDESC) {
2001 tbd->tbd_start_index = 0;
2002 tbd->tbd_start_wrap ^= 1;
2003 }
2004
2005 KASSERT(tbd->tbd_used > 0);
2006 tbd->tbd_used--;
2007 }
2008
2009 if (tbd->tbd_used == 0) {
2010 callout_stop(&sc->sc_txtick);
2011 ifp->if_timer = 0;
2012 }
2013 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
2014 ifp->if_flags &= ~IFF_OACTIVE;
2015
2016 if_schedule_deferred_start(ifp);
2017 }
2018
2019 static void
2020 et_txtick(void *xsc)
2021 {
2022 struct et_softc *sc = xsc;
2023 int s;
2024
2025 s = splnet();
2026 et_txeof(sc);
2027 splx(s);
2028 }
2029
2030 static void
2031 et_tick(void *xsc)
2032 {
2033 struct et_softc *sc = xsc;
2034 int s;
2035
2036 s = splnet();
2037 mii_tick(&sc->sc_miibus);
2038 callout_schedule(&sc->sc_tick, hz);
2039 splx(s);
2040 }
2041
2042 static int
2043 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2044 {
2045 return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2046 }
2047
2048 static int
2049 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2050 {
2051 return et_newbuf(rbd, buf_idx, init, MHLEN);
2052 }
2053
2054 static int
2055 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2056 {
2057 struct et_softc *sc = rbd->rbd_softc;
2058 struct et_rxdesc_ring *rx_ring;
2059 struct et_rxdesc *desc;
2060 struct et_rxbuf *rb;
2061 struct mbuf *m;
2062 bus_dmamap_t dmap;
2063 int error, len;
2064
2065 KASSERT(buf_idx < ET_RX_NDESC);
2066 rb = &rbd->rbd_buf[buf_idx];
2067
2068 if (len0 >= MINCLSIZE) {
2069 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2070 if (m == NULL)
2071 return (ENOBUFS);
2072 MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2073 if ((m->m_flags & M_EXT) == 0) {
2074 m_freem(m);
2075 return (ENOBUFS);
2076 }
2077 len = MCLBYTES;
2078 } else {
2079 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2080 len = MHLEN;
2081 }
2082
2083 if (m == NULL) {
2084 error = ENOBUFS;
2085
2086 /* XXX for debug */
2087 aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0);
2088 if (init) {
2089 return error;
2090 } else {
2091 goto back;
2092 }
2093 }
2094 m->m_len = m->m_pkthdr.len = len;
2095
2096 /*
2097 * Try load RX mbuf into temporary DMA tag
2098 */
2099 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
2100 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2101 if (error) {
2102 m_freem(m);
2103
2104 /* XXX for debug */
2105 aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
2106 if (init) {
2107 return error;
2108 } else {
2109 goto back;
2110 }
2111 }
2112
2113 if (!init)
2114 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
2115 rb->rb_mbuf = m;
2116
2117 /*
2118 * Swap RX buf's DMA map with the loaded temporary one
2119 */
2120 dmap = rb->rb_dmap;
2121 rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2122 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
2123 sc->sc_mbuf_tmp_dmap = dmap;
2124
2125 error = 0;
2126 back:
2127 rx_ring = rbd->rbd_ring;
2128 desc = &rx_ring->rr_desc[buf_idx];
2129
2130 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
2131 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
2132 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2133
2134 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
2135 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2136 return error;
2137 }
2138