/*	$OpenBSD: if_vte.c,v 1.28 2024/05/24 06:02:57 jsg Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

const struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

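	/*
	 * Kick off the read cycle; the controller clears MMDIO_READ
	 * once the PHY register has been latched, so poll for the
	 * bit to drop (up to VTE_PHY_TIMEOUT iterations of 5us each).
	 */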
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
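		/*
		 * For example, TIMER = 18 at 25MHz gives
		 * (63 + 18 * 64) / 25MHz = 48.6us and TIMER = 1 at
		 * 2.5MHz gives (63 + 1 * 64) / 2.5MHz = 50.8us,
		 * matching the per-speed comments below.
		 */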
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

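	/*
	 * If multiple PHY instances were attached, reset each PHY so
	 * that only the currently selected instance drives the link.
	 */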
	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    sizeof(vte_devices) / sizeof(vte_devices[0]));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	ifq_init_maxlen(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

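	/*
	 * Each ring follows the usual bus_dma(9) sequence: create a
	 * map, allocate DMA'able memory, map it into the kernel and
	 * load the map to obtain the single physical segment that is
	 * programmed into the controller.
	 */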
	/* Create DMA map for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the TX ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA map for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the RX ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX descriptor ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller
	 * DMA engine is required to speed up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains into a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames
	 * that consist of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > m_trailingspace(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

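	/*
	 * The TX map was created with a single segment of MCLBYTES,
	 * so any chain that was not flattened above fails to load
	 * here.
	 */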
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
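		/*
		 * vte_encap() set VTE_TXMBUF when it deep-copied the
		 * chain into a driver-owned cluster; in that case the
		 * original mbufs are no longer referenced and can be
		 * freed now.
		 */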
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!ifq_empty(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear on whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
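	/* Each MECNT register packs two 8-bit event counters. */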
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
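	/*
	 * Bound the loop below to 8 passes so a storming device
	 * cannot keep us in interrupt context indefinitely.
	 */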
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!ifq_empty(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));
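	/*
	 * Trimming sizeof(uint32_t) leaves MCLBYTES - sizeof(uint32_t)
	 * bytes of buffer space, the same length the RX descriptors
	 * are reprogrammed with in vte_rxeof().
	 */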

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is a mechanism similar to the one used in VIA
		 * Velocity controllers and it indicates the controller
		 * just polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on a
		 * sample board where the controller continuously emitted
		 * TX pause frames once the RX pause threshold was
		 * crossed.  Once triggered it never recovered from that
		 * state; I couldn't find a way to make it work again, at
		 * least.  This issue effectively disconnected the system
		 * from the network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of the
		 * TX pause frames.  This is probably one of the reasons
		 * why the vendor recommends not to enable flow control
		 * on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr, mdcsc;
	int i;

	mdcsc = CSR_READ_2(sc, VTE_MDCSC);
	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.
	 * The vendor confirms that relying on MCR1_MAC_RESET of
	 * VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);

	/*
	 * On some SoCs (like the Vortex86DX3) the MDC speed control
	 * register needs to be restored to its original value instead
	 * of the default one, otherwise some PHY registers may fail
	 * to be read.
	 */
	if (mdcsc != MDCSC_DEFAULT)
		CSR_WRITE_2(sc, VTE_MDCSC, mdcsc);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = LLADDR(ifp->if_sadl);
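	/*
	 * The MID registers take the address as three little-endian
	 * 16-bit words.
	 */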
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames with a frame length
	 * longer than the vte(4) MTU would be silently dropped in
	 * the controller.  This would break path-MTU discovery as
	 * the sender wouldn't get any responses from the receiver.
	 * The RX buffer size should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note, we don't generate an early interrupt here
	 * either, since FreeBSD does not have the interrupt latency
	 * problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
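	/*
	 * Link the descriptors into a ring: each dtnp holds the
	 * physical address of the next descriptor and the last one
	 * wraps back to the first.
	 */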
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
				ETHER_NEXT_MULTI(step, enm);
				continue;
			}
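			/*
			 * The hash is 64 bits wide: the top two CRC
			 * bits select one of the four 16-bit MAR
			 * words and the next four bits select the
			 * bit within that word.
			 */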
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}
1497