/*	$OpenBSD: if_cad.c,v 1.2 2021/06/13 02:56:48 drahn Exp $	*/

/*
 * Copyright (c) 2021 Visa Hankala
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Cadence 10/100/Gigabit Ethernet device.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/mutex.h>
#include <sys/kstat.h>
#include <sys/task.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>

#define GEM_NETCTL			0x0000
#define  GEM_NETCTL_DPRAM			(1 << 18)
#define  GEM_NETCTL_STARTTX			(1 << 9)
#define  GEM_NETCTL_STATCLR			(1 << 5)
#define  GEM_NETCTL_MDEN			(1 << 4)
#define  GEM_NETCTL_TXEN			(1 << 3)
#define  GEM_NETCTL_RXEN			(1 << 2)
#define GEM_NETCFG			0x0004
#define  GEM_NETCFG_SGMIIEN			(1 << 27)
#define  GEM_NETCFG_RXCSUMEN			(1 << 24)
#define  GEM_NETCFG_MDCCLKDIV_MASK		(0x7 << 18)
#define  GEM_NETCFG_MDCCLKDIV_SHIFT		18
#define  GEM_NETCFG_FCSREM			(1 << 17)
#define  GEM_NETCFG_RXOFFS_MASK			(0x3 << 14)
#define  GEM_NETCFG_RXOFFS_SHIFT		14
#define  GEM_NETCFG_PCSSEL			(1 << 11)
#define  GEM_NETCFG_1000			(1 << 10)
#define  GEM_NETCFG_1536RXEN			(1 << 8)
#define  GEM_NETCFG_UCASTHASHEN			(1 << 7)
#define  GEM_NETCFG_MCASTHASHEN			(1 << 6)
#define  GEM_NETCFG_BCASTDI			(1 << 5)
#define  GEM_NETCFG_COPYALL			(1 << 4)
#define  GEM_NETCFG_FDEN			(1 << 1)
#define  GEM_NETCFG_100				(1 << 0)
#define GEM_NETSR			0x0008
#define  GEM_NETSR_PHY_MGMT_IDLE		(1 << 2)
#define GEM_DMACR			0x0010
#define  GEM_DMACR_AHBDISC			(1 << 24)
#define  GEM_DMACR_RXBUF_MASK			(0xff << 16)
#define  GEM_DMACR_RXBUF_SHIFT			16
#define  GEM_DMACR_TXCSUMEN			(1 << 11)
#define  GEM_DMACR_TXSIZE			(1 << 10)
#define  GEM_DMACR_RXSIZE_MASK			(0x3 << 8)
#define  GEM_DMACR_RXSIZE_8K			(0x3 << 8)
#define  GEM_DMACR_ES_PDATA			(1 << 7)
#define  GEM_DMACR_ES_DESCR			(1 << 6)
#define  GEM_DMACR_BLEN_MASK			(0x1f << 0)
#define  GEM_DMACR_BLEN_16			(0x10 << 0)
#define GEM_TXSR			0x0014
#define  GEM_TXSR_TXGO				(1 << 3)
#define GEM_RXQBASE			0x0018
#define GEM_TXQBASE			0x001c
#define GEM_RXSR			0x0020
#define  GEM_RXSR_RXOVR				(1 << 2)
#define GEM_ISR				0x0024
#define GEM_IER				0x0028
#define GEM_IDR				0x002c
#define  GEM_IXR_HRESP				(1 << 11)
#define  GEM_IXR_RXOVR				(1 << 10)
#define  GEM_IXR_TXDONE				(1 << 7)
#define  GEM_IXR_TXURUN				(1 << 6)
#define  GEM_IXR_RETRY				(1 << 5)
#define  GEM_IXR_TXUSED				(1 << 3)
#define  GEM_IXR_RXUSED				(1 << 2)
#define  GEM_IXR_RXDONE				(1 << 1)
#define GEM_PHYMNTNC			0x0034
#define  GEM_PHYMNTNC_CLAUSE_22			(1 << 30)
#define  GEM_PHYMNTNC_OP_READ			(0x2 << 28)
#define  GEM_PHYMNTNC_OP_WRITE			(0x1 << 28)
#define  GEM_PHYMNTNC_ADDR_MASK			(0x1f << 23)
#define  GEM_PHYMNTNC_ADDR_SHIFT		23
#define  GEM_PHYMNTNC_REG_MASK			(0x1f << 18)
#define  GEM_PHYMNTNC_REG_SHIFT			18
#define  GEM_PHYMNTNC_MUST_10			(0x2 << 16)
#define  GEM_PHYMNTNC_DATA_MASK			0xffff
#define GEM_HASHL			0x0080
#define GEM_HASHH			0x0084
#define GEM_LADDRL(i)			(0x0088 + (i) * 8)
#define GEM_LADDRH(i)			(0x008c + (i) * 8)
#define GEM_LADDRNUM			4
#define GEM_MID				0x00fc
#define GEM_OCTTXL			0x0100
#define GEM_OCTTXH			0x0104
#define GEM_TXCNT			0x0108
#define GEM_TXBCCNT			0x010c
#define GEM_TXMCCNT			0x0110
#define GEM_TXPAUSECNT			0x0114
#define GEM_TX64CNT			0x0118
#define GEM_TX65CNT			0x011c
#define GEM_TX128CNT			0x0120
#define GEM_TX256CNT			0x0124
#define GEM_TX512CNT			0x0128
#define GEM_TX1024CNT			0x012c
#define GEM_TXURUNCNT			0x0134
#define GEM_SNGLCOLLCNT			0x0138
#define GEM_MULTICOLLCNT		0x013c
#define GEM_EXCESSCOLLCNT		0x0140
#define GEM_LATECOLLCNT			0x0144
#define GEM_TXDEFERCNT			0x0148
#define GEM_TXCSENSECNT			0x014c
#define GEM_OCTRXL			0x0150
#define GEM_OCTRXH			0x0154
#define GEM_RXCNT			0x0158
#define GEM_RXBROADCNT			0x015c
#define GEM_RXMULTICNT			0x0160
#define GEM_RXPAUSECNT			0x0164
#define GEM_RX64CNT			0x0168
#define GEM_RX65CNT			0x016c
#define GEM_RX128CNT			0x0170
#define GEM_RX256CNT			0x0174
#define GEM_RX512CNT			0x0178
#define GEM_RX1024CNT			0x017c
#define GEM_RXUNDRCNT			0x0184
#define GEM_RXOVRCNT			0x0188
#define GEM_RXJABCNT			0x018c
#define GEM_RXFCSCNT			0x0190
#define GEM_RXLENGTHCNT			0x0194
#define GEM_RXSYMBCNT			0x0198
#define GEM_RXALIGNCNT			0x019c
#define GEM_RXRESERRCNT			0x01a0
#define GEM_RXORCNT			0x01a4
#define GEM_RXIPCCNT			0x01a8
#define GEM_RXTCPCCNT			0x01ac
#define GEM_RXUDPCCNT			0x01b0

#define GEM_CLK_TX		"tx_clk"

struct cad_buf {
	bus_dmamap_t		bf_map;
	struct mbuf		*bf_m;
};

struct cad_dmamem {
	bus_dmamap_t		cdm_map;
	bus_dma_segment_t	cdm_seg;
	size_t			cdm_size;
	caddr_t			cdm_kva;
};

struct cad_desc {
	uint32_t		d_addr;
	uint32_t		d_status;
};

#define GEM_RXD_ADDR_WRAP	(1 << 1)
#define GEM_RXD_ADDR_USED	(1 << 0)

#define GEM_RXD_BCAST		(1 << 31)
#define GEM_RXD_MCAST		(1 << 30)
#define GEM_RXD_UCAST		(1 << 29)
#define GEM_RXD_SPEC		(1 << 27)
#define GEM_RXD_SPEC_MASK	(0x3 << 25)
#define GEM_RXD_CSUM_MASK	(0x3 << 22)
#define GEM_RXD_CSUM_UDP_OK	(0x3 << 22)
#define GEM_RXD_CSUM_TCP_OK	(0x2 << 22)
#define GEM_RXD_CSUM_IP_OK	(0x1 << 22)
#define GEM_RXD_VLANTAG		(1 << 21)
#define GEM_RXD_PRIOTAG		(1 << 20)
#define GEM_RXD_CFI		(1 << 16)
#define GEM_RXD_EOF		(1 << 15)
#define GEM_RXD_SOF		(1 << 14)
#define GEM_RXD_BADFCS		(1 << 13)
#define GEM_RXD_LEN_MASK	0x1fff

struct cad_txdesc {
	uint32_t		txd_addr;
	uint32_t		txd_status;
};

#define GEM_TXD_USED		(1 << 31)
#define GEM_TXD_WRAP		(1 << 30)
#define GEM_TXD_RLIMIT		(1 << 29)
#define GEM_TXD_CORRUPT		(1 << 27)
#define GEM_TXD_LCOLL		(1 << 26)
#define GEM_TXD_CSUMERR_MASK	(0x7 << 20)
#define GEM_TXD_NOFCS		(1 << 16)
#define GEM_TXD_LAST		(1 << 15)
#define GEM_TXD_LEN_MASK	0x3fff

#define CAD_NRXDESC		256

#define CAD_NTXDESC		256
#define CAD_NTXSEGS		16

enum cad_phy_mode {
	CAD_PHY_MODE_GMII,
	CAD_PHY_MODE_RGMII,
	CAD_PHY_MODE_RGMII_ID,
	CAD_PHY_MODE_RGMII_RXID,
	CAD_PHY_MODE_RGMII_TXID,
	CAD_PHY_MODE_SGMII,
};

struct cad_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	void			*sc_ih;
	int			sc_node;
	int			sc_phy_loc;
	enum cad_phy_mode	sc_phy_mode;
	unsigned char		sc_rxhang_erratum;
	unsigned char		sc_rxdone;

	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct timeout		sc_tick;

	struct cad_dmamem	*sc_txring;
	struct cad_buf		*sc_txbuf;
	struct cad_desc		*sc_txdesc;
	unsigned int		sc_tx_prod;
	unsigned int		sc_tx_cons;

	struct if_rxring	sc_rx_ring;
	struct cad_dmamem	*sc_rxring;
	struct cad_buf		*sc_rxbuf;
	struct cad_desc		*sc_rxdesc;
	unsigned int		sc_rx_prod;
	unsigned int		sc_rx_cons;
	uint32_t		sc_netctl;

	struct task		sc_statchg_task;
	uint32_t		sc_tx_freq;

	struct mutex		sc_kstat_mtx;
	struct kstat		*sc_kstat;
};

#define HREAD4(sc, reg) \
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

int	cad_match(struct device *, void *, void *);
void	cad_attach(struct device *, struct device *, void *);

int	cad_ioctl(struct ifnet *, u_long, caddr_t);
void	cad_start(struct ifqueue *);
void	cad_watchdog(struct ifnet *);

void	cad_reset(struct cad_softc *);
int	cad_up(struct cad_softc *);
void	cad_down(struct cad_softc *);
void	cad_iff(struct cad_softc *);
int	cad_intr(void *);
void	cad_tick(void *);
void	cad_statchg_task(void *);

int	cad_media_change(struct ifnet *);
void	cad_media_status(struct ifnet *, struct ifmediareq *);
int	cad_mii_readreg(struct device *, int, int);
void	cad_mii_writereg(struct device *, int, int, int);
void	cad_mii_statchg(struct device *);

struct cad_dmamem *cad_dmamem_alloc(struct cad_softc *, bus_size_t, bus_size_t);
void	cad_dmamem_free(struct cad_softc *, struct cad_dmamem *);
void	cad_rxfill(struct cad_softc *);
void	cad_rxeof(struct cad_softc *);
void	cad_txeof(struct cad_softc *);
unsigned int cad_encap(struct cad_softc *, struct mbuf *);
struct mbuf *cad_alloc_mbuf(struct cad_softc *, bus_dmamap_t);

#if NKSTAT > 0
void	cad_kstat_attach(struct cad_softc *);
int	cad_kstat_read(struct kstat *);
void	cad_kstat_tick(void *);
#endif

#ifdef DDB
struct cad_softc *cad_sc[4];
#endif

const struct cfattach cad_ca = {
	sizeof(struct cad_softc), cad_match, cad_attach
};

struct cfdriver cad_cd = {
	NULL, "cad", DV_DULL
};

const struct {
	const char		*name;
	enum cad_phy_mode	mode;
} cad_phy_modes[] = {
	{ "gmii",	CAD_PHY_MODE_GMII },
	{ "rgmii",	CAD_PHY_MODE_RGMII },
	{ "rgmii-id",	CAD_PHY_MODE_RGMII_ID },
	{ "rgmii-rxid",	CAD_PHY_MODE_RGMII_RXID },
	{ "rgmii-txid",	CAD_PHY_MODE_RGMII_TXID },
	{ "sgmii",	CAD_PHY_MODE_SGMII },
};

int
cad_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "cdns,gem") ||
	    OF_is_compatible(faa->fa_node, "sifive,fu740-c000-gem"));
}

void
cad_attach(struct device *parent, struct device *self, void *aux)
{
	char phy_mode[16];
	struct fdt_attach_args *faa = aux;
	struct cad_softc *sc = (struct cad_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t hi, lo;
	unsigned int i;
	int node, phy;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh) != 0) {
		printf(": can't map registers\n");
		return;
	}

	if (OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
	    sizeof(sc->sc_ac.ac_enaddr)) != sizeof(sc->sc_ac.ac_enaddr)) {
		for (i = 0; i < GEM_LADDRNUM; i++) {
			lo = HREAD4(sc, GEM_LADDRL(i));
			hi = HREAD4(sc, GEM_LADDRH(i));
			if (lo != 0 || hi != 0) {
				sc->sc_ac.ac_enaddr[0] = lo;
				sc->sc_ac.ac_enaddr[1] = lo >> 8;
				sc->sc_ac.ac_enaddr[2] = lo >> 16;
				sc->sc_ac.ac_enaddr[3] = lo >> 24;
				sc->sc_ac.ac_enaddr[4] = hi;
				sc->sc_ac.ac_enaddr[5] = hi >> 8;
				break;
			}
		}
		if (i == GEM_LADDRNUM)
			ether_fakeaddr(ifp);
	}

	phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node != 0)
		sc->sc_phy_loc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phy_loc = MII_PHY_ANY;

	sc->sc_phy_mode = CAD_PHY_MODE_RGMII;
	OF_getprop(faa->fa_node, "phy-mode", phy_mode, sizeof(phy_mode));
	for (i = 0; i < nitems(cad_phy_modes); i++) {
		if (strcmp(phy_mode, cad_phy_modes[i].name) == 0) {
			sc->sc_phy_mode = cad_phy_modes[i].mode;
			break;
		}
	}

	if (OF_is_compatible(faa->fa_node, "cdns,zynq-gem"))
		sc->sc_rxhang_erratum = 1;

	timeout_set(&sc->sc_tick, cad_tick, sc);
	task_set(&sc->sc_statchg_task, cad_statchg_task, sc);

	cad_reset(sc);

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
	    cad_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt\n");
		goto fail;
	}

	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags |= IFXF_MPSAFE;
	ifp->if_ioctl = cad_ioctl;
	ifp->if_qstart = cad_start;
	ifp->if_watchdog = cad_watchdog;
	ifp->if_hardmtu = ETHER_MAX_DIX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Enable transmit checksum offload only on reliable hardware.
	 * At least the Zynq-7000 appears to generate a bad UDP header
	 * checksum if the checksum field has not been initialized to zero
	 * and the UDP payload size is less than three octets.
	 */
	if (0) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
		    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
		    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
	}

	printf(": rev 0x%x, address %s\n", HREAD4(sc, GEM_MID),
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cad_mii_readreg;
	sc->sc_mii.mii_writereg = cad_mii_writereg;
	sc->sc_mii.mii_statchg = cad_mii_statchg;
	ifmedia_init(&sc->sc_media, 0, cad_media_change, cad_media_status);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phy_loc,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);

	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		printf("%s: no PHY found\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp);

#if NKSTAT > 0
	cad_kstat_attach(sc);
#endif

#ifdef DDB
	if (sc->sc_dev.dv_unit < nitems(cad_sc))
		cad_sc[sc->sc_dev.dv_unit] = sc;
#endif

	return;

fail:
	if (sc->sc_ioh != 0)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}

int
cad_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cad_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
	int s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				error = cad_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				cad_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			cad_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}

void
cad_reset(struct cad_softc *sc)
{
	static const unsigned int mdcclk_divs[] = {
		8, 16, 32, 48, 64, 96, 128, 224
	};
	unsigned int freq;
	uint32_t div, netcfg;

	HWRITE4(sc, GEM_NETCTL, 0);
	HWRITE4(sc, GEM_IDR, ~0U);
	HWRITE4(sc, GEM_RXSR, 0);
	HWRITE4(sc, GEM_TXSR, 0);
	HWRITE4(sc, GEM_RXQBASE, 0);
	HWRITE4(sc, GEM_TXQBASE, 0);

	/* MDIO clock rate must not exceed 2.5 MHz. */
	freq = clock_get_frequency(sc->sc_node, "pclk");
	for (div = 0; div < nitems(mdcclk_divs) - 1; div++) {
		if (freq / mdcclk_divs[div] <= 2500000)
			break;
	}
	KASSERT(div < nitems(mdcclk_divs));
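	/*
	 * For example, a 125 MHz pclk would select divisor index 4:
	 * 125 MHz / 48 = 2.6 MHz still exceeds the limit, while
	 * 125 MHz / 64 = 1.95 MHz does not.
	 */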

	netcfg = HREAD4(sc, GEM_NETCFG);
	netcfg &= ~GEM_NETCFG_MDCCLKDIV_MASK;
	netcfg |= div << GEM_NETCFG_MDCCLKDIV_SHIFT;
	HWRITE4(sc, GEM_NETCFG, netcfg);

	/* Enable MDIO bus. */
	sc->sc_netctl = GEM_NETCTL_MDEN;
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
}

int
cad_up(struct cad_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct cad_buf *rxb, *txb;
	struct cad_desc *rxd, *txd;
	unsigned int i;
	uint32_t val;

	/*
	 * Set up Tx descriptor ring.
	 */

	sc->sc_txring = cad_dmamem_alloc(sc,
	    CAD_NTXDESC * sizeof(struct cad_desc), sizeof(struct cad_desc));
	sc->sc_txdesc = (struct cad_desc *)sc->sc_txring->cdm_kva;

	sc->sc_txbuf = malloc(sizeof(*sc->sc_txbuf) * CAD_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < CAD_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, CAD_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->bf_map);
		txb->bf_m = NULL;

		txd = &sc->sc_txdesc[i];
		txd->d_addr = 0;
		txd->d_status = GEM_TXD_USED;
		if (i == CAD_NTXDESC - 1)
			txd->d_status |= GEM_TXD_WRAP;
	}

	sc->sc_tx_prod = 0;
	sc->sc_tx_cons = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
	    0, sc->sc_txring->cdm_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	HWRITE4(sc, GEM_TXQBASE, sc->sc_txring->cdm_map->dm_segs[0].ds_addr);

	/*
	 * Set up Rx descriptor ring.
	 */

	sc->sc_rxring = cad_dmamem_alloc(sc,
	    CAD_NRXDESC * sizeof(struct cad_desc), sizeof(struct cad_desc));
	sc->sc_rxdesc = (struct cad_desc *)sc->sc_rxring->cdm_kva;

	sc->sc_rxbuf = malloc(sizeof(struct cad_buf) * CAD_NRXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < CAD_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->bf_map);
		rxb->bf_m = NULL;

		/* Mark all descriptors as used so that the driver owns them. */
		rxd = &sc->sc_rxdesc[i];
		rxd->d_addr = GEM_RXD_ADDR_USED;
		if (i == CAD_NRXDESC - 1)
			rxd->d_addr |= GEM_RXD_ADDR_WRAP;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, CAD_NRXDESC);

	sc->sc_rx_prod = 0;
	sc->sc_rx_cons = 0;
	cad_rxfill(sc);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
	    0, sc->sc_rxring->cdm_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	HWRITE4(sc, GEM_RXQBASE, sc->sc_rxring->cdm_map->dm_segs[0].ds_addr);

	/*
	 * Set MAC address filters.
	 */

	HWRITE4(sc, GEM_LADDRL(0), sc->sc_ac.ac_enaddr[0] |
	    ((uint32_t)sc->sc_ac.ac_enaddr[1] << 8) |
	    ((uint32_t)sc->sc_ac.ac_enaddr[2] << 16) |
	    ((uint32_t)sc->sc_ac.ac_enaddr[3] << 24));
	HWRITE4(sc, GEM_LADDRH(0), sc->sc_ac.ac_enaddr[4] |
	    ((uint32_t)sc->sc_ac.ac_enaddr[5] << 8));

	for (i = 1; i < GEM_LADDRNUM; i++) {
		HWRITE4(sc, GEM_LADDRL(i), 0);
		HWRITE4(sc, GEM_LADDRH(i), 0);
	}

	cad_iff(sc);

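	/*
	 * Start the Tx clock at the 2.5 MHz (10Mb/s) rate for now;
	 * cad_mii_statchg() reprograms it once the link speed is known.
	 */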
	clock_set_frequency(sc->sc_node, GEM_CLK_TX, 2500000);
	clock_enable(sc->sc_node, GEM_CLK_TX);
	delay(1000);

	val = HREAD4(sc, GEM_NETCFG);

	val |= GEM_NETCFG_FCSREM | GEM_NETCFG_RXCSUMEN | GEM_NETCFG_1000 |
	    GEM_NETCFG_100 | GEM_NETCFG_FDEN | GEM_NETCFG_1536RXEN;
	val &= ~GEM_NETCFG_RXOFFS_MASK;
	val |= ETHER_ALIGN << GEM_NETCFG_RXOFFS_SHIFT;
	val &= ~GEM_NETCFG_BCASTDI;

	if (sc->sc_phy_mode == CAD_PHY_MODE_SGMII)
		val |= GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL;
	else
		val &= ~(GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL);

	HWRITE4(sc, GEM_NETCFG, val);

	val = HREAD4(sc, GEM_DMACR);

	/* Use the CPU's native byte order for descriptor words. */
#if BYTE_ORDER == BIG_ENDIAN
	val |= GEM_DMACR_ES_DESCR;
#else
	val &= ~GEM_DMACR_ES_DESCR;
#endif
	val &= ~GEM_DMACR_ES_PDATA;
	val |= GEM_DMACR_AHBDISC | GEM_DMACR_TXSIZE;
	val &= ~GEM_DMACR_RXSIZE_MASK;
	val |= GEM_DMACR_RXSIZE_8K;
	val &= ~GEM_DMACR_RXBUF_MASK;
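	/*
	 * The Rx buffer size field is programmed in units of 64 bytes;
	 * MCLBYTES (2048) yields a field value of 32 (0x20).
	 */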
	val |= (MCLBYTES / 64) << GEM_DMACR_RXBUF_SHIFT;
	val &= ~GEM_DMACR_BLEN_MASK;
	val |= GEM_DMACR_BLEN_16;

	if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
		val |= GEM_DMACR_TXCSUMEN;

	HWRITE4(sc, GEM_DMACR, val);

	/* Clear statistics. */
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STATCLR);

	/* Enable Rx and Tx. */
	sc->sc_netctl |= GEM_NETCTL_RXEN | GEM_NETCTL_TXEN;
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);

	/* Enable interrupts. */
	HWRITE4(sc, GEM_IER, GEM_IXR_HRESP | GEM_IXR_RXOVR | GEM_IXR_RXDONE |
	    GEM_IXR_TXDONE);

	if (sc->sc_rxhang_erratum)
		HWRITE4(sc, GEM_IER, GEM_IXR_RXUSED);

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_tick, 1);

	return 0;
}

void
cad_down(struct cad_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct cad_buf *rxb, *txb;
	unsigned int i, timeout;

	ifp->if_flags &= ~IFF_RUNNING;

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	timeout_del_barrier(&sc->sc_tick);

	/* Disable data transfer. */
	sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN);
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);

	/* Disable all interrupts. */
	HWRITE4(sc, GEM_IDR, ~0U);

	/* Wait for transmitter to become idle. */
	for (timeout = 1000; timeout > 0; timeout--) {
		if ((HREAD4(sc, GEM_TXSR) & GEM_TXSR_TXGO) == 0)
			break;
		delay(10);
	}
	if (timeout == 0)
		printf("%s: transmitter not idle\n", sc->sc_dev.dv_xname);

	mii_down(&sc->sc_mii);

	/* Wait for activity to cease. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	taskq_del_barrier(systq, &sc->sc_statchg_task);

	/* Disable the packet clock as it is not needed any longer. */
	clock_disable(sc->sc_node, GEM_CLK_TX);

	cad_reset(sc);

	/*
	 * Tear down the Tx descriptor ring.
	 */

	for (i = 0; i < CAD_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->bf_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0,
			    txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->bf_map);
			m_freem(txb->bf_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->bf_map);
	}
	free(sc->sc_txbuf, M_DEVBUF, sizeof(*sc->sc_txbuf) * CAD_NTXDESC);
	sc->sc_txbuf = NULL;

	cad_dmamem_free(sc, sc->sc_txring);
	sc->sc_txring = NULL;
	sc->sc_txdesc = NULL;

	/*
	 * Tear down the Rx descriptor ring.
	 */

	for (i = 0; i < CAD_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->bf_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, 0,
			    rxb->bf_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->bf_map);
			m_freem(rxb->bf_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->bf_map);
	}
	free(sc->sc_rxbuf, M_DEVBUF, sizeof(*sc->sc_rxbuf) * CAD_NRXDESC);
	sc->sc_rxbuf = NULL;

	cad_dmamem_free(sc, sc->sc_rxring);
	sc->sc_rxring = NULL;
	sc->sc_rxdesc = NULL;
}

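/*
 * Hash a MAC address into a 6-bit multicast filter index: the XOR of the
 * eight consecutive 6-bit fields of the 48-bit address, taken in
 * little-endian bit order. Bits that accumulate above bit 5 of the
 * intermediate value are masked off at the end.
 */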
uint8_t
cad_hash_mac(const uint8_t *eaddr)
{
	uint64_t val = 0;
	int i;
	uint8_t hash = 0;

	for (i = ETHER_ADDR_LEN - 1; i >= 0; i--)
		val = (val << 8) | eaddr[i];

	for (i = 0; i < 8; i++) {
		hash ^= val;
		val >>= 6;
	}

	return hash & 0x3f;
}

void
cad_iff(struct cad_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint64_t hash;
	uint32_t netcfg;

	netcfg = HREAD4(sc, GEM_NETCFG);
	netcfg &= ~GEM_NETCFG_UCASTHASHEN;

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC) {
		netcfg |= GEM_NETCFG_COPYALL;
		netcfg &= ~GEM_NETCFG_MCASTHASHEN;
	} else {
		netcfg &= ~GEM_NETCFG_COPYALL;
		netcfg |= GEM_NETCFG_MCASTHASHEN;

		if (ac->ac_multirangecnt > 0)
			ifp->if_flags |= IFF_ALLMULTI;

		if (ifp->if_flags & IFF_ALLMULTI) {
			hash = ~0ULL;
		} else {
			hash = 0;
			ETHER_FIRST_MULTI(step, ac, enm);
			while (enm != NULL) {
				hash |= 1ULL << cad_hash_mac(enm->enm_addrlo);
				ETHER_NEXT_MULTI(step, enm);
			}
		}

		HWRITE4(sc, GEM_HASHL, hash);
		HWRITE4(sc, GEM_HASHH, hash >> 32);
	}

	HWRITE4(sc, GEM_NETCFG, netcfg);
}

void
cad_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct cad_softc *sc = ifp->if_softc;
	struct mbuf *m;
	unsigned int free, head, used;

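	/*
	 * Compute the number of free descriptors from the consumer and
	 * producer indices. The loop below stops filling once no more than
	 * CAD_NTXSEGS slots remain, so the producer can never wrap onto
	 * the consumer.
	 */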
	free = sc->sc_tx_cons;
	head = sc->sc_tx_prod;
	if (free <= head)
		free += CAD_NTXDESC;
	free -= head;

	for (;;) {
		if (free <= CAD_NTXSEGS) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = cad_encap(sc, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		ifp->if_timer = 5;

		KASSERT(free >= used);
		free -= used;
	}

	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX);
}

void
cad_watchdog(struct ifnet *ifp)
{
	struct cad_softc *sc = ifp->if_softc;

	ifp->if_timer = 0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (sc->sc_tx_cons == sc->sc_tx_prod)
		return;

	/* XXX */
	HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX);
}

unsigned int
cad_encap(struct cad_softc *sc, struct mbuf *m)
{
	bus_dmamap_t map;
	struct cad_buf *txb;
	struct cad_desc *txd;
	unsigned int head, idx, nsegs;
	uint32_t status;
	int i;

	head = sc->sc_tx_prod;

	txb = &sc->sc_txbuf[head];
	map = txb->bf_map;

	switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) != 0)
			return 0;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT) != 0)
			return 0;
		break;
	default:
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = map->dm_nsegs;
	KASSERT(nsegs > 0);

	txb->bf_m = m;

	/*
	 * Fill descriptors in reverse order so that all the descriptors
	 * are ready when the first descriptor's GEM_TXD_USED bit is cleared.
	 */
	for (i = nsegs - 1; i >= 0; i--) {
		idx = (head + i) % CAD_NTXDESC;

		status = map->dm_segs[i].ds_len & GEM_TXD_LEN_MASK;
		if (i == nsegs - 1)
			status |= GEM_TXD_LAST;
		if (idx == CAD_NTXDESC - 1)
			status |= GEM_TXD_WRAP;

		txd = &sc->sc_txdesc[idx];
		txd->d_addr = map->dm_segs[i].ds_addr;

		/* Make d_addr visible before GEM_TXD_USED is cleared
		 * in d_status. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
		    idx * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		txd->d_status = status;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
		    idx * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	sc->sc_tx_prod = (head + nsegs) % CAD_NTXDESC;

	return nsegs;
}

int
cad_intr(void *arg)
{
	struct cad_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t isr;

	isr = HREAD4(sc, GEM_ISR);
	HWRITE4(sc, GEM_ISR, isr);

	if (isr & GEM_IXR_RXDONE)
		cad_rxeof(sc);
	if (isr & GEM_IXR_TXDONE)
		cad_txeof(sc);

	if (isr & GEM_IXR_RXOVR)
		ifp->if_ierrors++;

	if (sc->sc_rxhang_erratum && (isr & GEM_IXR_RXUSED)) {
		/*
		 * Try to flush a packet from the Rx SRAM to avoid triggering
		 * the Rx hang.
		 */
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_DPRAM);
		cad_rxfill(sc);
	}

	/* If there has been a DMA error, stop the interface to limit damage. */
	if (isr & GEM_IXR_HRESP) {
		sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN);
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
		HWRITE4(sc, GEM_IDR, ~0U);

		printf("%s: hresp error, interface stopped\n",
		    sc->sc_dev.dv_xname);
	}

	return 1;
}

void
cad_rxeof(struct cad_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	struct cad_buf *rxb;
	struct cad_desc *rxd;
	size_t len;
	unsigned int idx;
	uint32_t status;

	idx = sc->sc_rx_cons;

	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxd = &sc->sc_rxdesc[idx];
		if ((rxd->d_addr & GEM_RXD_ADDR_USED) == 0)
			break;

		/* Prevent premature read of d_status. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_POSTREAD);

		status = rxd->d_status;
		len = status & GEM_RXD_LEN_MASK;

		rxb = &sc->sc_rxbuf[idx];

		bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, ETHER_ALIGN, len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->bf_map);

		m = rxb->bf_m;
		rxb->bf_m = NULL;
		KASSERT(m != NULL);

		if_rxr_put(&sc->sc_rx_ring, 1);
		idx = (idx + 1) % CAD_NRXDESC;

		if ((status & (GEM_RXD_SOF | GEM_RXD_EOF)) !=
		    (GEM_RXD_SOF | GEM_RXD_EOF)) {
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}

		m_adj(m, ETHER_ALIGN);
		m->m_len = m->m_pkthdr.len = len;

		m->m_pkthdr.csum_flags = 0;
		switch (status & GEM_RXD_CSUM_MASK) {
		case GEM_RXD_CSUM_IP_OK:
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
			break;
		case GEM_RXD_CSUM_TCP_OK:
		case GEM_RXD_CSUM_UDP_OK:
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK |
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
			break;
		}

		ml_enqueue(&ml, m);

		sc->sc_rxdone = 1;
	}

	sc->sc_rx_cons = idx;

	cad_rxfill(sc);

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);
}

void
cad_rxfill(struct cad_softc *sc)
{
	struct cad_buf *rxb;
	struct cad_desc *rxd;
	uint32_t addr;
	unsigned int idx;
	u_int slots;

	idx = sc->sc_rx_prod;

	for (slots = if_rxr_get(&sc->sc_rx_ring, CAD_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[idx];
		rxb->bf_m = cad_alloc_mbuf(sc, rxb->bf_map);
		if (rxb->bf_m == NULL)
			break;

		addr = rxb->bf_map->dm_segs[0].ds_addr;
		KASSERT((addr & (GEM_RXD_ADDR_WRAP | GEM_RXD_ADDR_USED)) == 0);
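		/*
		 * The two low address bits carry the WRAP and USED flags;
		 * cluster buffers are at least 4-byte aligned, so the bits
		 * are known to be clear here.
		 */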
		if (idx == CAD_NRXDESC - 1)
			addr |= GEM_RXD_ADDR_WRAP;

		rxd = &sc->sc_rxdesc[idx];
		rxd->d_status = 0;

		/* Make d_status visible before clearing GEM_RXD_ADDR_USED
		 * in d_addr. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		rxd->d_addr = addr;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
		    idx * sizeof(*rxd), sizeof(*rxd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		idx = (idx + 1) % CAD_NRXDESC;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	sc->sc_rx_prod = idx;
}

void
cad_txeof(struct cad_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct cad_buf *txb;
	struct cad_desc *txd;
	unsigned int free = 0;
	unsigned int idx, nsegs;
	uint32_t status;

	idx = sc->sc_tx_cons;

	while (idx != sc->sc_tx_prod) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
		    idx * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txd = &sc->sc_txdesc[idx];
		status = txd->d_status;
		if ((status & GEM_TXD_USED) == 0)
			break;

		if (status & (GEM_TXD_RLIMIT | GEM_TXD_CORRUPT |
		    GEM_TXD_LCOLL | GEM_TXD_CSUMERR_MASK))
			ifp->if_oerrors++;

		txb = &sc->sc_txbuf[idx];
		nsegs = txb->bf_map->dm_nsegs;
		KASSERT(nsegs > 0);

		bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0,
		    txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->bf_map);

		m_freem(txb->bf_m);
		txb->bf_m = NULL;

		for (;;) {
			idx = (idx + 1) % CAD_NTXDESC;

			nsegs--;
			if (nsegs == 0)
				break;

			/*
			 * The controller marks only the initial segment used.
			 * Mark the remaining segments used manually, so that
			 * the controller will not accidentally use them later.
			 *
			 * This could be done lazily on the Tx ring producer
			 * side by ensuring that the subsequent descriptor
			 * after the actual segments is marked used.
			 * However, this would make the ring trickier to debug.
			 */

			bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
			    idx * sizeof(*txd), sizeof(*txd),
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			txd = &sc->sc_txdesc[idx];
			txd->d_status |= GEM_TXD_USED;

			bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
			    idx * sizeof(*txd), sizeof(*txd),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}

		free++;
	}

	if (free == 0)
		return;

	sc->sc_tx_cons = idx;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

void
cad_tick(void *arg)
{
	struct cad_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	s = splnet();

	mii_tick(&sc->sc_mii);

	/*
	 * If there has been no Rx for a moment, Rx DMA might be stuck.
	 * Try to recover by restarting the receiver.
	 */
	if (sc->sc_rxhang_erratum && !sc->sc_rxdone) {
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl & ~GEM_NETCTL_RXEN);
		(void)HREAD4(sc, GEM_NETCTL);
		HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
	}
	sc->sc_rxdone = 0;

	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

int
cad_media_change(struct ifnet *ifp)
{
	struct cad_softc *sc = ifp->if_softc;

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return 0;
}

void
cad_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct cad_softc *sc = ifp->if_softc;

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		imr->ifm_active = sc->sc_mii.mii_media_active;
		imr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

int
cad_mii_wait(struct cad_softc *sc)
{
	int timeout;

	for (timeout = 10000; timeout > 0; timeout--) {
		if (HREAD4(sc, GEM_NETSR) & GEM_NETSR_PHY_MGMT_IDLE)
			break;
		delay(10);
	}
	if (timeout == 0)
		return ETIMEDOUT;
	return 0;
}

void
cad_mii_oper(struct cad_softc *sc, int phy_no, int reg, uint32_t oper)
{
	oper |= (phy_no << GEM_PHYMNTNC_ADDR_SHIFT) & GEM_PHYMNTNC_ADDR_MASK;
	oper |= (reg << GEM_PHYMNTNC_REG_SHIFT) & GEM_PHYMNTNC_REG_MASK;
	oper |= GEM_PHYMNTNC_CLAUSE_22 | GEM_PHYMNTNC_MUST_10;
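	/*
	 * The word now encodes a Clause 22 MDIO management frame:
	 * opcode, PHY address, register number, the mandatory "10"
	 * turnaround bits, and, for writes, the data in the low 16 bits.
	 */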

	if (cad_mii_wait(sc) != 0) {
		printf("%s: MII bus idle timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	HWRITE4(sc, GEM_PHYMNTNC, oper);

	if (cad_mii_wait(sc) != 0) {
		printf("%s: MII bus operation timeout\n", sc->sc_dev.dv_xname);
		return;
	}
}

int
cad_mii_readreg(struct device *self, int phy_no, int reg)
{
	struct cad_softc *sc = (struct cad_softc *)self;
	int val;

	cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_READ);

	val = HREAD4(sc, GEM_PHYMNTNC) & GEM_PHYMNTNC_DATA_MASK;

	/* The MAC does not handle 1000baseT in half duplex mode. */
	if (reg == MII_EXTSR)
		val &= ~EXTSR_1000THDX;

	return val;
}

void
cad_mii_writereg(struct device *self, int phy_no, int reg, int val)
{
	struct cad_softc *sc = (struct cad_softc *)self;

	cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_WRITE |
	    (val & GEM_PHYMNTNC_DATA_MASK));
}

void
cad_mii_statchg(struct device *self)
{
	struct cad_softc *sc = (struct cad_softc *)self;
	uint32_t netcfg;

	netcfg = HREAD4(sc, GEM_NETCFG);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		netcfg |= GEM_NETCFG_FDEN;
	else
		netcfg &= ~GEM_NETCFG_FDEN;

	netcfg &= ~(GEM_NETCFG_100 | GEM_NETCFG_1000);
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	default:
		sc->sc_tx_freq = 2500000;
		break;
	case IFM_100_TX:
		netcfg |= GEM_NETCFG_100;
		sc->sc_tx_freq = 25000000;
		break;
	case IFM_1000_T:
		netcfg |= GEM_NETCFG_100 | GEM_NETCFG_1000;
		sc->sc_tx_freq = 125000000;
		break;
	}

	HWRITE4(sc, GEM_NETCFG, netcfg);

	/* Defer clock setting because it allocates memory with M_WAITOK. */
	task_add(systq, &sc->sc_statchg_task);
}

void
cad_statchg_task(void *arg)
{
	struct cad_softc *sc = arg;

	clock_set_frequency(sc->sc_node, GEM_CLK_TX, sc->sc_tx_freq);
}

struct cad_dmamem *
cad_dmamem_alloc(struct cad_softc *sc, bus_size_t size, bus_size_t align)
{
	struct cad_dmamem *cdm;
	int nsegs;

	cdm = malloc(sizeof(*cdm), M_DEVBUF, M_WAITOK | M_ZERO);
	cdm->cdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &cdm->cdm_map) != 0)
		goto cdmfree;
	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &cdm->cdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &cdm->cdm_seg, nsegs, size,
	    &cdm->cdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, cdm->cdm_map, cdm->cdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;
	memset(cdm->cdm_kva, 0, size);
	return cdm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map);
cdmfree:
	free(cdm, M_DEVBUF, sizeof(*cdm));
	return NULL;
}

void
cad_dmamem_free(struct cad_softc *sc, struct cad_dmamem *cdm)
{
	bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, cdm->cdm_size);
	bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map);
	free(cdm, M_DEVBUF, sizeof(*cdm));
}

struct mbuf *
cad_alloc_mbuf(struct cad_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (m == NULL)
		return NULL;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

#if NKSTAT > 0
enum cad_stat {
	cad_stat_tx_toto,
	cad_stat_tx_totp,
	cad_stat_tx_bcast,
	cad_stat_tx_mcast,
	cad_stat_tx_pause,
	cad_stat_tx_h64,
	cad_stat_tx_h65,
	cad_stat_tx_h128,
	cad_stat_tx_h256,
	cad_stat_tx_h512,
	cad_stat_tx_h1024,
	cad_stat_tx_underrun,
	cad_stat_tx_scoll,
	cad_stat_tx_mcoll,
	cad_stat_tx_ecoll,
	cad_stat_tx_lcoll,
	cad_stat_tx_defer,
	cad_stat_tx_sense,
	cad_stat_rx_toto,
	cad_stat_rx_totp,
	cad_stat_rx_bcast,
	cad_stat_rx_mcast,
	cad_stat_rx_pause,
	cad_stat_rx_h64,
	cad_stat_rx_h65,
	cad_stat_rx_h128,
	cad_stat_rx_h256,
	cad_stat_rx_h512,
	cad_stat_rx_h1024,
	cad_stat_rx_undersz,
	cad_stat_rx_oversz,
	cad_stat_rx_jabber,
	cad_stat_rx_fcs,
	cad_stat_rx_symberr,
	cad_stat_rx_align,
	cad_stat_rx_reserr,
	cad_stat_rx_overrun,
	cad_stat_rx_ipcsum,
	cad_stat_rx_tcpcsum,
	cad_stat_rx_udpcsum,
	cad_stat_count
};

struct cad_counter {
	const char		*c_name;
	enum kstat_kv_unit	c_unit;
	uint32_t		c_reg;
};

const struct cad_counter cad_counters[cad_stat_count] = {
	[cad_stat_tx_toto] =
	    { "tx total",	KSTAT_KV_U_BYTES, 0 },
	[cad_stat_tx_totp] =
	    { "tx total",	KSTAT_KV_U_PACKETS, GEM_TXCNT },
	[cad_stat_tx_bcast] =
	    { "tx bcast",	KSTAT_KV_U_PACKETS, GEM_TXBCCNT },
	[cad_stat_tx_mcast] =
	    { "tx mcast",	KSTAT_KV_U_PACKETS, GEM_TXMCCNT },
	[cad_stat_tx_pause] =
	    { "tx pause",	KSTAT_KV_U_PACKETS, GEM_TXPAUSECNT },
	[cad_stat_tx_h64] =
	    { "tx 64B",		KSTAT_KV_U_PACKETS, GEM_TX64CNT },
	[cad_stat_tx_h65] =
	    { "tx 65-127B",	KSTAT_KV_U_PACKETS, GEM_TX65CNT },
	[cad_stat_tx_h128] =
	    { "tx 128-255B",	KSTAT_KV_U_PACKETS, GEM_TX128CNT },
	[cad_stat_tx_h256] =
	    { "tx 256-511B",	KSTAT_KV_U_PACKETS, GEM_TX256CNT },
	[cad_stat_tx_h512] =
	    { "tx 512-1023B",	KSTAT_KV_U_PACKETS, GEM_TX512CNT },
	[cad_stat_tx_h1024] =
	    { "tx 1024-1518B",	KSTAT_KV_U_PACKETS, GEM_TX1024CNT },
	[cad_stat_tx_underrun] =
	    { "tx underrun",	KSTAT_KV_U_PACKETS, GEM_TXURUNCNT },
	[cad_stat_tx_scoll] =
	    { "tx scoll",	KSTAT_KV_U_PACKETS, GEM_SNGLCOLLCNT },
	[cad_stat_tx_mcoll] =
	    { "tx mcoll",	KSTAT_KV_U_PACKETS, GEM_MULTICOLLCNT },
	[cad_stat_tx_ecoll] =
	    { "tx excess coll",	KSTAT_KV_U_PACKETS, GEM_EXCESSCOLLCNT },
	[cad_stat_tx_lcoll] =
	    { "tx late coll",	KSTAT_KV_U_PACKETS, GEM_LATECOLLCNT },
	[cad_stat_tx_defer] =
	    { "tx defer",	KSTAT_KV_U_PACKETS, GEM_TXDEFERCNT },
	[cad_stat_tx_sense] =
	    { "tx csense",	KSTAT_KV_U_PACKETS, GEM_TXCSENSECNT },
	[cad_stat_rx_toto] =
	    { "rx total",	KSTAT_KV_U_BYTES, 0 },
	[cad_stat_rx_totp] =
	    { "rx total",	KSTAT_KV_U_PACKETS, GEM_RXCNT },
	[cad_stat_rx_bcast] =
	    { "rx bcast",	KSTAT_KV_U_PACKETS, GEM_RXBROADCNT },
	[cad_stat_rx_mcast] =
	    { "rx mcast",	KSTAT_KV_U_PACKETS, GEM_RXMULTICNT },
	[cad_stat_rx_pause] =
	    { "rx pause",	KSTAT_KV_U_PACKETS, GEM_RXPAUSECNT },
	[cad_stat_rx_h64] =
	    { "rx 64B",		KSTAT_KV_U_PACKETS, GEM_RX64CNT },
	[cad_stat_rx_h65] =
	    { "rx 65-127B",	KSTAT_KV_U_PACKETS, GEM_RX65CNT },
	[cad_stat_rx_h128] =
	    { "rx 128-255B",	KSTAT_KV_U_PACKETS, GEM_RX128CNT },
	[cad_stat_rx_h256] =
	    { "rx 256-511B",	KSTAT_KV_U_PACKETS, GEM_RX256CNT },
	[cad_stat_rx_h512] =
	    { "rx 512-1023B",	KSTAT_KV_U_PACKETS, GEM_RX512CNT },
	[cad_stat_rx_h1024] =
	    { "rx 1024-1518B",	KSTAT_KV_U_PACKETS, GEM_RX1024CNT },
	[cad_stat_rx_undersz] =
	    { "rx undersz",	KSTAT_KV_U_PACKETS, GEM_RXUNDRCNT },
	[cad_stat_rx_oversz] =
	    { "rx oversz",	KSTAT_KV_U_PACKETS, GEM_RXOVRCNT },
	[cad_stat_rx_jabber] =
	    { "rx jabber",	KSTAT_KV_U_PACKETS, GEM_RXJABCNT },
	[cad_stat_rx_fcs] =
	    { "rx fcs",		KSTAT_KV_U_PACKETS, GEM_RXFCSCNT },
	[cad_stat_rx_symberr] =
	    { "rx symberr",	KSTAT_KV_U_PACKETS, GEM_RXSYMBCNT },
	[cad_stat_rx_align] =
	    { "rx align",	KSTAT_KV_U_PACKETS, GEM_RXALIGNCNT },
	[cad_stat_rx_reserr] =
	    { "rx reserr",	KSTAT_KV_U_PACKETS, GEM_RXRESERRCNT },
	[cad_stat_rx_overrun] =
	    { "rx overrun",	KSTAT_KV_U_PACKETS, GEM_RXORCNT },
	[cad_stat_rx_ipcsum] =
	    { "rx ip csum",	KSTAT_KV_U_PACKETS, GEM_RXIPCCNT },
	[cad_stat_rx_tcpcsum] =
	    { "rx tcp csum",	KSTAT_KV_U_PACKETS, GEM_RXTCPCCNT },
	[cad_stat_rx_udpcsum] =
	    { "rx udp csum",	KSTAT_KV_U_PACKETS, GEM_RXUDPCCNT },
};

void
cad_kstat_attach(struct cad_softc *sc)
{
	const struct cad_counter *c;
	struct kstat *ks;
	struct kstat_kv *kvs;
	int i;

	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "cad-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(cad_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < nitems(cad_counters); i++) {
		c = &cad_counters[i];
		kstat_kv_unit_init(&kvs[i], c->c_name, KSTAT_KV_T_COUNTER64,
		    c->c_unit);
	}

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(cad_counters) * sizeof(*kvs);
	ks->ks_read = cad_kstat_read;

	sc->sc_kstat = ks;
	kstat_install(ks);
}

int
cad_kstat_read(struct kstat *ks)
{
	const struct cad_counter *c;
	struct kstat_kv *kvs = ks->ks_data;
	struct cad_softc *sc = ks->ks_softc;
	uint64_t v64;
	int i;

	v64 = HREAD4(sc, GEM_OCTTXL);
	v64 |= (uint64_t)HREAD4(sc, GEM_OCTTXH) << 32;
	kstat_kv_u64(&kvs[cad_stat_tx_toto]) += v64;

	v64 = HREAD4(sc, GEM_OCTRXL);
	v64 |= (uint64_t)HREAD4(sc, GEM_OCTRXH) << 32;
	kstat_kv_u64(&kvs[cad_stat_rx_toto]) += v64;

	for (i = 0; i < nitems(cad_counters); i++) {
		c = &cad_counters[i];
		if (c->c_reg == 0)
			continue;
		kstat_kv_u64(&kvs[i]) += HREAD4(sc, c->c_reg);
	}

	getnanouptime(&ks->ks_updated);

	return 0;
}

void
cad_kstat_tick(void *arg)
{
	struct cad_softc *sc = arg;

	if (mtx_enter_try(&sc->sc_kstat_mtx)) {
		cad_kstat_read(sc->sc_kstat);
		mtx_leave(&sc->sc_kstat_mtx);
	}
}
#endif /* NKSTAT > 0 */

#ifdef DDB
void
cad_dump(struct cad_softc *sc)
{
	struct cad_buf *rxb, *txb;
	struct cad_desc *rxd, *txd;
	uint32_t rxqbase, txqbase;
	int i;

	rxqbase = HREAD4(sc, GEM_RXQBASE);
	txqbase = HREAD4(sc, GEM_TXQBASE);

	printf("isr 0x%x txsr 0x%x rxsr 0x%x\n", HREAD4(sc, GEM_ISR),
	    HREAD4(sc, GEM_TXSR), HREAD4(sc, GEM_RXSR));

	printf("tx q 0x%08x\n", txqbase);
	if (sc->sc_txbuf != NULL) {
		for (i = 0; i < CAD_NTXDESC; i++) {
			txb = &sc->sc_txbuf[i];
			txd = &sc->sc_txdesc[i];
			printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i,
			    txd, txd->d_addr, txd->d_status,
			    sc->sc_tx_cons == i ? ">" : " ",
			    sc->sc_tx_prod == i ? "<" : " ",
			    txb->bf_m);
		}
	}

	printf("rx q 0x%08x\n", rxqbase);
	if (sc->sc_rxbuf != NULL) {
		for (i = 0; i < CAD_NRXDESC; i++) {
			rxb = &sc->sc_rxbuf[i];
			rxd = &sc->sc_rxdesc[i];
			printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i,
			    rxd, rxd->d_addr, rxd->d_status,
			    sc->sc_rx_cons == i ? ">" : " ",
			    sc->sc_rx_prod == i ? "<" : " ",
			    rxb->bf_m);
		}
	}
}
#endif