1 /* $OpenBSD: if_cad.c,v 1.8 2021/07/29 09:19:42 patrick Exp $ */ 2 3 /* 4 * Copyright (c) 2021 Visa Hankala 5 * 6 * Permission to use, copy, modify, and/or distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Driver for Cadence 10/100/Gigabit Ethernet device. 21 */ 22 23 #include "bpfilter.h" 24 #include "kstat.h" 25 26 #include <sys/param.h> 27 #include <sys/systm.h> 28 #include <sys/atomic.h> 29 #include <sys/device.h> 30 #include <sys/ioctl.h> 31 #include <sys/mutex.h> 32 #include <sys/kstat.h> 33 #include <sys/task.h> 34 #include <sys/timeout.h> 35 36 #include <net/if.h> 37 #include <net/if_media.h> 38 #include <netinet/in.h> 39 #include <netinet/ip.h> 40 #include <netinet/if_ether.h> 41 42 #if NBPFILTER > 0 43 #include <net/bpf.h> 44 #endif 45 46 #include <dev/mii/mii.h> 47 #include <dev/mii/miivar.h> 48 #include <dev/mii/miidevs.h> 49 50 #include <machine/bus.h> 51 #include <machine/fdt.h> 52 53 #include <dev/ofw/fdt.h> 54 #include <dev/ofw/openfirm.h> 55 #include <dev/ofw/ofw_clock.h> 56 57 #define GEM_NETCTL 0x0000 58 #define GEM_NETCTL_DPRAM (1 << 18) 59 #define GEM_NETCTL_STARTTX (1 << 9) 60 #define GEM_NETCTL_STATCLR (1 << 5) 61 #define GEM_NETCTL_MDEN (1 << 4) 62 #define GEM_NETCTL_TXEN (1 << 3) 63 #define GEM_NETCTL_RXEN (1 << 2) 64 #define GEM_NETCFG 0x0004 65 #define GEM_NETCFG_SGMIIEN (1 << 27) 66 #define GEM_NETCFG_RXCSUMEN (1 << 24) 67 #define GEM_NETCFG_MDCCLKDIV_MASK (0x7 << 18) 68 #define GEM_NETCFG_MDCCLKDIV_SHIFT 18 69 #define GEM_NETCFG_FCSREM (1 << 17) 70 #define GEM_NETCFG_RXOFFS_MASK (0x3 << 14) 71 #define GEM_NETCFG_RXOFFS_SHIFT 14 72 #define GEM_NETCFG_PCSSEL (1 << 11) 73 #define GEM_NETCFG_1000 (1 << 10) 74 #define GEM_NETCFG_1536RXEN (1 << 8) 75 #define GEM_NETCFG_UCASTHASHEN (1 << 7) 76 #define GEM_NETCFG_MCASTHASHEN (1 << 6) 77 #define GEM_NETCFG_BCASTDI (1 << 5) 78 #define GEM_NETCFG_COPYALL (1 << 4) 79 #define GEM_NETCFG_FDEN (1 << 1) 80 #define GEM_NETCFG_100 (1 << 0) 81 #define GEM_NETSR 0x0008 82 #define GEM_NETSR_PHY_MGMT_IDLE (1 << 2) 83 #define GEM_DMACR 0x0010 84 #define GEM_DMACR_DMA64 (1 << 30) 85 #define GEM_DMACR_AHBDISC (1 << 24) 86 #define GEM_DMACR_RXBUF_MASK (0xff << 16) 87 #define GEM_DMACR_RXBUF_SHIFT 16 88 #define GEM_DMACR_TXCSUMEN (1 << 11) 89 #define GEM_DMACR_TXSIZE (1 << 10) 90 #define GEM_DMACR_RXSIZE_MASK (0x3 << 8) 91 #define GEM_DMACR_RXSIZE_8K (0x3 << 8) 92 #define GEM_DMACR_ES_PDATA (1 << 7) 93 #define GEM_DMACR_ES_DESCR (1 << 6) 94 #define GEM_DMACR_BLEN_MASK (0x1f << 0) 95 #define GEM_DMACR_BLEN_16 (0x10 << 0) 96 #define GEM_TXSR 0x0014 97 #define GEM_TXSR_TXGO (1 << 3) 98 #define GEM_RXQBASE 0x0018 99 #define GEM_TXQBASE 0x001c 100 #define GEM_RXSR 0x0020 101 #define GEM_RXSR_RXOVR (1 << 2) 102 #define GEM_ISR 0x0024 103 #define GEM_IER 0x0028 104 #define GEM_IDR 0x002c 105 #define GEM_IXR_HRESP (1 << 11) 106 #define GEM_IXR_RXOVR (1 << 10) 107 #define GEM_IXR_TXDONE (1 << 7) 108 #define GEM_IXR_TXURUN (1 << 6) 109 #define GEM_IXR_RETRY (1 << 5) 110 #define GEM_IXR_TXUSED (1 << 3) 111 #define GEM_IXR_RXUSED (1 << 2) 112 #define GEM_IXR_RXDONE (1 << 1) 113 #define GEM_PHYMNTNC 0x0034 114 #define GEM_PHYMNTNC_CLAUSE_22 (1 << 30) 115 #define GEM_PHYMNTNC_OP_READ (0x2 << 28) 116 #define GEM_PHYMNTNC_OP_WRITE (0x1 << 28) 117 #define GEM_PHYMNTNC_ADDR_MASK (0x1f << 23) 118 #define GEM_PHYMNTNC_ADDR_SHIFT 23 119 #define GEM_PHYMNTNC_REG_MASK (0x1f << 18) 120 #define GEM_PHYMNTNC_REG_SHIFT 18 121 #define GEM_PHYMNTNC_MUST_10 (0x2 << 16) 122 #define GEM_PHYMNTNC_DATA_MASK 0xffff 123 #define GEM_HASHL 0x0080 124 #define GEM_HASHH 0x0084 125 #define GEM_LADDRL(i) (0x0088 + (i) * 8) 126 #define GEM_LADDRH(i) (0x008c + (i) * 8) 127 #define GEM_LADDRNUM 4 128 #define GEM_MID 0x00fc 129 #define GEM_MID_VERSION_MASK (0xfff << 16) 130 #define GEM_MID_VERSION_SHIFT 16 131 #define GEM_OCTTXL 0x0100 132 #define GEM_OCTTXH 0x0104 133 #define GEM_TXCNT 0x0108 134 #define GEM_TXBCCNT 0x010c 135 #define GEM_TXMCCNT 0x0110 136 #define GEM_TXPAUSECNT 0x0114 137 #define GEM_TX64CNT 0x0118 138 #define GEM_TX65CNT 0x011c 139 #define GEM_TX128CNT 0x0120 140 #define GEM_TX256CNT 0x0124 141 #define GEM_TX512CNT 0x0128 142 #define GEM_TX1024CNT 0x012c 143 #define GEM_TXURUNCNT 0x0134 144 #define GEM_SNGLCOLLCNT 0x0138 145 #define GEM_MULTICOLLCNT 0x013c 146 #define GEM_EXCESSCOLLCNT 0x0140 147 #define GEM_LATECOLLCNT 0x0144 148 #define GEM_TXDEFERCNT 0x0148 149 #define GEM_TXCSENSECNT 0x014c 150 #define GEM_OCTRXL 0x0150 151 #define GEM_OCTRXH 0x0154 152 #define GEM_RXCNT 0x0158 153 #define GEM_RXBROADCNT 0x015c 154 #define GEM_RXMULTICNT 0x0160 155 #define GEM_RXPAUSECNT 0x0164 156 #define GEM_RX64CNT 0x0168 157 #define GEM_RX65CNT 0x016c 158 #define GEM_RX128CNT 0x0170 159 #define GEM_RX256CNT 0x0174 160 #define GEM_RX512CNT 0x0178 161 #define GEM_RX1024CNT 0x017c 162 #define GEM_RXUNDRCNT 0x0184 163 #define GEM_RXOVRCNT 0x0188 164 #define GEM_RXJABCNT 0x018c 165 #define GEM_RXFCSCNT 0x0190 166 #define GEM_RXLENGTHCNT 0x0194 167 #define GEM_RXSYMBCNT 0x0198 168 #define GEM_RXALIGNCNT 0x019c 169 #define GEM_RXRESERRCNT 0x01a0 170 #define GEM_RXORCNT 0x01a4 171 #define GEM_RXIPCCNT 0x01a8 172 #define GEM_RXTCPCCNT 0x01ac 173 #define GEM_RXUDPCCNT 0x01b0 174 #define GEM_CFG6 0x0294 175 #define GEM_CFG6_DMA64 (1 << 23) 176 #define GEM_TXQBASEHI 0x04c8 177 #define GEM_RXQBASEHI 0x04d4 178 179 #define GEM_CLK_TX "tx_clk" 180 181 struct cad_buf { 182 bus_dmamap_t bf_map; 183 struct mbuf *bf_m; 184 }; 185 186 struct cad_dmamem { 187 bus_dmamap_t cdm_map; 188 bus_dma_segment_t cdm_seg; 189 size_t cdm_size; 190 caddr_t cdm_kva; 191 }; 192 193 struct cad_desc32 { 194 uint32_t d_addr; 195 uint32_t d_status; 196 }; 197 198 struct cad_desc64 { 199 uint32_t d_addrlo; 200 uint32_t d_status; 201 uint32_t d_addrhi; 202 uint32_t d_unused; 203 }; 204 205 #define GEM_RXD_ADDR_WRAP (1 << 1) 206 #define GEM_RXD_ADDR_USED (1 << 0) 207 208 #define GEM_RXD_BCAST (1 << 31) 209 #define GEM_RXD_MCAST (1 << 30) 210 #define GEM_RXD_UCAST (1 << 29) 211 #define GEM_RXD_SPEC (1 << 27) 212 #define GEM_RXD_SPEC_MASK (0x3 << 25) 213 #define GEM_RXD_CSUM_MASK (0x3 << 22) 214 #define GEM_RXD_CSUM_UDP_OK (0x3 << 22) 215 #define GEM_RXD_CSUM_TCP_OK (0x2 << 22) 216 #define GEM_RXD_CSUM_IP_OK (0x1 << 22) 217 #define GEM_RXD_VLANTAG (1 << 21) 218 #define GEM_RXD_PRIOTAG (1 << 20) 219 #define GEM_RXD_CFI (1 << 16) 220 #define GEM_RXD_EOF (1 << 15) 221 #define GEM_RXD_SOF (1 << 14) 222 #define GEM_RXD_BADFCS (1 << 13) 223 #define GEM_RXD_LEN_MASK 0x1fff 224 225 #define GEM_TXD_USED (1 << 31) 226 #define GEM_TXD_WRAP (1 << 30) 227 #define GEM_TXD_RLIMIT (1 << 29) 228 #define GEM_TXD_CORRUPT (1 << 27) 229 #define GEM_TXD_LCOLL (1 << 26) 230 #define GEM_TXD_CSUMERR_MASK (0x7 << 20) 231 #define GEM_TXD_NOFCS (1 << 16) 232 #define GEM_TXD_LAST (1 << 15) 233 #define GEM_TXD_LEN_MASK 0x3fff 234 235 #define CAD_NRXDESC 256 236 237 #define CAD_NTXDESC 256 238 #define CAD_NTXSEGS 16 239 240 enum cad_phy_mode { 241 CAD_PHY_MODE_GMII, 242 CAD_PHY_MODE_RGMII, 243 CAD_PHY_MODE_RGMII_ID, 244 CAD_PHY_MODE_RGMII_RXID, 245 CAD_PHY_MODE_RGMII_TXID, 246 CAD_PHY_MODE_SGMII, 247 }; 248 249 struct cad_softc { 250 struct device sc_dev; 251 struct arpcom sc_ac; 252 253 bus_dma_tag_t sc_dmat; 254 bus_space_tag_t sc_iot; 255 bus_space_handle_t sc_ioh; 256 void *sc_ih; 257 int sc_node; 258 int sc_phy_loc; 259 enum cad_phy_mode sc_phy_mode; 260 unsigned char sc_rxhang_erratum; 261 unsigned char sc_rxdone; 262 unsigned char sc_dma64; 263 size_t sc_descsize; 264 265 struct mii_data sc_mii; 266 #define sc_media sc_mii.mii_media 267 struct timeout sc_tick; 268 269 struct cad_dmamem *sc_txring; 270 struct cad_buf *sc_txbuf; 271 caddr_t sc_txdesc; 272 unsigned int sc_tx_prod; 273 unsigned int sc_tx_cons; 274 275 struct if_rxring sc_rx_ring; 276 struct cad_dmamem *sc_rxring; 277 struct cad_buf *sc_rxbuf; 278 caddr_t sc_rxdesc; 279 unsigned int sc_rx_prod; 280 unsigned int sc_rx_cons; 281 uint32_t sc_netctl; 282 283 struct task sc_statchg_task; 284 uint32_t sc_tx_freq; 285 286 struct mutex sc_kstat_mtx; 287 struct kstat *sc_kstat; 288 }; 289 290 #define HREAD4(sc, reg) \ 291 (bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))) 292 #define HWRITE4(sc, reg, val) \ 293 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val)) 294 295 int cad_match(struct device *, void *, void *); 296 void cad_attach(struct device *, struct device *, void *); 297 298 int cad_ioctl(struct ifnet *, u_long, caddr_t); 299 void cad_start(struct ifqueue *); 300 void cad_watchdog(struct ifnet *); 301 302 void cad_reset(struct cad_softc *); 303 int cad_up(struct cad_softc *); 304 void cad_down(struct cad_softc *); 305 void cad_iff(struct cad_softc *); 306 int cad_intr(void *); 307 void cad_tick(void *); 308 void cad_statchg_task(void *); 309 310 int cad_media_change(struct ifnet *); 311 void cad_media_status(struct ifnet *, struct ifmediareq *); 312 int cad_mii_readreg(struct device *, int, int); 313 void cad_mii_writereg(struct device *, int, int, int); 314 void cad_mii_statchg(struct device *); 315 316 struct cad_dmamem *cad_dmamem_alloc(struct cad_softc *, bus_size_t, bus_size_t); 317 void cad_dmamem_free(struct cad_softc *, struct cad_dmamem *); 318 void cad_rxfill(struct cad_softc *); 319 void cad_rxeof(struct cad_softc *); 320 void cad_txeof(struct cad_softc *); 321 unsigned int cad_encap(struct cad_softc *, struct mbuf *); 322 struct mbuf *cad_alloc_mbuf(struct cad_softc *, bus_dmamap_t); 323 324 #if NKSTAT > 0 325 void cad_kstat_attach(struct cad_softc *); 326 int cad_kstat_read(struct kstat *); 327 void cad_kstat_tick(void *); 328 #endif 329 330 #ifdef DDB 331 struct cad_softc *cad_sc[4]; 332 #endif 333 334 const struct cfattach cad_ca = { 335 sizeof(struct cad_softc), cad_match, cad_attach 336 }; 337 338 struct cfdriver cad_cd = { 339 NULL, "cad", DV_IFNET 340 }; 341 342 const struct { 343 const char *name; 344 enum cad_phy_mode mode; 345 } cad_phy_modes[] = { 346 { "gmii", CAD_PHY_MODE_GMII }, 347 { "rgmii", CAD_PHY_MODE_RGMII }, 348 { "rgmii-id", CAD_PHY_MODE_RGMII_ID }, 349 { "rgmii-rxid", CAD_PHY_MODE_RGMII_RXID }, 350 { "rgmii-txid", CAD_PHY_MODE_RGMII_TXID }, 351 { "sgmii", CAD_PHY_MODE_SGMII }, 352 }; 353 354 int 355 cad_match(struct device *parent, void *match, void *aux) 356 { 357 struct fdt_attach_args *faa = aux; 358 359 return (OF_is_compatible(faa->fa_node, "cdns,gem") || 360 OF_is_compatible(faa->fa_node, "sifive,fu540-c000-gem") || 361 OF_is_compatible(faa->fa_node, "sifive,fu740-c000-gem")); 362 } 363 364 void 365 cad_attach(struct device *parent, struct device *self, void *aux) 366 { 367 char phy_mode[16]; 368 struct fdt_attach_args *faa = aux; 369 struct cad_softc *sc = (struct cad_softc *)self; 370 struct ifnet *ifp = &sc->sc_ac.ac_if; 371 uint32_t hi, lo; 372 uint32_t rev, ver; 373 unsigned int i; 374 int node, phy; 375 376 if (faa->fa_nreg < 1) { 377 printf(": no registers\n"); 378 return; 379 } 380 381 sc->sc_node = faa->fa_node; 382 sc->sc_dmat = faa->fa_dmat; 383 sc->sc_iot = faa->fa_iot; 384 if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, 385 faa->fa_reg[0].size, 0, &sc->sc_ioh) != 0) { 386 printf(": can't map registers\n"); 387 return; 388 } 389 390 if (OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr, 391 sizeof(sc->sc_ac.ac_enaddr)) != sizeof(sc->sc_ac.ac_enaddr)) { 392 for (i = 0; i < GEM_LADDRNUM; i++) { 393 lo = HREAD4(sc, GEM_LADDRL(i)); 394 hi = HREAD4(sc, GEM_LADDRH(i)); 395 if (lo != 0 || hi != 0) { 396 sc->sc_ac.ac_enaddr[0] = lo; 397 sc->sc_ac.ac_enaddr[1] = lo >> 8; 398 sc->sc_ac.ac_enaddr[2] = lo >> 16; 399 sc->sc_ac.ac_enaddr[3] = lo >> 24; 400 sc->sc_ac.ac_enaddr[4] = hi; 401 sc->sc_ac.ac_enaddr[5] = hi >> 8; 402 break; 403 } 404 } 405 if (i == GEM_LADDRNUM) 406 ether_fakeaddr(ifp); 407 } 408 409 phy = OF_getpropint(faa->fa_node, "phy-handle", 0); 410 node = OF_getnodebyphandle(phy); 411 if (node != 0) 412 sc->sc_phy_loc = OF_getpropint(node, "reg", MII_PHY_ANY); 413 else 414 sc->sc_phy_loc = MII_PHY_ANY; 415 416 sc->sc_phy_mode = CAD_PHY_MODE_RGMII; 417 OF_getprop(faa->fa_node, "phy-mode", phy_mode, sizeof(phy_mode)); 418 for (i = 0; i < nitems(cad_phy_modes); i++) { 419 if (strcmp(phy_mode, cad_phy_modes[i].name) == 0) { 420 sc->sc_phy_mode = cad_phy_modes[i].mode; 421 break; 422 } 423 } 424 425 rev = HREAD4(sc, GEM_MID); 426 ver = (rev & GEM_MID_VERSION_MASK) >> GEM_MID_VERSION_SHIFT; 427 428 sc->sc_descsize = sizeof(struct cad_desc32); 429 /* Register CFG6 is not present on Zynq-7000 / GEM version 0x2. */ 430 if (ver >= 0x7 && (HREAD4(sc, GEM_CFG6) & GEM_CFG6_DMA64)) { 431 sc->sc_descsize = sizeof(struct cad_desc64); 432 sc->sc_dma64 = 1; 433 } 434 435 if (OF_is_compatible(faa->fa_node, "cdns,zynq-gem")) 436 sc->sc_rxhang_erratum = 1; 437 438 timeout_set(&sc->sc_tick, cad_tick, sc); 439 task_set(&sc->sc_statchg_task, cad_statchg_task, sc); 440 441 cad_reset(sc); 442 443 sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE, 444 cad_intr, sc, sc->sc_dev.dv_xname); 445 if (sc->sc_ih == NULL) { 446 printf(": can't establish interrupt\n"); 447 goto fail; 448 } 449 450 ifp->if_softc = sc; 451 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 452 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 453 ifp->if_xflags |= IFXF_MPSAFE; 454 ifp->if_ioctl = cad_ioctl; 455 ifp->if_qstart = cad_start; 456 ifp->if_watchdog = cad_watchdog; 457 ifp->if_hardmtu = ETHER_MAX_DIX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN; 458 ifp->if_capabilities = IFCAP_VLAN_MTU; 459 460 /* 461 * Enable transmit checksum offload only on reliable hardware. 462 * At least Zynq-7000 appears to generate bad UDP header checksum if 463 * the checksum field has not been initialized to zero and 464 * UDP payload size is less than three octets. 465 */ 466 if (0) { 467 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | 468 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | 469 IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6; 470 } 471 472 printf(": rev 0x%x, address %s\n", rev, 473 ether_sprintf(sc->sc_ac.ac_enaddr)); 474 475 sc->sc_mii.mii_ifp = ifp; 476 sc->sc_mii.mii_readreg = cad_mii_readreg; 477 sc->sc_mii.mii_writereg = cad_mii_writereg; 478 sc->sc_mii.mii_statchg = cad_mii_statchg; 479 ifmedia_init(&sc->sc_media, 0, cad_media_change, cad_media_status); 480 481 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phy_loc, 482 MII_OFFSET_ANY, MIIF_NOISOLATE); 483 484 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { 485 printf("%s: no PHY found\n", sc->sc_dev.dv_xname); 486 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 487 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL); 488 } else { 489 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 490 } 491 492 if_attach(ifp); 493 ether_ifattach(ifp); 494 495 #if NKSTAT > 0 496 cad_kstat_attach(sc); 497 #endif 498 499 #ifdef DDB 500 if (sc->sc_dev.dv_unit < nitems(cad_sc)) 501 cad_sc[sc->sc_dev.dv_unit] = sc; 502 #endif 503 504 return; 505 506 fail: 507 if (sc->sc_ioh != 0) 508 bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size); 509 } 510 511 int 512 cad_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 513 { 514 struct cad_softc *sc = ifp->if_softc; 515 struct ifreq *ifr = (struct ifreq *)data; 516 int error = 0; 517 int s; 518 519 s = splnet(); 520 521 switch (cmd) { 522 case SIOCSIFADDR: 523 ifp->if_flags |= IFF_UP; 524 /* FALLTHROUGH */ 525 526 case SIOCSIFFLAGS: 527 if (ISSET(ifp->if_flags, IFF_UP)) { 528 if (ISSET(ifp->if_flags, IFF_RUNNING)) 529 error = ENETRESET; 530 else 531 error = cad_up(sc); 532 } else { 533 if (ISSET(ifp->if_flags, IFF_RUNNING)) 534 cad_down(sc); 535 } 536 break; 537 538 case SIOCGIFMEDIA: 539 case SIOCSIFMEDIA: 540 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 541 break; 542 543 default: 544 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data); 545 break; 546 } 547 548 if (error == ENETRESET) { 549 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 550 (IFF_UP | IFF_RUNNING)) 551 cad_iff(sc); 552 error = 0; 553 } 554 555 splx(s); 556 557 return error; 558 } 559 560 void 561 cad_reset(struct cad_softc *sc) 562 { 563 static const unsigned int mdcclk_divs[] = { 564 8, 16, 32, 48, 64, 96, 128, 224 565 }; 566 unsigned int freq; 567 uint32_t div, netcfg; 568 569 HWRITE4(sc, GEM_NETCTL, 0); 570 HWRITE4(sc, GEM_IDR, ~0U); 571 HWRITE4(sc, GEM_RXSR, 0); 572 HWRITE4(sc, GEM_TXSR, 0); 573 if (sc->sc_dma64) { 574 HWRITE4(sc, GEM_RXQBASEHI, 0); 575 HWRITE4(sc, GEM_TXQBASEHI, 0); 576 } 577 HWRITE4(sc, GEM_RXQBASE, 0); 578 HWRITE4(sc, GEM_TXQBASE, 0); 579 580 /* MDIO clock rate must not exceed 2.5 MHz. */ 581 freq = clock_get_frequency(sc->sc_node, "pclk"); 582 for (div = 0; div < nitems(mdcclk_divs) - 1; div++) { 583 if (freq / mdcclk_divs[div] <= 2500000) 584 break; 585 } 586 KASSERT(div < nitems(mdcclk_divs)); 587 588 netcfg = HREAD4(sc, GEM_NETCFG); 589 netcfg &= ~GEM_NETCFG_MDCCLKDIV_MASK; 590 netcfg |= div << GEM_NETCFG_MDCCLKDIV_SHIFT; 591 HWRITE4(sc, GEM_NETCFG, netcfg); 592 593 /* Enable MDIO bus. */ 594 sc->sc_netctl = GEM_NETCTL_MDEN; 595 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl); 596 } 597 598 int 599 cad_up(struct cad_softc *sc) 600 { 601 struct ifnet *ifp = &sc->sc_ac.ac_if; 602 struct cad_buf *rxb, *txb; 603 struct cad_desc32 *desc32; 604 struct cad_desc64 *desc64; 605 uint64_t addr; 606 int flags = BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW; 607 unsigned int i; 608 uint32_t val; 609 610 if (sc->sc_dma64) 611 flags |= BUS_DMA_64BIT; 612 613 /* 614 * Set up Tx descriptor ring. 615 */ 616 617 sc->sc_txring = cad_dmamem_alloc(sc, 618 CAD_NTXDESC * sc->sc_descsize, sc->sc_descsize); 619 sc->sc_txdesc = sc->sc_txring->cdm_kva; 620 621 desc32 = (struct cad_desc32 *)sc->sc_txdesc; 622 desc64 = (struct cad_desc64 *)sc->sc_txdesc; 623 624 sc->sc_txbuf = malloc(sizeof(*sc->sc_txbuf) * CAD_NTXDESC, 625 M_DEVBUF, M_WAITOK); 626 for (i = 0; i < CAD_NTXDESC; i++) { 627 txb = &sc->sc_txbuf[i]; 628 bus_dmamap_create(sc->sc_dmat, MCLBYTES, CAD_NTXSEGS, 629 MCLBYTES, 0, flags, &txb->bf_map); 630 txb->bf_m = NULL; 631 632 if (sc->sc_dma64) { 633 desc64[i].d_addrhi = 0; 634 desc64[i].d_addrlo = 0; 635 desc64[i].d_status = GEM_TXD_USED; 636 if (i == CAD_NTXDESC - 1) 637 desc64[i].d_status |= GEM_TXD_WRAP; 638 } else { 639 desc32[i].d_addr = 0; 640 desc32[i].d_status = GEM_TXD_USED; 641 if (i == CAD_NTXDESC - 1) 642 desc32[i].d_status |= GEM_TXD_WRAP; 643 } 644 } 645 646 sc->sc_tx_prod = 0; 647 sc->sc_tx_cons = 0; 648 649 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map, 650 0, sc->sc_txring->cdm_size, 651 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 652 653 addr = sc->sc_txring->cdm_map->dm_segs[0].ds_addr; 654 if (sc->sc_dma64) 655 HWRITE4(sc, GEM_TXQBASEHI, addr >> 32); 656 HWRITE4(sc, GEM_TXQBASE, addr); 657 658 /* 659 * Set up Rx descriptor ring. 660 */ 661 662 sc->sc_rxring = cad_dmamem_alloc(sc, 663 CAD_NRXDESC * sc->sc_descsize, sc->sc_descsize); 664 sc->sc_rxdesc = sc->sc_rxring->cdm_kva; 665 666 desc32 = (struct cad_desc32 *)sc->sc_rxdesc; 667 desc64 = (struct cad_desc64 *)sc->sc_rxdesc; 668 669 sc->sc_rxbuf = malloc(sizeof(struct cad_buf) * CAD_NRXDESC, 670 M_DEVBUF, M_WAITOK); 671 for (i = 0; i < CAD_NRXDESC; i++) { 672 rxb = &sc->sc_rxbuf[i]; 673 bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 674 MCLBYTES, 0, flags, &rxb->bf_map); 675 rxb->bf_m = NULL; 676 677 /* Mark all descriptors as used so that driver owns them. */ 678 if (sc->sc_dma64) { 679 desc64[i].d_addrhi = 0; 680 desc64[i].d_addrlo = GEM_RXD_ADDR_USED; 681 if (i == CAD_NRXDESC - 1) 682 desc64[i].d_addrlo |= GEM_RXD_ADDR_WRAP; 683 } else { 684 desc32[i].d_addr = GEM_RXD_ADDR_USED; 685 if (i == CAD_NRXDESC - 1) 686 desc32[i].d_addr |= GEM_RXD_ADDR_WRAP; 687 } 688 } 689 690 if_rxr_init(&sc->sc_rx_ring, 2, CAD_NRXDESC); 691 692 sc->sc_rx_prod = 0; 693 sc->sc_rx_cons = 0; 694 cad_rxfill(sc); 695 696 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map, 697 0, sc->sc_rxring->cdm_size, 698 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 699 700 addr = sc->sc_rxring->cdm_map->dm_segs[0].ds_addr; 701 if (sc->sc_dma64) 702 HWRITE4(sc, GEM_RXQBASEHI, addr >> 32); 703 HWRITE4(sc, GEM_RXQBASE, addr); 704 705 /* 706 * Set MAC address filters. 707 */ 708 709 HWRITE4(sc, GEM_LADDRL(0), sc->sc_ac.ac_enaddr[0] | 710 ((uint32_t)sc->sc_ac.ac_enaddr[1] << 8) | 711 ((uint32_t)sc->sc_ac.ac_enaddr[2] << 16) | 712 ((uint32_t)sc->sc_ac.ac_enaddr[3] << 24)); 713 HWRITE4(sc, GEM_LADDRH(0), sc->sc_ac.ac_enaddr[4] | 714 ((uint32_t)sc->sc_ac.ac_enaddr[5] << 8)); 715 716 for (i = 1; i < GEM_LADDRNUM; i++) { 717 HWRITE4(sc, GEM_LADDRL(i), 0); 718 HWRITE4(sc, GEM_LADDRH(i), 0); 719 } 720 721 cad_iff(sc); 722 723 clock_set_frequency(sc->sc_node, GEM_CLK_TX, 2500000); 724 clock_enable(sc->sc_node, GEM_CLK_TX); 725 delay(1000); 726 727 val = HREAD4(sc, GEM_NETCFG); 728 729 val |= GEM_NETCFG_FCSREM | GEM_NETCFG_RXCSUMEN | GEM_NETCFG_1000 | 730 GEM_NETCFG_100 | GEM_NETCFG_FDEN | GEM_NETCFG_1536RXEN; 731 val &= ~GEM_NETCFG_RXOFFS_MASK; 732 val |= ETHER_ALIGN << GEM_NETCFG_RXOFFS_SHIFT; 733 val &= ~GEM_NETCFG_BCASTDI; 734 735 if (sc->sc_phy_mode == CAD_PHY_MODE_SGMII) 736 val |= GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL; 737 else 738 val &= ~(GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL); 739 740 HWRITE4(sc, GEM_NETCFG, val); 741 742 val = HREAD4(sc, GEM_DMACR); 743 744 if (sc->sc_dma64) 745 val |= GEM_DMACR_DMA64; 746 else 747 val &= ~GEM_DMACR_DMA64; 748 /* Use CPU's native byte order with descriptor words. */ 749 #if BYTE_ORDER == BIG_ENDIAN 750 val |= GEM_DMACR_ES_DESCR; 751 #else 752 val &= ~GEM_DMACR_ES_DESCR; 753 #endif 754 val &= ~GEM_DMACR_ES_PDATA; 755 val |= GEM_DMACR_AHBDISC | GEM_DMACR_TXSIZE; 756 val &= ~GEM_DMACR_RXSIZE_MASK; 757 val |= GEM_DMACR_RXSIZE_8K; 758 val &= ~GEM_DMACR_RXBUF_MASK; 759 val |= (MCLBYTES / 64) << GEM_DMACR_RXBUF_SHIFT; 760 val &= ~GEM_DMACR_BLEN_MASK; 761 val |= GEM_DMACR_BLEN_16; 762 763 if (ifp->if_capabilities & IFCAP_CSUM_IPv4) 764 val |= GEM_DMACR_TXCSUMEN; 765 766 HWRITE4(sc, GEM_DMACR, val); 767 768 /* Clear statistics. */ 769 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STATCLR); 770 771 /* Enable Rx and Tx. */ 772 sc->sc_netctl |= GEM_NETCTL_RXEN | GEM_NETCTL_TXEN; 773 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl); 774 775 /* Enable interrupts. */ 776 HWRITE4(sc, GEM_IER, GEM_IXR_HRESP | GEM_IXR_RXOVR | GEM_IXR_RXDONE | 777 GEM_IXR_TXDONE); 778 779 if (sc->sc_rxhang_erratum) 780 HWRITE4(sc, GEM_IER, GEM_IXR_RXUSED); 781 782 if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) 783 mii_mediachg(&sc->sc_mii); 784 785 ifp->if_flags |= IFF_RUNNING; 786 ifq_clr_oactive(&ifp->if_snd); 787 788 timeout_add_sec(&sc->sc_tick, 1); 789 790 return 0; 791 } 792 793 void 794 cad_down(struct cad_softc *sc) 795 { 796 struct ifnet *ifp = &sc->sc_ac.ac_if; 797 struct cad_buf *rxb, *txb; 798 unsigned int i, timeout; 799 800 ifp->if_flags &= ~IFF_RUNNING; 801 802 ifq_clr_oactive(&ifp->if_snd); 803 ifp->if_timer = 0; 804 805 timeout_del_barrier(&sc->sc_tick); 806 807 /* Disable data transfer. */ 808 sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN); 809 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl); 810 811 /* Disable all interrupts. */ 812 HWRITE4(sc, GEM_IDR, ~0U); 813 814 /* Wait for transmitter to become idle. */ 815 for (timeout = 1000; timeout > 0; timeout--) { 816 if ((HREAD4(sc, GEM_TXSR) & GEM_TXSR_TXGO) == 0) 817 break; 818 delay(10); 819 } 820 if (timeout == 0) 821 printf("%s: transmitter not idle\n", sc->sc_dev.dv_xname); 822 823 mii_down(&sc->sc_mii); 824 825 /* Wait for activity to cease. */ 826 intr_barrier(sc->sc_ih); 827 ifq_barrier(&ifp->if_snd); 828 taskq_del_barrier(systq, &sc->sc_statchg_task); 829 830 /* Disable the packet clock as it is not needed any longer. */ 831 clock_disable(sc->sc_node, GEM_CLK_TX); 832 833 cad_reset(sc); 834 835 /* 836 * Tear down the Tx descriptor ring. 837 */ 838 839 for (i = 0; i < CAD_NTXDESC; i++) { 840 txb = &sc->sc_txbuf[i]; 841 if (txb->bf_m != NULL) { 842 bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0, 843 txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 844 bus_dmamap_unload(sc->sc_dmat, txb->bf_map); 845 m_freem(txb->bf_m); 846 } 847 bus_dmamap_destroy(sc->sc_dmat, txb->bf_map); 848 } 849 free(sc->sc_txbuf, M_DEVBUF, sizeof(*sc->sc_txbuf) * CAD_NTXDESC); 850 sc->sc_txbuf = NULL; 851 852 cad_dmamem_free(sc, sc->sc_txring); 853 sc->sc_txring = NULL; 854 sc->sc_txdesc = NULL; 855 856 /* 857 * Tear down the Rx descriptor ring. 858 */ 859 860 for (i = 0; i < CAD_NRXDESC; i++) { 861 rxb = &sc->sc_rxbuf[i]; 862 if (rxb->bf_m != NULL) { 863 bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, 0, 864 rxb->bf_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 865 bus_dmamap_unload(sc->sc_dmat, rxb->bf_map); 866 m_freem(rxb->bf_m); 867 } 868 bus_dmamap_destroy(sc->sc_dmat, rxb->bf_map); 869 } 870 free(sc->sc_rxbuf, M_DEVBUF, sizeof(*sc->sc_txbuf) * CAD_NRXDESC); 871 sc->sc_rxbuf = NULL; 872 873 cad_dmamem_free(sc, sc->sc_rxring); 874 sc->sc_rxring = NULL; 875 sc->sc_rxdesc = NULL; 876 } 877 878 uint8_t 879 cad_hash_mac(const uint8_t *eaddr) 880 { 881 uint64_t val = 0; 882 int i; 883 uint8_t hash = 0; 884 885 for (i = ETHER_ADDR_LEN - 1; i >= 0; i--) 886 val = (val << 8) | eaddr[i]; 887 888 for (i = 0; i < 8; i++) { 889 hash ^= val; 890 val >>= 6; 891 } 892 893 return hash & 0x3f; 894 } 895 896 void 897 cad_iff(struct cad_softc *sc) 898 { 899 struct arpcom *ac = &sc->sc_ac; 900 struct ifnet *ifp = &sc->sc_ac.ac_if; 901 struct ether_multi *enm; 902 struct ether_multistep step; 903 uint64_t hash; 904 uint32_t netcfg; 905 906 netcfg = HREAD4(sc, GEM_NETCFG); 907 netcfg &= ~GEM_NETCFG_UCASTHASHEN; 908 909 ifp->if_flags &= ~IFF_ALLMULTI; 910 911 if (ifp->if_flags & IFF_PROMISC) { 912 netcfg |= GEM_NETCFG_COPYALL; 913 netcfg &= ~GEM_NETCFG_MCASTHASHEN; 914 } else { 915 netcfg &= ~GEM_NETCFG_COPYALL; 916 netcfg |= GEM_NETCFG_MCASTHASHEN; 917 918 if (ac->ac_multirangecnt > 0) 919 ifp->if_flags |= IFF_ALLMULTI; 920 921 if (ifp->if_flags & IFF_ALLMULTI) { 922 hash = ~0ULL; 923 } else { 924 hash = 0; 925 ETHER_FIRST_MULTI(step, ac, enm); 926 while (enm != NULL) { 927 hash |= 1ULL << cad_hash_mac(enm->enm_addrlo); 928 ETHER_NEXT_MULTI(step, enm); 929 } 930 } 931 932 HWRITE4(sc, GEM_HASHL, hash); 933 HWRITE4(sc, GEM_HASHH, hash >> 32); 934 } 935 936 HWRITE4(sc, GEM_NETCFG, netcfg); 937 } 938 939 void 940 cad_start(struct ifqueue *ifq) 941 { 942 struct ifnet *ifp = ifq->ifq_if; 943 struct cad_softc *sc = ifp->if_softc; 944 struct mbuf *m; 945 unsigned int free, head, used; 946 947 free = sc->sc_tx_cons; 948 head = sc->sc_tx_prod; 949 if (free <= head) 950 free += CAD_NTXDESC; 951 free -= head; 952 953 for (;;) { 954 if (free <= CAD_NTXSEGS) { 955 ifq_set_oactive(ifq); 956 break; 957 } 958 959 m = ifq_dequeue(ifq); 960 if (m == NULL) 961 break; 962 963 used = cad_encap(sc, m); 964 if (used == 0) { 965 m_freem(m); 966 continue; 967 } 968 969 #if NBPFILTER > 0 970 if (ifp->if_bpf != NULL) 971 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 972 #endif 973 974 ifp->if_timer = 5; 975 976 KASSERT(free >= used); 977 free -= used; 978 } 979 980 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX); 981 } 982 983 void 984 cad_watchdog(struct ifnet *ifp) 985 { 986 struct cad_softc *sc = ifp->if_softc; 987 988 ifp->if_timer = 0; 989 990 if ((ifp->if_flags & IFF_RUNNING) == 0) 991 return; 992 993 if (sc->sc_tx_cons == sc->sc_tx_prod) 994 return; 995 996 /* XXX */ 997 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX); 998 } 999 1000 unsigned int 1001 cad_encap(struct cad_softc *sc, struct mbuf *m) 1002 { 1003 bus_dmamap_t map; 1004 struct cad_buf *txb; 1005 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_txdesc; 1006 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_txdesc; 1007 unsigned int head, idx, nsegs; 1008 uint32_t status; 1009 int i; 1010 1011 head = sc->sc_tx_prod; 1012 1013 txb = &sc->sc_txbuf[head]; 1014 map = txb->bf_map; 1015 1016 switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) { 1017 case 0: 1018 break; 1019 case EFBIG: 1020 if (m_defrag(m, M_DONTWAIT) != 0) 1021 return 0; 1022 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1023 BUS_DMA_NOWAIT) != 0) 1024 return 0; 1025 break; 1026 default: 1027 return 0; 1028 } 1029 1030 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1031 BUS_DMASYNC_PREWRITE); 1032 1033 nsegs = map->dm_nsegs; 1034 KASSERT(nsegs > 0); 1035 1036 txb->bf_m = m; 1037 1038 /* 1039 * Fill descriptors in reverse order so that all the descriptors 1040 * are ready when the first descriptor's GEM_TXD_USED bit is cleared. 1041 */ 1042 for (i = nsegs - 1; i >= 0; i--) { 1043 idx = (head + i) % CAD_NTXDESC; 1044 1045 status = map->dm_segs[i].ds_len & GEM_TXD_LEN_MASK; 1046 if (i == nsegs - 1) 1047 status |= GEM_TXD_LAST; 1048 if (idx == CAD_NTXDESC - 1) 1049 status |= GEM_TXD_WRAP; 1050 1051 if (sc->sc_dma64) { 1052 uint64_t addr = map->dm_segs[i].ds_addr; 1053 1054 desc64[idx].d_addrlo = addr; 1055 desc64[idx].d_addrhi = addr >> 32; 1056 } else { 1057 desc32[idx].d_addr = map->dm_segs[i].ds_addr; 1058 } 1059 1060 /* Make d_addr visible before GEM_TXD_USED is cleared 1061 * in d_status. */ 1062 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map, 1063 idx * sc->sc_descsize, sc->sc_descsize, 1064 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1065 1066 if (sc->sc_dma64) 1067 desc64[idx].d_status = status; 1068 else 1069 desc32[idx].d_status = status; 1070 1071 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map, 1072 idx * sc->sc_descsize, sc->sc_descsize, 1073 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1074 } 1075 1076 sc->sc_tx_prod = (head + nsegs) % CAD_NTXDESC; 1077 1078 return nsegs; 1079 } 1080 1081 int 1082 cad_intr(void *arg) 1083 { 1084 struct cad_softc *sc = arg; 1085 struct ifnet *ifp = &sc->sc_ac.ac_if; 1086 uint32_t isr; 1087 1088 isr = HREAD4(sc, GEM_ISR); 1089 HWRITE4(sc, GEM_ISR, isr); 1090 1091 if (isr & GEM_IXR_RXDONE) 1092 cad_rxeof(sc); 1093 if (isr & GEM_IXR_TXDONE) 1094 cad_txeof(sc); 1095 1096 if (isr & GEM_IXR_RXOVR) 1097 ifp->if_ierrors++; 1098 1099 if (sc->sc_rxhang_erratum && (isr & GEM_IXR_RXUSED)) { 1100 /* 1101 * Try to flush a packet from the Rx SRAM to avoid triggering 1102 * the Rx hang. 1103 */ 1104 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_DPRAM); 1105 cad_rxfill(sc); 1106 } 1107 1108 /* If there has been a DMA error, stop the interface to limit damage. */ 1109 if (isr & GEM_IXR_HRESP) { 1110 sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN); 1111 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl); 1112 HWRITE4(sc, GEM_IDR, ~0U); 1113 1114 printf("%s: hresp error, interface stopped\n", 1115 sc->sc_dev.dv_xname); 1116 } 1117 1118 return 1; 1119 } 1120 1121 void 1122 cad_rxeof(struct cad_softc *sc) 1123 { 1124 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 1125 struct ifnet *ifp = &sc->sc_ac.ac_if; 1126 struct mbuf *m; 1127 struct cad_buf *rxb; 1128 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_rxdesc; 1129 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_rxdesc; 1130 size_t len; 1131 unsigned int idx; 1132 uint32_t addr, status; 1133 1134 idx = sc->sc_rx_cons; 1135 1136 while (if_rxr_inuse(&sc->sc_rx_ring) > 0) { 1137 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map, 1138 idx * sc->sc_descsize, sc->sc_descsize, 1139 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1140 1141 if (sc->sc_dma64) 1142 addr = desc64[idx].d_addrlo; 1143 else 1144 addr = desc32[idx].d_addr; 1145 if ((addr & GEM_RXD_ADDR_USED) == 0) 1146 break; 1147 1148 /* Prevent premature read of d_status. */ 1149 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map, 1150 idx * sc->sc_descsize, sc->sc_descsize, 1151 BUS_DMASYNC_POSTREAD); 1152 1153 if (sc->sc_dma64) 1154 status = desc64[idx].d_status; 1155 else 1156 status = desc32[idx].d_status; 1157 len = status & GEM_RXD_LEN_MASK; 1158 1159 rxb = &sc->sc_rxbuf[idx]; 1160 1161 bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, ETHER_ALIGN, len, 1162 BUS_DMASYNC_POSTREAD); 1163 bus_dmamap_unload(sc->sc_dmat, rxb->bf_map); 1164 1165 m = rxb->bf_m; 1166 rxb->bf_m = NULL; 1167 KASSERT(m != NULL); 1168 1169 if_rxr_put(&sc->sc_rx_ring, 1); 1170 idx = (idx + 1) % CAD_NRXDESC; 1171 1172 if ((status & (GEM_RXD_SOF | GEM_RXD_EOF)) != 1173 (GEM_RXD_SOF | GEM_RXD_EOF)) { 1174 m_freem(m); 1175 ifp->if_ierrors++; 1176 continue; 1177 } 1178 1179 m_adj(m, ETHER_ALIGN); 1180 m->m_len = m->m_pkthdr.len = len; 1181 1182 m->m_pkthdr.csum_flags = 0; 1183 switch (status & GEM_RXD_CSUM_MASK) { 1184 case GEM_RXD_CSUM_IP_OK: 1185 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK; 1186 break; 1187 case GEM_RXD_CSUM_TCP_OK: 1188 case GEM_RXD_CSUM_UDP_OK: 1189 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK | 1190 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; 1191 break; 1192 } 1193 1194 ml_enqueue(&ml, m); 1195 1196 sc->sc_rxdone = 1; 1197 } 1198 1199 sc->sc_rx_cons = idx; 1200 1201 cad_rxfill(sc); 1202 1203 if (ifiq_input(&ifp->if_rcv, &ml)) 1204 if_rxr_livelocked(&sc->sc_rx_ring); 1205 } 1206 1207 void 1208 cad_rxfill(struct cad_softc *sc) 1209 { 1210 struct cad_buf *rxb; 1211 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_rxdesc; 1212 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_rxdesc; 1213 uint64_t addr; 1214 unsigned int idx; 1215 u_int slots; 1216 1217 idx = sc->sc_rx_prod; 1218 1219 for (slots = if_rxr_get(&sc->sc_rx_ring, CAD_NRXDESC); 1220 slots > 0; slots--) { 1221 rxb = &sc->sc_rxbuf[idx]; 1222 rxb->bf_m = cad_alloc_mbuf(sc, rxb->bf_map); 1223 if (rxb->bf_m == NULL) 1224 break; 1225 1226 addr = rxb->bf_map->dm_segs[0].ds_addr; 1227 KASSERT((addr & (GEM_RXD_ADDR_WRAP | GEM_RXD_ADDR_USED)) == 0); 1228 if (idx == CAD_NRXDESC - 1) 1229 addr |= GEM_RXD_ADDR_WRAP; 1230 1231 if (sc->sc_dma64) { 1232 desc64[idx].d_addrhi = addr >> 32; 1233 desc64[idx].d_status = 0; 1234 } else { 1235 desc32[idx].d_status = 0; 1236 } 1237 1238 /* Make d_addrhi and d_status visible before clearing 1239 * GEM_RXD_ADDR_USED in d_addr or d_addrlo. */ 1240 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map, 1241 idx * sc->sc_descsize, sc->sc_descsize, 1242 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1243 1244 if (sc->sc_dma64) 1245 desc64[idx].d_addrlo = addr; 1246 else 1247 desc32[idx].d_addr = addr; 1248 1249 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map, 1250 idx * sc->sc_descsize, sc->sc_descsize, 1251 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1252 1253 idx = (idx + 1) % CAD_NRXDESC; 1254 } 1255 if_rxr_put(&sc->sc_rx_ring, slots); 1256 1257 sc->sc_rx_prod = idx; 1258 } 1259 1260 void 1261 cad_txeof(struct cad_softc *sc) 1262 { 1263 struct ifnet *ifp = &sc->sc_ac.ac_if; 1264 struct cad_buf *txb; 1265 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_txdesc; 1266 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_txdesc; 1267 unsigned int free = 0; 1268 unsigned int idx, nsegs; 1269 uint32_t status; 1270 1271 idx = sc->sc_tx_cons; 1272 1273 while (idx != sc->sc_tx_prod) { 1274 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map, 1275 idx * sc->sc_descsize, sc->sc_descsize, 1276 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1277 1278 if (sc->sc_dma64) 1279 status = desc64[idx].d_status; 1280 else 1281 status = desc32[idx].d_status; 1282 if ((status & GEM_TXD_USED) == 0) 1283 break; 1284 1285 if (status & (GEM_TXD_RLIMIT | GEM_TXD_CORRUPT | 1286 GEM_TXD_LCOLL | GEM_TXD_CSUMERR_MASK)) 1287 ifp->if_oerrors++; 1288 1289 txb = &sc->sc_txbuf[idx]; 1290 nsegs = txb->bf_map->dm_nsegs; 1291 KASSERT(nsegs > 0); 1292 1293 bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0, 1294 txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1295 bus_dmamap_unload(sc->sc_dmat, txb->bf_map); 1296 1297 m_freem(txb->bf_m); 1298 txb->bf_m = NULL; 1299 1300 for (;;) { 1301 idx = (idx + 1) % CAD_NTXDESC; 1302 1303 nsegs--; 1304 if (nsegs == 0) 1305 break; 1306 1307 /* 1308 * The controller marks only the initial segment used. 1309 * Mark the remaining segments used manually, so that 1310 * the controller will not accidentally use them later. 1311 * 1312 * This could be done lazily on the Tx ring producer 1313 * side by ensuring that the subsequent descriptor 1314 * after the actual segments is marked used. 1315 * However, this would make the ring trickier to debug. 1316 */ 1317 1318 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map, 1319 idx * sc->sc_descsize, sc->sc_descsize, 1320 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1321 1322 if (sc->sc_dma64) 1323 desc64[idx].d_status |= GEM_TXD_USED; 1324 else 1325 desc32[idx].d_status |= GEM_TXD_USED; 1326 1327 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map, 1328 idx * sc->sc_descsize, sc->sc_descsize, 1329 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1330 } 1331 1332 free++; 1333 } 1334 1335 if (free == 0) 1336 return; 1337 1338 sc->sc_tx_cons = idx; 1339 1340 if (ifq_is_oactive(&ifp->if_snd)) 1341 ifq_restart(&ifp->if_snd); 1342 } 1343 1344 void 1345 cad_tick(void *arg) 1346 { 1347 struct cad_softc *sc = arg; 1348 struct ifnet *ifp = &sc->sc_ac.ac_if; 1349 int s; 1350 1351 if ((ifp->if_flags & IFF_RUNNING) == 0) 1352 return; 1353 1354 s = splnet(); 1355 1356 mii_tick(&sc->sc_mii); 1357 1358 /* 1359 * If there has been no Rx for a moment, Rx DMA might be stuck. 1360 * Try to recover by restarting the receiver. 1361 */ 1362 if (sc->sc_rxhang_erratum && !sc->sc_rxdone) { 1363 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl & ~GEM_NETCTL_RXEN); 1364 (void)HREAD4(sc, GEM_NETCTL); 1365 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl); 1366 } 1367 sc->sc_rxdone = 0; 1368 1369 splx(s); 1370 1371 timeout_add_sec(&sc->sc_tick, 1); 1372 } 1373 1374 int 1375 cad_media_change(struct ifnet *ifp) 1376 { 1377 struct cad_softc *sc = ifp->if_softc; 1378 1379 if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) 1380 mii_mediachg(&sc->sc_mii); 1381 1382 return 0; 1383 } 1384 1385 void 1386 cad_media_status(struct ifnet *ifp, struct ifmediareq *imr) 1387 { 1388 struct cad_softc *sc = ifp->if_softc; 1389 1390 if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) { 1391 mii_pollstat(&sc->sc_mii); 1392 imr->ifm_active = sc->sc_mii.mii_media_active; 1393 imr->ifm_status = sc->sc_mii.mii_media_status; 1394 } 1395 } 1396 1397 int 1398 cad_mii_wait(struct cad_softc *sc) 1399 { 1400 int timeout; 1401 1402 for (timeout = 10000; timeout > 0; timeout--) { 1403 if (HREAD4(sc, GEM_NETSR) & GEM_NETSR_PHY_MGMT_IDLE) 1404 break; 1405 delay(10); 1406 } 1407 if (timeout == 0) 1408 return ETIMEDOUT; 1409 return 0; 1410 } 1411 1412 void 1413 cad_mii_oper(struct cad_softc *sc, int phy_no, int reg, uint32_t oper) 1414 { 1415 oper |= (phy_no << GEM_PHYMNTNC_ADDR_SHIFT) & GEM_PHYMNTNC_ADDR_MASK; 1416 oper |= (reg << GEM_PHYMNTNC_REG_SHIFT) & GEM_PHYMNTNC_REG_MASK; 1417 oper |= GEM_PHYMNTNC_CLAUSE_22 | GEM_PHYMNTNC_MUST_10; 1418 1419 if (cad_mii_wait(sc) != 0) { 1420 printf("%s: MII bus idle timeout\n", sc->sc_dev.dv_xname); 1421 return; 1422 } 1423 1424 HWRITE4(sc, GEM_PHYMNTNC, oper); 1425 1426 if (cad_mii_wait(sc) != 0) { 1427 printf("%s: MII bus operation timeout\n", sc->sc_dev.dv_xname); 1428 return; 1429 } 1430 } 1431 1432 int 1433 cad_mii_readreg(struct device *self, int phy_no, int reg) 1434 { 1435 struct cad_softc *sc = (struct cad_softc *)self; 1436 int val; 1437 1438 cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_READ); 1439 1440 val = HREAD4(sc, GEM_PHYMNTNC) & GEM_PHYMNTNC_DATA_MASK; 1441 1442 /* The MAC does not handle 1000baseT in half duplex mode. */ 1443 if (reg == MII_EXTSR) 1444 val &= ~EXTSR_1000THDX; 1445 1446 return val; 1447 } 1448 1449 void 1450 cad_mii_writereg(struct device *self, int phy_no, int reg, int val) 1451 { 1452 struct cad_softc *sc = (struct cad_softc *)self; 1453 1454 cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_WRITE | 1455 (val & GEM_PHYMNTNC_DATA_MASK)); 1456 } 1457 1458 void 1459 cad_mii_statchg(struct device *self) 1460 { 1461 struct cad_softc *sc = (struct cad_softc *)self; 1462 uint32_t netcfg; 1463 1464 netcfg = HREAD4(sc, GEM_NETCFG); 1465 if (sc->sc_mii.mii_media_active & IFM_FDX) 1466 netcfg |= GEM_NETCFG_FDEN; 1467 else 1468 netcfg &= ~GEM_NETCFG_FDEN; 1469 1470 netcfg &= ~(GEM_NETCFG_100 | GEM_NETCFG_1000); 1471 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 1472 default: 1473 sc->sc_tx_freq = 2500000; 1474 break; 1475 case IFM_100_TX: 1476 netcfg |= GEM_NETCFG_100; 1477 sc->sc_tx_freq = 25000000; 1478 break; 1479 case IFM_1000_T: 1480 netcfg |= GEM_NETCFG_100 | GEM_NETCFG_1000; 1481 sc->sc_tx_freq = 125000000; 1482 break; 1483 } 1484 1485 HWRITE4(sc, GEM_NETCFG, netcfg); 1486 1487 /* Defer clock setting because it allocates memory with M_WAITOK. */ 1488 task_add(systq, &sc->sc_statchg_task); 1489 } 1490 1491 void 1492 cad_statchg_task(void *arg) 1493 { 1494 struct cad_softc *sc = arg; 1495 1496 clock_set_frequency(sc->sc_node, GEM_CLK_TX, sc->sc_tx_freq); 1497 } 1498 1499 struct cad_dmamem * 1500 cad_dmamem_alloc(struct cad_softc *sc, bus_size_t size, bus_size_t align) 1501 { 1502 struct cad_dmamem *cdm; 1503 int flags = BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW; 1504 int nsegs; 1505 1506 cdm = malloc(sizeof(*cdm), M_DEVBUF, M_WAITOK | M_ZERO); 1507 cdm->cdm_size = size; 1508 1509 if (sc->sc_dma64) 1510 flags |= BUS_DMA_64BIT; 1511 1512 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1513 flags, &cdm->cdm_map) != 0) 1514 goto cdmfree; 1515 if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &cdm->cdm_seg, 1, 1516 &nsegs, BUS_DMA_WAITOK) != 0) 1517 goto destroy; 1518 if (bus_dmamem_map(sc->sc_dmat, &cdm->cdm_seg, nsegs, size, 1519 &cdm->cdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0) 1520 goto free; 1521 if (bus_dmamap_load(sc->sc_dmat, cdm->cdm_map, cdm->cdm_kva, size, 1522 NULL, BUS_DMA_WAITOK) != 0) 1523 goto unmap; 1524 memset(cdm->cdm_kva, 0, size); 1525 return cdm; 1526 1527 unmap: 1528 bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, size); 1529 free: 1530 bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1); 1531 destroy: 1532 bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map); 1533 cdmfree: 1534 free(cdm, M_DEVBUF, sizeof(*cdm)); 1535 return NULL; 1536 } 1537 1538 void 1539 cad_dmamem_free(struct cad_softc *sc, struct cad_dmamem *cdm) 1540 { 1541 bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, cdm->cdm_size); 1542 bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1); 1543 bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map); 1544 free(cdm, M_DEVBUF, sizeof(*cdm)); 1545 } 1546 1547 struct mbuf * 1548 cad_alloc_mbuf(struct cad_softc *sc, bus_dmamap_t map) 1549 { 1550 struct mbuf *m; 1551 1552 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES); 1553 if (m == NULL) 1554 return NULL; 1555 m->m_len = m->m_pkthdr.len = MCLBYTES; 1556 1557 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) { 1558 m_freem(m); 1559 return NULL; 1560 } 1561 1562 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1563 BUS_DMASYNC_PREREAD); 1564 1565 return m; 1566 } 1567 1568 #if NKSTAT > 0 1569 enum cad_stat { 1570 cad_stat_tx_toto, 1571 cad_stat_tx_totp, 1572 cad_stat_tx_bcast, 1573 cad_stat_tx_mcast, 1574 cad_stat_tx_pause, 1575 cad_stat_tx_h64, 1576 cad_stat_tx_h65, 1577 cad_stat_tx_h128, 1578 cad_stat_tx_h256, 1579 cad_stat_tx_h512, 1580 cad_stat_tx_h1024, 1581 cad_stat_tx_underrun, 1582 cad_stat_tx_scoll, 1583 cad_stat_tx_mcoll, 1584 cad_stat_tx_ecoll, 1585 cad_stat_tx_lcoll, 1586 cad_stat_tx_defer, 1587 cad_stat_tx_sense, 1588 cad_stat_rx_toto, 1589 cad_stat_rx_totp, 1590 cad_stat_rx_bcast, 1591 cad_stat_rx_mcast, 1592 cad_stat_rx_pause, 1593 cad_stat_rx_h64, 1594 cad_stat_rx_h65, 1595 cad_stat_rx_h128, 1596 cad_stat_rx_h256, 1597 cad_stat_rx_h512, 1598 cad_stat_rx_h1024, 1599 cad_stat_rx_undersz, 1600 cad_stat_rx_oversz, 1601 cad_stat_rx_jabber, 1602 cad_stat_rx_fcs, 1603 cad_stat_rx_symberr, 1604 cad_stat_rx_align, 1605 cad_stat_rx_reserr, 1606 cad_stat_rx_overrun, 1607 cad_stat_rx_ipcsum, 1608 cad_stat_rx_tcpcsum, 1609 cad_stat_rx_udpcsum, 1610 cad_stat_count 1611 }; 1612 1613 struct cad_counter { 1614 const char *c_name; 1615 enum kstat_kv_unit c_unit; 1616 uint32_t c_reg; 1617 }; 1618 1619 const struct cad_counter cad_counters[cad_stat_count] = { 1620 [cad_stat_tx_toto] = 1621 { "tx total", KSTAT_KV_U_BYTES, 0 }, 1622 [cad_stat_tx_totp] = 1623 { "tx total", KSTAT_KV_U_PACKETS, GEM_TXCNT }, 1624 [cad_stat_tx_bcast] = 1625 { "tx bcast", KSTAT_KV_U_PACKETS, GEM_TXBCCNT }, 1626 [cad_stat_tx_mcast] = 1627 { "tx mcast", KSTAT_KV_U_PACKETS, GEM_TXMCCNT }, 1628 [cad_stat_tx_pause] = 1629 { "tx pause", KSTAT_KV_U_PACKETS, GEM_TXPAUSECNT }, 1630 [cad_stat_tx_h64] = 1631 { "tx 64B", KSTAT_KV_U_PACKETS, GEM_TX64CNT }, 1632 [cad_stat_tx_h65] = 1633 { "tx 65-127B", KSTAT_KV_U_PACKETS, GEM_TX65CNT }, 1634 [cad_stat_tx_h128] = 1635 { "tx 128-255B", KSTAT_KV_U_PACKETS, GEM_TX128CNT }, 1636 [cad_stat_tx_h256] = 1637 { "tx 256-511B", KSTAT_KV_U_PACKETS, GEM_TX256CNT }, 1638 [cad_stat_tx_h512] = 1639 { "tx 512-1023B", KSTAT_KV_U_PACKETS, GEM_TX512CNT }, 1640 [cad_stat_tx_h1024] = 1641 { "tx 1024-1518B", KSTAT_KV_U_PACKETS, GEM_TX1024CNT }, 1642 [cad_stat_tx_underrun] = 1643 { "tx underrun", KSTAT_KV_U_PACKETS, GEM_TXURUNCNT }, 1644 [cad_stat_tx_scoll] = 1645 { "tx scoll", KSTAT_KV_U_PACKETS, GEM_SNGLCOLLCNT }, 1646 [cad_stat_tx_mcoll] = 1647 { "tx mcoll", KSTAT_KV_U_PACKETS, GEM_MULTICOLLCNT }, 1648 [cad_stat_tx_ecoll] = 1649 { "tx excess coll", KSTAT_KV_U_PACKETS, GEM_EXCESSCOLLCNT }, 1650 [cad_stat_tx_lcoll] = 1651 { "tx late coll", KSTAT_KV_U_PACKETS, GEM_LATECOLLCNT }, 1652 [cad_stat_tx_defer] = 1653 { "tx defer", KSTAT_KV_U_PACKETS, GEM_TXDEFERCNT }, 1654 [cad_stat_tx_sense] = 1655 { "tx csense", KSTAT_KV_U_PACKETS, GEM_TXCSENSECNT }, 1656 [cad_stat_rx_toto] = 1657 { "rx total", KSTAT_KV_U_BYTES, 0 }, 1658 [cad_stat_rx_totp] = 1659 { "rx total", KSTAT_KV_U_PACKETS, GEM_RXCNT }, 1660 [cad_stat_rx_bcast] = 1661 { "rx bcast", KSTAT_KV_U_PACKETS, GEM_RXBROADCNT }, 1662 [cad_stat_rx_mcast] = 1663 { "rx mcast", KSTAT_KV_U_PACKETS, GEM_RXMULTICNT }, 1664 [cad_stat_rx_pause] = 1665 { "rx pause", KSTAT_KV_U_PACKETS, GEM_RXPAUSECNT }, 1666 [cad_stat_rx_h64] = 1667 { "rx 64B", KSTAT_KV_U_PACKETS, GEM_RX64CNT }, 1668 [cad_stat_rx_h65] = 1669 { "rx 65-127B", KSTAT_KV_U_PACKETS, GEM_RX65CNT }, 1670 [cad_stat_rx_h128] = 1671 { "rx 128-255B", KSTAT_KV_U_PACKETS, GEM_RX128CNT }, 1672 [cad_stat_rx_h256] = 1673 { "rx 256-511B", KSTAT_KV_U_PACKETS, GEM_RX256CNT }, 1674 [cad_stat_rx_h512] = 1675 { "rx 512-1023B", KSTAT_KV_U_PACKETS, GEM_RX512CNT }, 1676 [cad_stat_rx_h1024] = 1677 { "rx 1024-1518B", KSTAT_KV_U_PACKETS, GEM_RX1024CNT }, 1678 [cad_stat_rx_undersz] = 1679 { "rx undersz", KSTAT_KV_U_PACKETS, GEM_RXUNDRCNT }, 1680 [cad_stat_rx_oversz] = 1681 { "rx oversz", KSTAT_KV_U_PACKETS, GEM_RXOVRCNT }, 1682 [cad_stat_rx_jabber] = 1683 { "rx jabber", KSTAT_KV_U_PACKETS, GEM_RXJABCNT }, 1684 [cad_stat_rx_fcs] = 1685 { "rx fcs", KSTAT_KV_U_PACKETS, GEM_RXFCSCNT }, 1686 [cad_stat_rx_symberr] = 1687 { "rx symberr", KSTAT_KV_U_PACKETS, GEM_RXSYMBCNT }, 1688 [cad_stat_rx_align] = 1689 { "rx align", KSTAT_KV_U_PACKETS, GEM_RXALIGNCNT }, 1690 [cad_stat_rx_reserr] = 1691 { "rx reserr", KSTAT_KV_U_PACKETS, GEM_RXRESERRCNT }, 1692 [cad_stat_rx_overrun] = 1693 { "rx overrun", KSTAT_KV_U_PACKETS, GEM_RXORCNT }, 1694 [cad_stat_rx_ipcsum] = 1695 { "rx ip csum", KSTAT_KV_U_PACKETS, GEM_RXIPCCNT }, 1696 [cad_stat_rx_tcpcsum] = 1697 { "rx tcp csum", KSTAT_KV_U_PACKETS, GEM_RXTCPCCNT }, 1698 [cad_stat_rx_udpcsum] = 1699 { "rx udp csum", KSTAT_KV_U_PACKETS, GEM_RXUDPCCNT }, 1700 }; 1701 1702 void 1703 cad_kstat_attach(struct cad_softc *sc) 1704 { 1705 const struct cad_counter *c; 1706 struct kstat *ks; 1707 struct kstat_kv *kvs; 1708 int i; 1709 1710 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK); 1711 1712 ks = kstat_create(sc->sc_dev.dv_xname, 0, "cad-stats", 0, 1713 KSTAT_T_KV, 0); 1714 if (ks == NULL) 1715 return; 1716 1717 kvs = mallocarray(nitems(cad_counters), sizeof(*kvs), 1718 M_DEVBUF, M_WAITOK | M_ZERO); 1719 for (i = 0; i < nitems(cad_counters); i++) { 1720 c = &cad_counters[i]; 1721 kstat_kv_unit_init(&kvs[i], c->c_name, KSTAT_KV_T_COUNTER64, 1722 c->c_unit); 1723 } 1724 1725 kstat_set_mutex(ks, &sc->sc_kstat_mtx); 1726 ks->ks_softc = sc; 1727 ks->ks_data = kvs; 1728 ks->ks_datalen = nitems(cad_counters) * sizeof(*kvs); 1729 ks->ks_read = cad_kstat_read; 1730 1731 sc->sc_kstat = ks; 1732 kstat_install(ks); 1733 } 1734 1735 int 1736 cad_kstat_read(struct kstat *ks) 1737 { 1738 const struct cad_counter *c; 1739 struct kstat_kv *kvs = ks->ks_data; 1740 struct cad_softc *sc = ks->ks_softc; 1741 uint64_t v64; 1742 int i; 1743 1744 v64 = HREAD4(sc, GEM_OCTTXL); 1745 v64 |= (uint64_t)HREAD4(sc, GEM_OCTTXH) << 32; 1746 kstat_kv_u64(&kvs[cad_stat_tx_toto]) += v64; 1747 1748 v64 = HREAD4(sc, GEM_OCTRXL); 1749 v64 |= (uint64_t)HREAD4(sc, GEM_OCTRXH) << 32; 1750 kstat_kv_u64(&kvs[cad_stat_rx_toto]) += v64; 1751 1752 for (i = 0; i < nitems(cad_counters); i++) { 1753 c = &cad_counters[i]; 1754 if (c->c_reg == 0) 1755 continue; 1756 kstat_kv_u64(&kvs[i]) += HREAD4(sc, c->c_reg); 1757 } 1758 1759 getnanouptime(&ks->ks_updated); 1760 1761 return 0; 1762 } 1763 1764 void 1765 cad_kstat_tick(void *arg) 1766 { 1767 struct cad_softc *sc = arg; 1768 1769 if (mtx_enter_try(&sc->sc_kstat_mtx)) { 1770 cad_kstat_read(sc->sc_kstat); 1771 mtx_leave(&sc->sc_kstat_mtx); 1772 } 1773 } 1774 #endif /* NKSTAT > 0 */ 1775 1776 #ifdef DDB 1777 void 1778 cad_dump(struct cad_softc *sc) 1779 { 1780 struct cad_buf *rxb, *txb; 1781 struct cad_desc32 *desc32; 1782 struct cad_desc64 *desc64; 1783 int i; 1784 1785 printf("isr 0x%x txsr 0x%x rxsr 0x%x\n", HREAD4(sc, GEM_ISR), 1786 HREAD4(sc, GEM_TXSR), HREAD4(sc, GEM_RXSR)); 1787 1788 if (sc->sc_dma64) { 1789 printf("tx q 0x%08x%08x\n", 1790 HREAD4(sc, GEM_TXQBASEHI), 1791 HREAD4(sc, GEM_TXQBASE)); 1792 } else { 1793 printf("tx q 0x%08x\n", 1794 HREAD4(sc, GEM_TXQBASE)); 1795 } 1796 desc32 = (struct cad_desc32 *)sc->sc_txdesc; 1797 desc64 = (struct cad_desc64 *)sc->sc_txdesc; 1798 if (sc->sc_txbuf != NULL) { 1799 for (i = 0; i < CAD_NTXDESC; i++) { 1800 txb = &sc->sc_txbuf[i]; 1801 if (sc->sc_dma64) { 1802 printf(" %3i %p 0x%08x%08x 0x%08x %s%s " 1803 "m %p\n", i, 1804 &desc64[i], 1805 desc64[i].d_addrhi, desc64[i].d_addrlo, 1806 desc64[i].d_status, 1807 sc->sc_tx_cons == i ? ">" : " ", 1808 sc->sc_tx_prod == i ? "<" : " ", 1809 txb->bf_m); 1810 } else { 1811 printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i, 1812 &desc32[i], 1813 desc32[i].d_addr, 1814 desc32[i].d_status, 1815 sc->sc_tx_cons == i ? ">" : " ", 1816 sc->sc_tx_prod == i ? "<" : " ", 1817 txb->bf_m); 1818 } 1819 } 1820 } 1821 1822 if (sc->sc_dma64) { 1823 printf("rx q 0x%08x%08x\n", 1824 HREAD4(sc, GEM_RXQBASEHI), 1825 HREAD4(sc, GEM_RXQBASE)); 1826 } else { 1827 printf("rx q 0x%08x\n", 1828 HREAD4(sc, GEM_RXQBASE)); 1829 } 1830 desc32 = (struct cad_desc32 *)sc->sc_rxdesc; 1831 desc64 = (struct cad_desc64 *)sc->sc_rxdesc; 1832 if (sc->sc_rxbuf != NULL) { 1833 for (i = 0; i < CAD_NRXDESC; i++) { 1834 rxb = &sc->sc_rxbuf[i]; 1835 if (sc->sc_dma64) { 1836 printf(" %3i %p 0x%08x%08x 0x%08x %s%s " 1837 "m %p\n", i, 1838 &desc64[i], 1839 desc64[i].d_addrhi, desc64[i].d_addrlo, 1840 desc64[i].d_status, 1841 sc->sc_rx_cons == i ? ">" : " ", 1842 sc->sc_rx_prod == i ? "<" : " ", 1843 rxb->bf_m); 1844 } else { 1845 printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i, 1846 &desc32[i], 1847 desc32[i].d_addr, 1848 desc32[i].d_status, 1849 sc->sc_rx_cons == i ? ">" : " ", 1850 sc->sc_rx_prod == i ? "<" : " ", 1851 rxb->bf_m); 1852 } 1853 } 1854 } 1855 } 1856 #endif 1857