/*	$OpenBSD: if_dwge.c,v 1.8 2020/12/17 19:50:06 kettenis Exp $	*/
/*
 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Synopsys Designware ethernet controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/ofw_regulator.h>
#include <dev/ofw/fdt.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

/* Registers */

#define GMAC_MAC_CONF		0x0000
#define  GMAC_MAC_CONF_JD		(1 << 22)
#define  GMAC_MAC_CONF_BE		(1 << 21)
#define  GMAC_MAC_CONF_DCRS		(1 << 16)
#define  GMAC_MAC_CONF_PS		(1 << 15)
#define  GMAC_MAC_CONF_FES		(1 << 14)
#define  GMAC_MAC_CONF_LM		(1 << 12)
#define  GMAC_MAC_CONF_DM		(1 << 11)
#define  GMAC_MAC_CONF_TE		(1 << 3)
#define  GMAC_MAC_CONF_RE		(1 << 2)
#define GMAC_MAC_FRM_FILT	0x0004
#define  GMAC_MAC_FRM_FILT_PM		(1 << 4)
#define  GMAC_MAC_FRM_FILT_HMC		(1 << 2)
#define  GMAC_MAC_FRM_FILT_PR		(1 << 0)
#define GMAC_HASH_TAB_HI	0x0008
#define GMAC_HASH_TAB_LO	0x000c
#define GMAC_GMII_ADDR		0x0010
#define  GMAC_GMII_ADDR_PA_SHIFT	11
#define  GMAC_GMII_ADDR_GR_SHIFT	6
#define  GMAC_GMII_ADDR_CR_SHIFT	2
#define  GMAC_GMII_ADDR_CR_MASK		0xf
#define  GMAC_GMII_ADDR_CR_DIV_42	0
#define  GMAC_GMII_ADDR_CR_DIV_62	1
#define  GMAC_GMII_ADDR_CR_DIV_16	2
#define  GMAC_GMII_ADDR_CR_DIV_26	3
#define  GMAC_GMII_ADDR_CR_DIV_102	4
#define  GMAC_GMII_ADDR_CR_DIV_124	5
#define  GMAC_GMII_ADDR_GW		(1 << 1)
#define  GMAC_GMII_ADDR_GB		(1 << 0)
#define GMAC_GMII_DATA		0x0014
#define GMAC_INT_MASK		0x003c
#define  GMAC_INT_MASK_PIM		(1 << 3)
#define  GMAC_INT_MASK_RIM		(1 << 0)
#define GMAC_MAC_ADDR0_HI	0x0040
#define GMAC_MAC_ADDR0_LO	0x0044
#define GMAC_MMC_RX_INT_MSK	0x010c
#define GMAC_MMC_TX_INT_MSK	0x0110
#define GMAC_MMC_IPC_INT_MSK	0x0200
#define GMAC_BUS_MODE		0x1000
#define  GMAC_BUS_MODE_8XPBL		(1 << 24)
#define  GMAC_BUS_MODE_USP		(1 << 23)
#define  GMAC_BUS_MODE_RPBL_MASK	(0x3f << 17)
#define  GMAC_BUS_MODE_RPBL_SHIFT	17
#define  GMAC_BUS_MODE_FB		(1 << 16)
#define  GMAC_BUS_MODE_PBL_MASK		(0x3f << 8)
#define  GMAC_BUS_MODE_PBL_SHIFT	8
#define  GMAC_BUS_MODE_SWR		(1 << 0)
#define GMAC_TX_POLL_DEMAND	0x1004
#define GMAC_RX_DESC_LIST_ADDR	0x100c
#define GMAC_TX_DESC_LIST_ADDR	0x1010
#define GMAC_STATUS		0x1014
#define  GMAC_STATUS_RI			(1 << 6)
#define  GMAC_STATUS_TU			(1 << 2)
#define  GMAC_STATUS_TI			(1 << 0)
#define GMAC_OP_MODE		0x1018
#define  GMAC_OP_MODE_RSF		(1 << 25)
#define  GMAC_OP_MODE_TSF		(1 << 21)
#define  GMAC_OP_MODE_FTF		(1 << 20)
#define  GMAC_OP_MODE_TTC_MASK		(0x7 << 14)
#define  GMAC_OP_MODE_TTC_64		(0x0 << 14)
#define  GMAC_OP_MODE_TTC_128		(0x1 << 14)
#define  GMAC_OP_MODE_ST		(1 << 13)
#define  GMAC_OP_MODE_RTC_MASK		(0x3 << 3)
#define  GMAC_OP_MODE_RTC_64		(0x0 << 3)
#define  GMAC_OP_MODE_RTC_128		(0x3 << 3)
#define  GMAC_OP_MODE_OSF		(1 << 2)
#define  GMAC_OP_MODE_SR		(1 << 1)
#define GMAC_INT_ENA		0x101c
#define  GMAC_INT_ENA_NIE		(1 << 16)
#define  GMAC_INT_ENA_RIE		(1 << 6)
#define  GMAC_INT_ENA_TUE		(1 << 2)
#define  GMAC_INT_ENA_TIE		(1 << 0)

/*
 * DWGE descriptors.
 */

struct dwge_desc {
	uint32_t sd_status;
	uint32_t sd_len;
	uint32_t sd_addr;
	uint32_t sd_next;
};

/* Tx status bits. */
#define TDES0_DB		(1 << 0)
#define TDES0_UF		(1 << 1)
#define TDES0_ED		(1 << 2)
#define TDES0_CC_MASK		(0xf << 3)
#define TDES0_CC_SHIFT		3
#define TDES0_EC		(1 << 8)
#define TDES0_LC		(1 << 9)
#define TDES0_NC		(1 << 10)
#define TDES0_PCE		(1 << 12)
#define TDES0_JT		(1 << 14)
#define TDES0_IHE		(1 << 16)
#define TDES0_OWN		(1 << 31)

/* Rx status bits */
#define RDES0_PE		(1 << 0)
#define RDES0_CE		(1 << 1)
#define RDES0_RE		(1 << 3)
#define RDES0_RWT		(1 << 4)
#define RDES0_FT		(1 << 5)
#define RDES0_LC		(1 << 6)
#define RDES0_IPC		(1 << 7)
#define RDES0_LS		(1 << 8)
#define RDES0_FS		(1 << 9)
#define RDES0_OE		(1 << 11)
#define RDES0_SAF		(1 << 13)
#define RDES0_DE		(1 << 14)
#define RDES0_FL_MASK		0x3fff
#define RDES0_FL_SHIFT		16
#define RDES0_AFM		(1 << 30)
#define RDES0_OWN		(1 << 31)

/* Tx size bits */
#define TDES1_TBS1		(0xfff << 0)
#define TDES1_TCH		(1 << 24)
#define TDES1_DC		(1 << 26)
#define TDES1_CIC_MASK		(0x3 << 27)
#define TDES1_CIC_IP		(1 << 27)
#define TDES1_CIC_NO_PSE	(2 << 27)
#define TDES1_CIC_FULL		(3 << 27)
#define TDES1_FS		(1 << 29)
#define TDES1_LS		(1 << 30)
#define TDES1_IC		(1 << 31)

/* Rx size bits */
#define RDES1_RBS1		(0xfff << 0)
#define RDES1_RCH		(1 << 24)
#define RDES1_DIC		(1 << 31)

struct dwge_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};

#define DWGE_NTXDESC	512
#define DWGE_NTXSEGS	16

#define DWGE_NRXDESC	512

struct dwge_dmamem {
	bus_dmamap_t		tdm_map;
	bus_dma_segment_t	tdm_seg;
	size_t			tdm_size;
	caddr_t			tdm_kva;
};
#define DWGE_DMA_MAP(_tdm)	((_tdm)->tdm_map)
#define DWGE_DMA_LEN(_tdm)	((_tdm)->tdm_size)
#define DWGE_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
#define DWGE_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)

struct dwge_softc {
	struct device		sc_dev;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	int			sc_link;
	int			sc_phyloc;
	int			sc_force_thresh_dma_mode;

	struct dwge_dmamem	*sc_txring;
	struct dwge_buf		*sc_txbuf;
	struct dwge_desc	*sc_txdesc;
	int			sc_tx_prod;
	int			sc_tx_cnt;
	int			sc_tx_cons;

	struct dwge_dmamem	*sc_rxring;
	struct dwge_buf		*sc_rxbuf;
	struct dwge_desc	*sc_rxdesc;
	int			sc_rx_prod;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_cons;

	struct timeout		sc_tick;
	struct timeout		sc_rxto;

	uint32_t		sc_clk;

	bus_size_t		sc_clk_sel;
	uint32_t		sc_clk_sel_125;
	uint32_t		sc_clk_sel_25;
	uint32_t		sc_clk_sel_2_5;
};

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

int	dwge_match(struct device *, void *, void *);
void	dwge_attach(struct device *, struct device *, void *);
void	dwge_setup_allwinner(struct dwge_softc *);
void	dwge_setup_rockchip(struct dwge_softc *);

struct cfattach dwge_ca = {
	sizeof(struct dwge_softc), dwge_match, dwge_attach
};

struct cfdriver dwge_cd = {
	NULL, "dwge", DV_IFNET
};

void	dwge_reset_phy(struct dwge_softc *);

uint32_t dwge_read(struct dwge_softc *, bus_addr_t);
void	dwge_write(struct dwge_softc *, bus_addr_t, uint32_t);

int	dwge_ioctl(struct ifnet *, u_long, caddr_t);
void	dwge_start(struct ifnet *);
void	dwge_watchdog(struct ifnet *);

int	dwge_media_change(struct ifnet *);
void	dwge_media_status(struct ifnet *, struct ifmediareq *);

int	dwge_mii_readreg(struct device *, int, int);
void	dwge_mii_writereg(struct device *, int, int, int);
void	dwge_mii_statchg(struct device *);

void	dwge_lladdr_read(struct dwge_softc *, uint8_t *);
void	dwge_lladdr_write(struct dwge_softc *);

void	dwge_tick(void *);
void	dwge_rxtick(void *);

int	dwge_intr(void *);
void	dwge_tx_proc(struct dwge_softc *);
void	dwge_rx_proc(struct dwge_softc *);

void	dwge_up(struct dwge_softc *);
void	dwge_down(struct dwge_softc *);
void	dwge_iff(struct dwge_softc *);
int	dwge_encap(struct dwge_softc *, struct mbuf *, int *);

void	dwge_reset(struct dwge_softc *);
void	dwge_stop_dma(struct dwge_softc *);

struct dwge_dmamem *
	dwge_dmamem_alloc(struct dwge_softc *, bus_size_t, bus_size_t);
void	dwge_dmamem_free(struct dwge_softc *, struct dwge_dmamem *);
struct mbuf *dwge_alloc_mbuf(struct dwge_softc *, bus_dmamap_t);
void	dwge_fill_rx_ring(struct dwge_softc *);

int
dwge_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac") ||
	    OF_is_compatible(faa->fa_node, "amlogic,meson-axg-dwmac") ||
	    OF_is_compatible(faa->fa_node, "amlogic,meson-g12a-dwmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac"));
}

void
dwge_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwge_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;
	struct ifnet *ifp;
	uint32_t phy, phy_supply;
	uint32_t mode, pbl;
	int node;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}
	sc->sc_dmat = faa->fa_dmat;

	/* Lookup PHY. */
	phy = OF_getpropint(faa->fa_node, "phy", 0);
	if (phy == 0)
		phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node)
		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phyloc = MII_PHY_ANY;

	pinctrl_byname(faa->fa_node, "default");

	/* Enable clock. */
	clock_enable(faa->fa_node, "stmmaceth");
	reset_deassert(faa->fa_node, "stmmaceth");
	delay(5000);

	/* Power up PHY. */
	phy_supply = OF_getpropint(faa->fa_node, "phy-supply", 0);
	if (phy_supply)
		regulator_enable(phy_supply);

	/* Reset PHY */
	dwge_reset_phy(sc);

	sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
	if (sc->sc_clk > 250000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_124;
	else if (sc->sc_clk > 150000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_102;
	else if (sc->sc_clk > 100000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_62;
	else if (sc->sc_clk > 60000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_42;
	else if (sc->sc_clk > 35000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_26;
	else
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_16;

	if (OF_getprop(faa->fa_node, "local-mac-address",
	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
		dwge_lladdr_read(sc, sc->sc_lladdr);
	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));

	timeout_set(&sc->sc_tick, dwge_tick, sc);
	timeout_set(&sc->sc_rxto, dwge_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwge_ioctl;
	ifp->if_start = dwge_start;
	ifp->if_watchdog = dwge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, DWGE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwge_mii_readreg;
	sc->sc_mii.mii_writereg = dwge_mii_writereg;
	sc->sc_mii.mii_statchg = dwge_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwge_media_change, dwge_media_status);

	/* Do hardware specific initializations. */
	if (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac"))
		dwge_setup_allwinner(sc);
	else if (OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac"))
		dwge_setup_rockchip(sc);
	else if (OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac"))
		dwge_setup_rockchip(sc);
	else if (OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac"))
		dwge_setup_rockchip(sc);
	else if (OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac"))
		dwge_setup_rockchip(sc);

	if (OF_getproplen(faa->fa_node, "snps,force_thresh_dma_mode") == 0)
		sc->sc_force_thresh_dma_mode = 1;

	dwge_reset(sc);

	/* Configure MAC. */
	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_JD | GMAC_MAC_CONF_BE | GMAC_MAC_CONF_DCRS);
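	/*
	 * The bits set above: JD disables the jabber timer, BE allows
	 * frame bursting in half-duplex mode and DCRS makes the MAC
	 * ignore carrier sense while transmitting.
	 */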

	/* Configure DMA engine. */
	mode = dwge_read(sc, GMAC_BUS_MODE);
	mode |= GMAC_BUS_MODE_8XPBL | GMAC_BUS_MODE_USP;
	mode &= ~(GMAC_BUS_MODE_RPBL_MASK | GMAC_BUS_MODE_PBL_MASK);
	pbl = OF_getpropint(faa->fa_node, "snps,pbl", 8);
	mode |= pbl << GMAC_BUS_MODE_RPBL_SHIFT;
	mode |= pbl << GMAC_BUS_MODE_PBL_SHIFT;
	if (OF_getproplen(faa->fa_node, "snps,fixed-burst") == 0)
		mode |= GMAC_BUS_MODE_FB;
	dwge_write(sc, GMAC_BUS_MODE, mode);

	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Disable interrupts. */
	dwge_write(sc, GMAC_INT_ENA, 0);
	dwge_write(sc, GMAC_INT_MASK, GMAC_INT_MASK_PIM | GMAC_INT_MASK_RIM);
	dwge_write(sc, GMAC_MMC_RX_INT_MSK, 0xffffffff);
	dwge_write(sc, GMAC_MMC_TX_INT_MSK, 0xffffffff);
	dwge_write(sc, GMAC_MMC_IPC_INT_MSK, 0xffffffff);

	fdt_intr_establish(faa->fa_node, IPL_NET, dwge_intr, sc,
	    sc->sc_dev.dv_xname);
}

void
dwge_reset_phy(struct dwge_softc *sc)
{
	uint32_t *gpio;
	uint32_t delays[3];
	int active = 1;
	int len;

	len = OF_getproplen(sc->sc_node, "snps,reset-gpio");
	if (len <= 0)
		return;

	gpio = malloc(len, M_TEMP, M_WAITOK);

	/* Gather information. */
	OF_getpropintarray(sc->sc_node, "snps,reset-gpio", gpio, len);
	if (OF_getproplen(sc->sc_node, "snps,reset-active-low") == 0)
		active = 0;
	delays[0] = delays[1] = delays[2] = 0;
	OF_getpropintarray(sc->sc_node, "snps,reset-delays-us", delays,
	    sizeof(delays));
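	/*
	 * "snps,reset-delays-us" holds the pre-reset, reset-pulse and
	 * post-reset delays in microseconds; missing entries default to 0.
	 */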

	/* Perform reset sequence. */
	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(gpio, !active);
	delay(delays[0]);
	gpio_controller_set_pin(gpio, active);
	delay(delays[1]);
	gpio_controller_set_pin(gpio, !active);
	delay(delays[2]);

	free(gpio, M_TEMP, len);
}

uint32_t
dwge_read(struct dwge_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}

void
dwge_write(struct dwge_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}

void
dwge_lladdr_read(struct dwge_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwge_read(sc, GMAC_MAC_ADDR0_HI);
	maclo = dwge_read(sc, GMAC_MAC_ADDR0_LO);

	lladdr[0] = (maclo >> 0) & 0xff;
	lladdr[1] = (maclo >> 8) & 0xff;
	lladdr[2] = (maclo >> 16) & 0xff;
	lladdr[3] = (maclo >> 24) & 0xff;
	lladdr[4] = (machi >> 0) & 0xff;
	lladdr[5] = (machi >> 8) & 0xff;
}

void
dwge_lladdr_write(struct dwge_softc *sc)
{
	dwge_write(sc, GMAC_MAC_ADDR0_HI,
	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
	dwge_write(sc, GMAC_MAC_ADDR0_LO,
	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
}

void
dwge_start(struct ifnet *ifp)
{
	struct dwge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	idx = sc->sc_tx_prod;
	while ((sc->sc_txdesc[idx].sd_status & TDES0_OWN) == 0) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		error = dwge_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

		/* Now we are committed to transmit the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

int
dwge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwge_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwge_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dwge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
dwge_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}

int
dwge_media_change(struct ifnet *ifp)
{
	struct dwge_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
dwge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dwge_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

int
dwge_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwge_softc *sc = (void *)self;
	int n;

	dwge_write(sc, GMAC_GMII_ADDR,
	    sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
	    phy << GMAC_GMII_ADDR_PA_SHIFT |
	    reg << GMAC_GMII_ADDR_GR_SHIFT |
	    GMAC_GMII_ADDR_GB);
	for (n = 0; n < 1000; n++) {
		if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
			return dwge_read(sc, GMAC_GMII_DATA);
		delay(10);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
dwge_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwge_softc *sc = (void *)self;
	int n;

	dwge_write(sc, GMAC_GMII_DATA, val);
	dwge_write(sc, GMAC_GMII_ADDR,
	    sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
	    phy << GMAC_GMII_ADDR_PA_SHIFT |
	    reg << GMAC_GMII_ADDR_GR_SHIFT |
	    GMAC_GMII_ADDR_GW | GMAC_GMII_ADDR_GB);
	for (n = 0; n < 1000; n++) {
		if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
			return;
		delay(10);
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
dwge_mii_statchg(struct device *self)
{
	struct dwge_softc *sc = (void *)self;
	uint32_t conf;

	conf = dwge_read(sc, GMAC_MAC_CONF);
	conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		sc->sc_link = 1;
		break;
	case IFM_100_TX:
		conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
		sc->sc_link = 1;
		break;
	case IFM_10_T:
		conf |= GMAC_MAC_CONF_PS;
		sc->sc_link = 1;
		break;
	default:
		sc->sc_link = 0;
		return;
	}

	if (sc->sc_link == 0)
		return;

	conf &= ~GMAC_MAC_CONF_DM;
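	/* DM selects full-duplex operation. */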
	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
		conf |= GMAC_MAC_CONF_DM;

	/* XXX: RX/TX flow control? */

	dwge_write(sc, GMAC_MAC_CONF, conf);
}

void
dwge_tick(void *arg)
{
	struct dwge_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
dwge_rxtick(void *arg)
{
	struct dwge_softc *sc = arg;
	uint32_t mode;
	int s;

	s = splnet();

	mode = dwge_read(sc, GMAC_OP_MODE);
	dwge_write(sc, GMAC_OP_MODE, mode & ~GMAC_OP_MODE_SR);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));
	dwge_write(sc, GMAC_OP_MODE, mode);

	splx(s);
}

int
dwge_intr(void *arg)
{
	struct dwge_softc *sc = arg;
	uint32_t reg;

	reg = dwge_read(sc, GMAC_STATUS);
	dwge_write(sc, GMAC_STATUS, reg);

	if (reg & GMAC_STATUS_RI)
		dwge_rx_proc(sc);

	if (reg & GMAC_STATUS_TI ||
	    reg & GMAC_STATUS_TU)
		dwge_tx_proc(sc);

	return (1);
}

void
dwge_tx_proc(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_desc *txd;
	struct dwge_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring), 0,
	    DWGE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cnt > 0) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWGE_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->sd_status & TDES0_OWN)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;
		sc->sc_tx_cnt--;

		if (sc->sc_tx_cons == (DWGE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_status = 0;
	}

	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}

void
dwge_rx_proc(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_desc *rxd;
	struct dwge_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
	    DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWGE_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_status & RDES0_OWN)
			break;

		len = (rxd->sd_status >> RDES0_FL_SHIFT) & RDES0_FL_MASK;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		m = rxb->tb_m;
		rxb->tb_m = NULL;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		if_rxr_put(&sc->sc_rx_ring, 1);
		if (sc->sc_rx_cons == (DWGE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
	    DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

}

void
dwge_up(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_buf *txb, *rxb;
	uint32_t mode;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwge_dmamem_alloc(sc,
	    DWGE_NTXDESC * sizeof(struct dwge_desc), 8);
	sc->sc_txdesc = DWGE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwge_buf) * DWGE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWGE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWGE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;

		sc->sc_txdesc[i].sd_next =
		    DWGE_DMA_DVA(sc->sc_txring) +
		    ((i+1) % DWGE_NTXDESC) * sizeof(struct dwge_desc);
	}

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
	    0, DWGE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	dwge_write(sc, GMAC_TX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_txring));

	/* Allocate descriptor ring. */
	sc->sc_rxring = dwge_dmamem_alloc(sc,
	    DWGE_NRXDESC * sizeof(struct dwge_desc), 8);
	sc->sc_rxdesc = DWGE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwge_buf) * DWGE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWGE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;

		sc->sc_rxdesc[i].sd_next =
		    DWGE_DMA_DVA(sc->sc_rxring) +
		    ((i+1) % DWGE_NRXDESC) * sizeof(struct dwge_desc);
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWGE_NRXDESC);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));

	dwge_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwge_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwge_write(sc, GMAC_INT_ENA, GMAC_INT_ENA_NIE |
	    GMAC_INT_ENA_RIE | GMAC_INT_ENA_TIE | GMAC_INT_ENA_TUE);

	mode = dwge_read(sc, GMAC_OP_MODE);
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~(GMAC_OP_MODE_TSF | GMAC_OP_MODE_TTC_MASK);
		mode |= GMAC_OP_MODE_TTC_128;
		mode &= ~(GMAC_OP_MODE_RSF | GMAC_OP_MODE_RTC_MASK);
		mode |= GMAC_OP_MODE_RTC_128;
	} else {
		mode |= GMAC_OP_MODE_TSF | GMAC_OP_MODE_OSF;
		mode |= GMAC_OP_MODE_RSF;
	}
	dwge_write(sc, GMAC_OP_MODE, mode | GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);

	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
dwge_down(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_buf *txb, *rxb;
	uint32_t dmactrl;
	int i;

	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	dwge_stop_dma(sc);

	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc,
	    GMAC_MAC_CONF) & ~(GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE));

	dmactrl = dwge_read(sc, GMAC_OP_MODE);
	dmactrl &= ~(GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);
	dwge_write(sc, GMAC_OP_MODE, dmactrl);

	dwge_write(sc, GMAC_INT_ENA, 0);

	for (i = 0; i < DWGE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwge_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWGE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwge_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

void
dwge_iff(struct dwge_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= GMAC_MAC_FRM_FILT_PM;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= GMAC_MAC_FRM_FILT_PR;
	} else {
		reg |= GMAC_MAC_FRM_FILT_HMC;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

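			/*
			 * The hash filter uses six bits derived from the
			 * CRC-32 of the address: bit 5 selects the HI or
			 * LO hash table register, bits 4-0 the bit in it.
			 */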
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwge_lladdr_write(sc);

	dwge_write(sc, GMAC_HASH_TAB_HI, hash[1]);
	dwge_write(sc, GMAC_HASH_TAB_LO, hash[0]);

	dwge_write(sc, GMAC_MAC_FRM_FILT, reg);
}

int
dwge_encap(struct dwge_softc *sc, struct mbuf *m, int *idx)
{
	struct dwge_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	if (map->dm_nsegs > (DWGE_NTXDESC - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->sd_addr = map->dm_segs[i].ds_addr;
		txd->sd_len = map->dm_segs[i].ds_len | TDES1_TCH;
		if (i == 0)
			txd->sd_len |= TDES1_FS;
		if (i == (map->dm_nsegs - 1))
			txd->sd_len |= TDES1_LS | TDES1_IC;
		if (i != 0)
			txd->sd_status = TDES0_OWN;

		bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWGE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	txd_start->sd_status = TDES0_OWN;
	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_TX_POLL_DEMAND, 0xffffffff);

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	return (0);
}

void
dwge_reset(struct dwge_softc *sc)
{
	int n;

	dwge_stop_dma(sc);

	dwge_write(sc, GMAC_BUS_MODE, dwge_read(sc, GMAC_BUS_MODE) |
	    GMAC_BUS_MODE_SWR);

	for (n = 0; n < 30000; n++) {
		if ((dwge_read(sc, GMAC_BUS_MODE) &
		    GMAC_BUS_MODE_SWR) == 0)
			return;
		delay(10);
	}

	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
}

void
dwge_stop_dma(struct dwge_softc *sc)
{
	uint32_t dmactrl;

	/* Stop DMA. */
	dmactrl = dwge_read(sc, GMAC_OP_MODE);
	dmactrl &= ~GMAC_OP_MODE_ST;
	dmactrl |= GMAC_OP_MODE_FTF;
	dwge_write(sc, GMAC_OP_MODE, dmactrl);
}

struct dwge_dmamem *
dwge_dmamem_alloc(struct dwge_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwge_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

void
dwge_dmamem_free(struct dwge_softc *sc, struct dwge_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

struct mbuf *
dwge_alloc_mbuf(struct dwge_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

void
dwge_fill_rx_ring(struct dwge_softc *sc)
{
	struct dwge_desc *rxd;
	struct dwge_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWGE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwge_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->sd_len = rxb->tb_map->dm_segs[0].ds_len | RDES1_RCH;
		rxd->sd_addr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_status = RDES0_OWN;

		if (sc->sc_rx_prod == (DWGE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}

/*
 * Allwinner A20/A31.
 */

void
dwge_setup_allwinner(struct dwge_softc *sc)
{
	char phy_mode[8];
	uint32_t freq;

	/* default to RGMII */
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, sizeof(phy_mode));
	if (strcmp(phy_mode, "mii") == 0)
		freq = 25000000;
	else
		freq = 125000000;
	clock_set_frequency(sc->sc_node, "allwinner_gmac_tx", freq);
}
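
/*
 * The Rockchip GRF registers take a write-enable mask in the upper 16
 * bits of each 32-bit word, which is why the register definitions below
 * encode both the field mask and its value.
 */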

/*
 * Rockchip RK3288/RK3399.
 */

/* RK3308 registers */
#define RK3308_GRF_MAC_CON0	0x04a0
#define  RK3308_MAC_SPEED_100M		((0x1 << 0) << 16 | (0x1 << 0))
#define  RK3308_MAC_SPEED_10M		((0x1 << 0) << 16 | (0x0 << 0))
#define  RK3308_INTF_SEL_RMII		((0x1 << 4) << 16 | (0x1 << 4))

/* RK3288 registers */
#define RK3288_GRF_SOC_CON1	0x0248
#define  RK3288_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 6) << 16 | (0x1 << 6))
#define  RK3288_GMAC_PHY_INTF_SEL_RMII	((0x7 << 6) << 16 | (0x4 << 6))
#define  RK3288_RMII_MODE_RMII		((1 << 14) << 16 | (1 << 14))
#define  RK3288_RMII_MODE_MII		((1 << 14) << 16 | (0 << 14))
#define  RK3288_GMAC_CLK_SEL_125	((0x3 << 12) << 16 | (0x0 << 12))
#define  RK3288_GMAC_CLK_SEL_25		((0x3 << 12) << 16 | (0x3 << 12))
#define  RK3288_GMAC_CLK_SEL_2_5	((0x3 << 12) << 16 | (0x2 << 12))

#define RK3288_GRF_SOC_CON3	0x0250
#define  RK3288_GMAC_RXCLK_DLY_ENA	((1 << 15) << 16 | (1 << 15))
#define  RK3288_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 7) << 16 | ((val) << 7))
#define  RK3288_GMAC_TXCLK_DLY_ENA	((1 << 14) << 16 | (1 << 14))
#define  RK3288_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

/* RK3328 registers */
#define RK3328_GRF_MAC_CON0	0x0900
#define  RK3328_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 7) << 16 | ((val) << 7))
#define  RK3328_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

#define RK3328_GRF_MAC_CON1	0x0904
#define  RK3328_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 4) << 16 | (0x1 << 4))
#define  RK3328_GMAC_PHY_INTF_SEL_RMII	((0x7 << 4) << 16 | (0x4 << 4))
#define  RK3328_RMII_MODE_RMII		((1 << 9) << 16 | (1 << 9))
#define  RK3328_RMII_MODE_MII		((1 << 9) << 16 | (0 << 9))
#define  RK3328_GMAC_CLK_SEL_125	((0x3 << 11) << 16 | (0x0 << 11))
#define  RK3328_GMAC_CLK_SEL_25		((0x3 << 11) << 16 | (0x3 << 11))
#define  RK3328_GMAC_CLK_SEL_2_5	((0x3 << 11) << 16 | (0x2 << 11))
#define  RK3328_GMAC_RXCLK_DLY_ENA	((1 << 1) << 16 | (1 << 1))
#define  RK3328_GMAC_TXCLK_DLY_ENA	((1 << 0) << 16 | (1 << 0))

/* RK3399 registers */
#define RK3399_GRF_SOC_CON5	0xc214
#define  RK3399_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 9) << 16 | (0x1 << 9))
#define  RK3399_GMAC_PHY_INTF_SEL_RMII	((0x7 << 9) << 16 | (0x4 << 9))
#define  RK3399_RMII_MODE_RMII		((1 << 6) << 16 | (1 << 6))
#define  RK3399_RMII_MODE_MII		((1 << 6) << 16 | (0 << 6))
#define  RK3399_GMAC_CLK_SEL_125	((0x3 << 4) << 16 | (0x0 << 4))
#define  RK3399_GMAC_CLK_SEL_25		((0x3 << 4) << 16 | (0x3 << 4))
#define  RK3399_GMAC_CLK_SEL_2_5	((0x3 << 4) << 16 | (0x2 << 4))
#define RK3399_GRF_SOC_CON6	0xc218
#define  RK3399_GMAC_RXCLK_DLY_ENA	((1 << 15) << 16 | (1 << 15))
#define  RK3399_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 8) << 16 | ((val) << 8))
#define  RK3399_GMAC_TXCLK_DLY_ENA	((1 << 7) << 16 | (1 << 7))
#define  RK3399_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

void	dwge_mii_statchg_rockchip(struct device *);

void
dwge_setup_rockchip(struct dwge_softc *sc)
{
	struct regmap *rm;
	uint32_t grf;
	int tx_delay, rx_delay;
	char clock_mode[8];

	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
	rm = regmap_byphandle(grf);
	if (rm == NULL)
		return;

	clock_set_assigned(sc->sc_node);
	clock_enable(sc->sc_node, "mac_clk_rx");
	clock_enable(sc->sc_node, "mac_clk_tx");
	clock_enable(sc->sc_node, "aclk_mac");
	clock_enable(sc->sc_node, "pclk_mac");
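
	/*
	 * The RGMII clock delay lines are programmed from the "tx_delay"
	 * and "rx_delay" device tree properties read below.
	 */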

	tx_delay = OF_getpropint(sc->sc_node, "tx_delay", 0x30);
	rx_delay = OF_getpropint(sc->sc_node, "rx_delay", 0x10);

	if (OF_is_compatible(sc->sc_node, "rockchip,rk3288-gmac")) {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3288_GRF_SOC_CON1,
		    RK3288_GMAC_PHY_INTF_SEL_RGMII | RK3288_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3288_GRF_SOC_CON3,
		    RK3288_GMAC_TXCLK_DLY_ENA | RK3288_GMAC_RXCLK_DLY_ENA |
		    RK3288_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3288_GMAC_CLK_RX_DL_CFG(rx_delay));

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3288_GRF_SOC_CON1;
		sc->sc_clk_sel_2_5 = RK3288_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3288_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3288_GMAC_CLK_SEL_125;
	} else if (OF_is_compatible(sc->sc_node, "rockchip,rk3308-mac")) {
		/* Use RMII interface. */
		regmap_write_4(rm, RK3308_GRF_MAC_CON0,
		    RK3308_INTF_SEL_RMII | RK3308_MAC_SPEED_100M);

		/* Adjust MAC clock if necessary. */
		OF_getprop(sc->sc_node, "clock_in_out", clock_mode,
		    sizeof(clock_mode));
		if (strcmp(clock_mode, "output") == 0) {
			clock_set_frequency(sc->sc_node, "stmmaceth",
			    50000000);
			sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_26;
		}

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3308_GRF_MAC_CON0;
		sc->sc_clk_sel_2_5 = RK3308_MAC_SPEED_10M;
		sc->sc_clk_sel_25 = RK3308_MAC_SPEED_100M;
	} else if (OF_is_compatible(sc->sc_node, "rockchip,rk3328-gmac")) {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3328_GRF_MAC_CON1,
		    RK3328_GMAC_PHY_INTF_SEL_RGMII | RK3328_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3328_GRF_MAC_CON0,
		    RK3328_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3328_GMAC_CLK_RX_DL_CFG(rx_delay));
		regmap_write_4(rm, RK3328_GRF_MAC_CON1,
		    RK3328_GMAC_TXCLK_DLY_ENA | RK3328_GMAC_RXCLK_DLY_ENA);

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3328_GRF_MAC_CON1;
		sc->sc_clk_sel_2_5 = RK3328_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3328_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3328_GMAC_CLK_SEL_125;
	} else {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3399_GRF_SOC_CON5,
		    RK3399_GMAC_PHY_INTF_SEL_RGMII | RK3399_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3399_GRF_SOC_CON6,
		    RK3399_GMAC_TXCLK_DLY_ENA | RK3399_GMAC_RXCLK_DLY_ENA |
		    RK3399_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3399_GMAC_CLK_RX_DL_CFG(rx_delay));

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3399_GRF_SOC_CON5;
		sc->sc_clk_sel_2_5 = RK3399_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3399_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3399_GMAC_CLK_SEL_125;
	}

	sc->sc_mii.mii_statchg = dwge_mii_statchg_rockchip;
}

void
dwge_mii_statchg_rockchip(struct device *self)
{
	struct dwge_softc *sc = (void *)self;
	struct regmap *rm;
	uint32_t grf;
	uint32_t gmac_clk_sel = 0;

	dwge_mii_statchg(self);

	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
	rm = regmap_byphandle(grf);
	if (rm == NULL)
		return;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10_T:
		gmac_clk_sel = sc->sc_clk_sel_2_5;
		break;
	case IFM_100_TX:
		gmac_clk_sel = sc->sc_clk_sel_25;
		break;
	case IFM_1000_T:
		gmac_clk_sel = sc->sc_clk_sel_125;
		break;
	}

	regmap_write_4(rm, sc->sc_clk_sel, gmac_clk_sel);
}