/*	$OpenBSD: if_dwge.c,v 1.2 2019/10/07 00:40:04 jmatthew Exp $	*/
/*
 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Synopsys Designware ethernet controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/ofw_regulator.h>
#include <dev/ofw/fdt.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

/* Registers */

#define GMAC_MAC_CONF			0x0000
#define  GMAC_MAC_CONF_JD		(1 << 22)
#define  GMAC_MAC_CONF_BE		(1 << 21)
#define  GMAC_MAC_CONF_DCRS		(1 << 16)
#define  GMAC_MAC_CONF_PS		(1 << 15)
#define  GMAC_MAC_CONF_FES		(1 << 14)
#define  GMAC_MAC_CONF_LM		(1 << 12)
#define  GMAC_MAC_CONF_DM		(1 << 11)
#define  GMAC_MAC_CONF_TE		(1 << 3)
#define  GMAC_MAC_CONF_RE		(1 << 2)
#define GMAC_MAC_FRM_FILT		0x0004
#define  GMAC_MAC_FRM_FILT_PM		(1 << 4)
#define  GMAC_MAC_FRM_FILT_HMC		(1 << 2)
#define  GMAC_MAC_FRM_FILT_PR		(1 << 0)
#define GMAC_HASH_TAB_HI		0x0008
#define GMAC_HASH_TAB_LO		0x000c
#define GMAC_GMII_ADDR			0x0010
#define  GMAC_GMII_ADDR_PA_SHIFT	11
#define  GMAC_GMII_ADDR_GR_SHIFT	6
#define  GMAC_GMII_ADDR_CR_SHIFT	2
#define  GMAC_GMII_ADDR_CR_MASK		0xf
#define  GMAC_GMII_ADDR_CR_DIV_42	0
#define  GMAC_GMII_ADDR_CR_DIV_62	1
#define  GMAC_GMII_ADDR_CR_DIV_16	2
#define  GMAC_GMII_ADDR_CR_DIV_26	3
#define  GMAC_GMII_ADDR_CR_DIV_102	4
#define  GMAC_GMII_ADDR_CR_DIV_124	5
#define  GMAC_GMII_ADDR_GW		(1 << 1)
#define  GMAC_GMII_ADDR_GB		(1 << 0)
#define GMAC_GMII_DATA			0x0014
#define GMAC_INT_MASK			0x003c
#define  GMAC_INT_MASK_PIM		(1 << 3)
#define  GMAC_INT_MASK_RIM		(1 << 0)
#define GMAC_MAC_ADDR0_HI		0x0040
#define GMAC_MAC_ADDR0_LO		0x0044
#define GMAC_MMC_RX_INT_MSK		0x010c
#define GMAC_MMC_TX_INT_MSK		0x0110
#define GMAC_MMC_IPC_INT_MSK		0x0200
#define GMAC_BUS_MODE			0x1000
#define  GMAC_BUS_MODE_8XPBL		(1 << 24)
#define  GMAC_BUS_MODE_USP		(1 << 23)
#define  GMAC_BUS_MODE_RPBL_MASK	(0x3f << 17)
#define  GMAC_BUS_MODE_RPBL_SHIFT	17
#define  GMAC_BUS_MODE_FB		(1 << 16)
#define  GMAC_BUS_MODE_PBL_MASK		(0x3f << 8)
#define  GMAC_BUS_MODE_PBL_SHIFT	8
#define  GMAC_BUS_MODE_SWR		(1 << 0)
#define GMAC_TX_POLL_DEMAND		0x1004
#define GMAC_RX_DESC_LIST_ADDR		0x100c
#define GMAC_TX_DESC_LIST_ADDR		0x1010
#define GMAC_STATUS			0x1014
#define  GMAC_STATUS_RI			(1 << 6)
#define  GMAC_STATUS_TU			(1 << 2)
#define  GMAC_STATUS_TI			(1 << 0)
#define GMAC_OP_MODE			0x1018
#define  GMAC_OP_MODE_RSF		(1 << 25)
#define  GMAC_OP_MODE_TSF		(1 << 21)
#define  GMAC_OP_MODE_FTF		(1 << 20)
#define  GMAC_OP_MODE_TTC_MASK		(0x7 << 14)
#define  GMAC_OP_MODE_TTC_64		(0x0 << 14)
#define  GMAC_OP_MODE_TTC_128		(0x1 << 14)
#define  GMAC_OP_MODE_ST		(1 << 13)
#define  GMAC_OP_MODE_RTC_MASK		(0x3 << 3)
#define  GMAC_OP_MODE_RTC_64		(0x0 << 3)
#define  GMAC_OP_MODE_RTC_128		(0x3 << 3)
#define  GMAC_OP_MODE_OSF		(1 << 2)
#define  GMAC_OP_MODE_SR		(1 << 1)
#define GMAC_INT_ENA			0x101c
#define  GMAC_INT_ENA_NIE		(1 << 16)
#define  GMAC_INT_ENA_RIE		(1 << 6)
#define  GMAC_INT_ENA_TUE		(1 << 2)
#define  GMAC_INT_ENA_TIE		(1 << 0)

/*
 * DWGE descriptors.
 */

struct dwge_desc {
	uint32_t sd_status;
	uint32_t sd_len;
	uint32_t sd_addr;
	uint32_t sd_next;
};

/* Tx status bits. */
#define TDES0_DB		(1 << 0)
#define TDES0_UF		(1 << 1)
#define TDES0_ED		(1 << 2)
#define TDES0_CC_MASK		(0xf << 3)
#define TDES0_CC_SHIFT		3
#define TDES0_EC		(1 << 8)
#define TDES0_LC		(1 << 9)
#define TDES0_NC		(1 << 10)
#define TDES0_PCE		(1 << 12)
#define TDES0_JT		(1 << 14)
#define TDES0_IHE		(1 << 16)
#define TDES0_OWN		(1 << 31)

/* Rx status bits */
#define RDES0_PE		(1 << 0)
#define RDES0_CE		(1 << 1)
#define RDES0_RE		(1 << 3)
#define RDES0_RWT		(1 << 4)
#define RDES0_FT		(1 << 5)
#define RDES0_LC		(1 << 6)
#define RDES0_IPC		(1 << 7)
#define RDES0_LS		(1 << 8)
#define RDES0_FS		(1 << 9)
#define RDES0_OE		(1 << 11)
#define RDES0_SAF		(1 << 13)
#define RDES0_DE		(1 << 14)
#define RDES0_FL_MASK		0x3fff
#define RDES0_FL_SHIFT		16
#define RDES0_AFM		(1 << 30)
#define RDES0_OWN		(1 << 31)

/* Tx size bits */
#define TDES1_TBS1		(0xfff << 0)
#define TDES1_TCH		(1 << 24)
#define TDES1_DC		(1 << 26)
#define TDES1_CIC_MASK		(0x3 << 27)
#define TDES1_CIC_IP		(1 << 27)
#define TDES1_CIC_NO_PSE	(2 << 27)
#define TDES1_CIC_FULL		(3 << 27)
#define TDES1_FS		(1 << 29)
#define TDES1_LS		(1 << 30)
#define TDES1_IC		(1 << 31)

/* Rx size bits */
#define RDES1_RBS1		(0xfff << 0)
#define RDES1_RCH		(1 << 24)
#define RDES1_DIC		(1 << 31)

struct dwge_buf {
	bus_dmamap_t	tb_map;
	struct mbuf	*tb_m;
};

#define DWGE_NTXDESC	512
#define DWGE_NTXSEGS	16

#define DWGE_NRXDESC	512

struct dwge_dmamem {
	bus_dmamap_t		tdm_map;
	bus_dma_segment_t	tdm_seg;
	size_t			tdm_size;
	caddr_t			tdm_kva;
};
#define DWGE_DMA_MAP(_tdm)	((_tdm)->tdm_map)
#define DWGE_DMA_LEN(_tdm)	((_tdm)->tdm_size)
#define DWGE_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
#define DWGE_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)

struct dwge_softc {
	struct device		sc_dev;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	int			sc_link;
	int			sc_phyloc;
	int			sc_force_thresh_dma_mode;

	struct dwge_dmamem	*sc_txring;
	struct dwge_buf		*sc_txbuf;
	struct dwge_desc	*sc_txdesc;
	int			sc_tx_prod;
	int			sc_tx_cnt;
	int			sc_tx_cons;

	struct dwge_dmamem	*sc_rxring;
	struct dwge_buf		*sc_rxbuf;
	struct dwge_desc	*sc_rxdesc;
	int			sc_rx_prod;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_cons;

	struct timeout		sc_tick;
	struct timeout		sc_rxto;

	uint32_t		sc_clk;

	bus_size_t		sc_clk_sel;
	uint32_t		sc_clk_sel_125;
	uint32_t		sc_clk_sel_25;
	uint32_t		sc_clk_sel_2_5;
};

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

int	dwge_match(struct device *, void *, void *);
void	dwge_attach(struct device *, struct device *, void *);
void	dwge_setup_allwinner(struct dwge_softc *);
void	dwge_setup_rockchip(struct dwge_softc *);

struct cfattach dwge_ca = {
	sizeof(struct dwge_softc), dwge_match, dwge_attach
};

struct cfdriver dwge_cd = {
	NULL, "dwge", DV_IFNET
};

void	dwge_reset_phy(struct dwge_softc *);

uint32_t dwge_read(struct dwge_softc *, bus_addr_t);
void	dwge_write(struct dwge_softc *, bus_addr_t, uint32_t);

int	dwge_ioctl(struct ifnet *, u_long, caddr_t);
void	dwge_start(struct ifnet *);
void	dwge_watchdog(struct ifnet *);

int	dwge_media_change(struct ifnet *);
void	dwge_media_status(struct ifnet *, struct ifmediareq *);

int	dwge_mii_readreg(struct device *, int, int);
void	dwge_mii_writereg(struct device *, int, int, int);
void	dwge_mii_statchg(struct device *);

void	dwge_lladdr_read(struct dwge_softc *, uint8_t *);
void	dwge_lladdr_write(struct dwge_softc *);

void	dwge_tick(void *);
void	dwge_rxtick(void *);

int	dwge_intr(void *);
void	dwge_tx_proc(struct dwge_softc *);
void	dwge_rx_proc(struct dwge_softc *);

void	dwge_up(struct dwge_softc *);
void	dwge_down(struct dwge_softc *);
void	dwge_iff(struct dwge_softc *);
int	dwge_encap(struct dwge_softc *, struct mbuf *, int *);

void	dwge_reset(struct dwge_softc *);
void	dwge_stop_dma(struct dwge_softc *);

struct dwge_dmamem *
	dwge_dmamem_alloc(struct dwge_softc *, bus_size_t, bus_size_t);
void	dwge_dmamem_free(struct dwge_softc *, struct dwge_dmamem *);
struct mbuf *dwge_alloc_mbuf(struct dwge_softc *, bus_dmamap_t);
void	dwge_fill_rx_ring(struct dwge_softc *);

int
dwge_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac") ||
	    OF_is_compatible(faa->fa_node, "amlogic,meson-axg-dwmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac"));
}

void
dwge_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwge_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;
	struct ifnet *ifp;
	uint32_t phy, phy_supply;
	uint32_t mode, pbl;
	int node;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}
	sc->sc_dmat = faa->fa_dmat;

	/* Lookup PHY. */
	phy = OF_getpropint(faa->fa_node, "phy", 0);
	if (phy == 0)
		phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
	node = OF_getnodebyphandle(phy);
	if (node)
		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
	else
		sc->sc_phyloc = MII_PHY_ANY;

	pinctrl_byname(faa->fa_node, "default");

	/* Enable clock. */
	clock_enable(faa->fa_node, "stmmaceth");
	reset_deassert(faa->fa_node, "stmmaceth");
	delay(5000);

	/* Power up PHY. */
	phy_supply = OF_getpropint(faa->fa_node, "phy-supply", 0);
	if (phy_supply)
		regulator_enable(phy_supply);

	/* Reset PHY */
	dwge_reset_phy(sc);

	sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
	if (sc->sc_clk > 250000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_124;
	else if (sc->sc_clk > 150000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_102;
	else if (sc->sc_clk > 100000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_62;
	else if (sc->sc_clk > 60000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_42;
	else if (sc->sc_clk > 35000000)
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_26;
	else
		sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_16;

	if (OF_getprop(faa->fa_node, "local-mac-address",
	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
		dwge_lladdr_read(sc, sc->sc_lladdr);
	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));

	timeout_set(&sc->sc_tick, dwge_tick, sc);
	timeout_set(&sc->sc_rxto, dwge_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwge_ioctl;
	ifp->if_start = dwge_start;
	ifp->if_watchdog = dwge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, DWGE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwge_mii_readreg;
	sc->sc_mii.mii_writereg = dwge_mii_writereg;
	sc->sc_mii.mii_statchg = dwge_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwge_media_change, dwge_media_status);

	/* Do hardware specific initializations. */
	if (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac"))
		dwge_setup_allwinner(sc);
	else if (OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac"))
		dwge_setup_rockchip(sc);
	else if (OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac"))
		dwge_setup_rockchip(sc);
	else if (OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac"))
		dwge_setup_rockchip(sc);

	if (OF_getproplen(faa->fa_node, "snps,force_thresh_dma_mode") == 0)
		sc->sc_force_thresh_dma_mode = 1;

	dwge_reset(sc);

	/* Configure MAC. */
	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_JD | GMAC_MAC_CONF_BE | GMAC_MAC_CONF_DCRS);

	/* Configure DMA engine. */
	mode = dwge_read(sc, GMAC_BUS_MODE);
	mode |= GMAC_BUS_MODE_8XPBL | GMAC_BUS_MODE_USP;
	mode &= ~(GMAC_BUS_MODE_RPBL_MASK | GMAC_BUS_MODE_PBL_MASK);
	pbl = OF_getpropint(faa->fa_node, "snps,pbl", 8);
	mode |= pbl << GMAC_BUS_MODE_RPBL_SHIFT;
	mode |= pbl << GMAC_BUS_MODE_PBL_SHIFT;
	if (OF_getproplen(faa->fa_node, "snps,fixed-burst") == 0)
		mode |= GMAC_BUS_MODE_FB;
	dwge_write(sc, GMAC_BUS_MODE, mode);

	mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Disable interrupts. */
	dwge_write(sc, GMAC_INT_ENA, 0);
	dwge_write(sc, GMAC_INT_MASK, GMAC_INT_MASK_PIM | GMAC_INT_MASK_RIM);
	dwge_write(sc, GMAC_MMC_RX_INT_MSK, 0xffffffff);
	dwge_write(sc, GMAC_MMC_TX_INT_MSK, 0xffffffff);
	dwge_write(sc, GMAC_MMC_IPC_INT_MSK, 0xffffffff);

	fdt_intr_establish(faa->fa_node, IPL_NET, dwge_intr, sc,
	    sc->sc_dev.dv_xname);
}

void
dwge_reset_phy(struct dwge_softc *sc)
{
	uint32_t *gpio;
	uint32_t delays[3];
	int active = 1;
	int len;

	len = OF_getproplen(sc->sc_node, "snps,reset-gpio");
	if (len <= 0)
		return;

	gpio = malloc(len, M_TEMP, M_WAITOK);

	/* Gather information. */
	OF_getpropintarray(sc->sc_node, "snps,reset-gpio", gpio, len);
	if (OF_getproplen(sc->sc_node, "snps-reset-active-low") == 0)
		active = 0;
	delays[0] = delays[1] = delays[2] = 0;
	OF_getpropintarray(sc->sc_node, "snps,reset-delay-us", delays,
	    sizeof(delays));

	/* Perform reset sequence. */
	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(gpio, !active);
	delay(delays[0]);
	gpio_controller_set_pin(gpio, active);
	delay(delays[1]);
	gpio_controller_set_pin(gpio, !active);
	delay(delays[2]);

	free(gpio, M_TEMP, len);
}

uint32_t
dwge_read(struct dwge_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}

void
dwge_write(struct dwge_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}

void
dwge_lladdr_read(struct dwge_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwge_read(sc, GMAC_MAC_ADDR0_HI);
	maclo = dwge_read(sc, GMAC_MAC_ADDR0_LO);

	lladdr[0] = (maclo >> 0) & 0xff;
	lladdr[1] = (maclo >> 8) & 0xff;
	lladdr[2] = (maclo >> 16) & 0xff;
	lladdr[3] = (maclo >> 24) & 0xff;
	lladdr[4] = (machi >> 0) & 0xff;
	lladdr[5] = (machi >> 8) & 0xff;
}

void
dwge_lladdr_write(struct dwge_softc *sc)
{
	dwge_write(sc, GMAC_MAC_ADDR0_HI,
	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
	dwge_write(sc, GMAC_MAC_ADDR0_LO,
	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
}

void
dwge_start(struct ifnet *ifp)
{
	struct dwge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	idx = sc->sc_tx_prod;
	while ((sc->sc_txdesc[idx].sd_status & TDES0_OWN) == 0) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		error = dwge_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

		/* Now we are committed to transmit the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

int
dwge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwge_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwge_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dwge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
dwge_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}

int
dwge_media_change(struct ifnet *ifp)
{
	struct dwge_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
dwge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dwge_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

int
dwge_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwge_softc *sc = (void *)self;
	int n;

	dwge_write(sc, GMAC_GMII_ADDR,
	    sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
	    phy << GMAC_GMII_ADDR_PA_SHIFT |
	    reg << GMAC_GMII_ADDR_GR_SHIFT |
	    GMAC_GMII_ADDR_GB);
	for (n = 0; n < 1000; n++) {
		if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
			return dwge_read(sc, GMAC_GMII_DATA);
		delay(10);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
dwge_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwge_softc *sc = (void *)self;
	int n;

	dwge_write(sc, GMAC_GMII_DATA, val);
	dwge_write(sc, GMAC_GMII_ADDR,
	    sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
	    phy << GMAC_GMII_ADDR_PA_SHIFT |
	    reg << GMAC_GMII_ADDR_GR_SHIFT |
	    GMAC_GMII_ADDR_GW | GMAC_GMII_ADDR_GB);
	for (n = 0; n < 1000; n++) {
		if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
			return;
		delay(10);
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
dwge_mii_statchg(struct device *self)
{
	struct dwge_softc *sc = (void *)self;
	uint32_t conf;

	conf = dwge_read(sc, GMAC_MAC_CONF);
	conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		sc->sc_link = 1;
		break;
	case IFM_100_TX:
		conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
		sc->sc_link = 1;
		break;
	case IFM_10_T:
		conf |= GMAC_MAC_CONF_PS;
		sc->sc_link = 1;
		break;
	default:
		sc->sc_link = 0;
		return;
	}

	if (sc->sc_link == 0)
		return;

	conf &= ~GMAC_MAC_CONF_DM;
	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
		conf |= GMAC_MAC_CONF_DM;

	/* XXX: RX/TX flow control? */

	dwge_write(sc, GMAC_MAC_CONF, conf);
}

void
dwge_tick(void *arg)
{
	struct dwge_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
dwge_rxtick(void *arg)
{
	struct dwge_softc *sc = arg;
	uint32_t mode;
	int s;

	s = splnet();

	mode = dwge_read(sc, GMAC_OP_MODE);
	dwge_write(sc, GMAC_OP_MODE, mode & ~GMAC_OP_MODE_SR);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));
	dwge_write(sc, GMAC_OP_MODE, mode);

	splx(s);
}

int
dwge_intr(void *arg)
{
	struct dwge_softc *sc = arg;
	uint32_t reg;

	reg = dwge_read(sc, GMAC_STATUS);
	dwge_write(sc, GMAC_STATUS, reg);

	if (reg & GMAC_STATUS_RI)
		dwge_rx_proc(sc);

	if (reg & GMAC_STATUS_TI ||
	    reg & GMAC_STATUS_TU)
		dwge_tx_proc(sc);

	return (1);
}

void
dwge_tx_proc(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_desc *txd;
	struct dwge_buf *txb;
	int idx, txfree;

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring), 0,
	    DWGE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cnt > 0) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWGE_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->sd_status & TDES0_OWN)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;
		sc->sc_tx_cnt--;

		if (sc->sc_tx_cons == (DWGE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_status = 0;
	}

	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}

void
dwge_rx_proc(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_desc *rxd;
	struct dwge_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
	    DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWGE_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_status & RDES0_OWN)
			break;

		len = (rxd->sd_status >> RDES0_FL_SHIFT) & RDES0_FL_MASK;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		m = rxb->tb_m;
		rxb->tb_m = NULL;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		if_rxr_put(&sc->sc_rx_ring, 1);
		if (sc->sc_rx_cons == (DWGE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
	    DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_input(ifp, &ml);
}

void
dwge_up(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_buf *txb, *rxb;
	uint32_t mode;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwge_dmamem_alloc(sc,
	    DWGE_NTXDESC * sizeof(struct dwge_desc), 8);
	sc->sc_txdesc = DWGE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwge_buf) * DWGE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWGE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWGE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;

		sc->sc_txdesc[i].sd_next =
		    DWGE_DMA_DVA(sc->sc_txring) +
		    ((i+1) % DWGE_NTXDESC) * sizeof(struct dwge_desc);
	}

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
	    0, DWGE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	dwge_write(sc, GMAC_TX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwge_dmamem_alloc(sc,
	    DWGE_NRXDESC * sizeof(struct dwge_desc), 8);
	sc->sc_rxdesc = DWGE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwge_buf) * DWGE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWGE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;

		sc->sc_rxdesc[i].sd_next =
		    DWGE_DMA_DVA(sc->sc_rxring) +
		    ((i+1) % DWGE_NRXDESC) * sizeof(struct dwge_desc);
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWGE_NRXDESC);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwge_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
	    0, DWGE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));

	dwge_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwge_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwge_write(sc, GMAC_INT_ENA, GMAC_INT_ENA_NIE |
	    GMAC_INT_ENA_RIE | GMAC_INT_ENA_TIE | GMAC_INT_ENA_TUE);

	mode = dwge_read(sc, GMAC_OP_MODE);
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~(GMAC_OP_MODE_TSF | GMAC_OP_MODE_TTC_MASK);
		mode |= GMAC_OP_MODE_TTC_128;
		mode &= ~(GMAC_OP_MODE_RSF | GMAC_OP_MODE_RTC_MASK);
		mode |= GMAC_OP_MODE_RTC_128;
	} else {
		mode |= GMAC_OP_MODE_TSF | GMAC_OP_MODE_OSF;
		mode |= GMAC_OP_MODE_RSF;
	}
	dwge_write(sc, GMAC_OP_MODE, mode | GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);

	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
dwge_down(struct dwge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwge_buf *txb, *rxb;
	uint32_t dmactrl;
	int i;

	timeout_del(&sc->sc_rxto);
	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	dwge_stop_dma(sc);

	dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc,
	    GMAC_MAC_CONF) & ~(GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE));

	dmactrl = dwge_read(sc, GMAC_OP_MODE);
	dmactrl &= ~(GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);
	dwge_write(sc, GMAC_OP_MODE, dmactrl);

	dwge_write(sc, GMAC_INT_ENA, 0);

	for (i = 0; i < DWGE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwge_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWGE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwge_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

void
dwge_iff(struct dwge_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= GMAC_MAC_FRM_FILT_PM;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= GMAC_MAC_FRM_FILT_PR;
	} else {
		reg |= GMAC_MAC_FRM_FILT_HMC;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwge_lladdr_write(sc);

	dwge_write(sc, GMAC_HASH_TAB_HI, hash[1]);
	dwge_write(sc, GMAC_HASH_TAB_LO, hash[0]);

	dwge_write(sc, GMAC_MAC_FRM_FILT, reg);
}

int
dwge_encap(struct dwge_softc *sc, struct mbuf *m, int *idx)
{
	struct dwge_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	if (map->dm_nsegs > (DWGE_NTXDESC - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->sd_addr = map->dm_segs[i].ds_addr;
		txd->sd_len = map->dm_segs[i].ds_len | TDES1_TCH;
		if (i == 0)
			txd->sd_len |= TDES1_FS;
		if (i == (map->dm_nsegs - 1))
			txd->sd_len |= TDES1_LS | TDES1_IC;
		if (i != 0)
			txd->sd_status = TDES0_OWN;

		bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWGE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	txd_start->sd_status = TDES0_OWN;
	bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	dwge_write(sc, GMAC_TX_POLL_DEMAND, 0xffffffff);

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	return (0);
}

void
dwge_reset(struct dwge_softc *sc)
{
	int n;

	dwge_stop_dma(sc);

	dwge_write(sc, GMAC_BUS_MODE, dwge_read(sc, GMAC_BUS_MODE) |
	    GMAC_BUS_MODE_SWR);

	for (n = 0; n < 30000; n++) {
		if ((dwge_read(sc, GMAC_BUS_MODE) &
		    GMAC_BUS_MODE_SWR) == 0)
			return;
		delay(10);
	}

	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
}

void
dwge_stop_dma(struct dwge_softc *sc)
{
	uint32_t dmactrl;

	/* Stop DMA. */
	dmactrl = dwge_read(sc, GMAC_OP_MODE);
	dmactrl &= ~GMAC_OP_MODE_ST;
	dmactrl |= GMAC_OP_MODE_FTF;
	dwge_write(sc, GMAC_OP_MODE, dmactrl);
}

struct dwge_dmamem *
dwge_dmamem_alloc(struct dwge_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwge_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

void
dwge_dmamem_free(struct dwge_softc *sc, struct dwge_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

struct mbuf *
dwge_alloc_mbuf(struct dwge_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

void
dwge_fill_rx_ring(struct dwge_softc *sc)
{
	struct dwge_desc *rxd;
	struct dwge_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWGE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwge_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->sd_len = rxb->tb_map->dm_segs[0].ds_len | RDES1_RCH;
		rxd->sd_addr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_status = RDES0_OWN;

		if (sc->sc_rx_prod == (DWGE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}

/*
 * Allwinner A20/A31.
 */

void
dwge_setup_allwinner(struct dwge_softc *sc)
{
	char phy_mode[8];
	uint32_t freq;

	/* default to RGMII */
	OF_getprop(sc->sc_node, "phy-mode", phy_mode, sizeof(phy_mode));
	if (strcmp(phy_mode, "mii") == 0)
		freq = 25000000;
	else
		freq = 125000000;
	clock_set_frequency(sc->sc_node, "allwinner_gmac_tx", freq);
}

/*
 * Rockchip RK3288/RK3399.
 */

/* RK3288 registers */
#define RK3288_GRF_SOC_CON1		0x0248
#define  RK3288_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 6) << 16 | (0x1 << 6))
#define  RK3288_GMAC_PHY_INTF_SEL_RMII	((0x7 << 6) << 16 | (0x4 << 6))
#define  RK3288_RMII_MODE_RMII		((1 << 14) << 16 | (1 << 14))
#define  RK3288_RMII_MODE_MII		((1 << 14) << 16 | (0 << 14))
#define  RK3288_GMAC_CLK_SEL_125	((0x3 << 12) << 16 | (0x0 << 12))
#define  RK3288_GMAC_CLK_SEL_25		((0x3 << 12) << 16 | (0x3 << 12))
#define  RK3288_GMAC_CLK_SEL_2_5	((0x3 << 12) << 16 | (0x2 << 12))

#define RK3288_GRF_SOC_CON3		0x0250
#define  RK3288_GMAC_RXCLK_DLY_ENA	((1 << 15) << 16 | (1 << 15))
#define  RK3288_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 7) << 16 | ((val) << 7))
#define  RK3288_GMAC_TXCLK_DLY_ENA	((1 << 14) << 16 | (1 << 14))
#define  RK3288_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

/* RK3328 registers */
#define RK3328_GRF_MAC_CON0		0x0900
#define  RK3328_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 7) << 16 | ((val) << 7))
#define  RK3328_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

#define RK3328_GRF_MAC_CON1		0x0904
#define  RK3328_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 4) << 16 | (0x1 << 4))
#define  RK3328_GMAC_PHY_INTF_SEL_RMII	((0x7 << 4) << 16 | (0x4 << 4))
#define  RK3328_RMII_MODE_RMII		((1 << 9) << 16 | (1 << 9))
#define  RK3328_RMII_MODE_MII		((1 << 9) << 16 | (0 << 9))
#define  RK3328_GMAC_CLK_SEL_125	((0x3 << 11) << 16 | (0x0 << 11))
#define  RK3328_GMAC_CLK_SEL_25		((0x3 << 11) << 16 | (0x3 << 11))
#define  RK3328_GMAC_CLK_SEL_2_5	((0x3 << 11) << 16 | (0x2 << 11))
#define  RK3328_GMAC_RXCLK_DLY_ENA	((1 << 1) << 16 | (1 << 1))
#define  RK3328_GMAC_TXCLK_DLY_ENA	((1 << 0) << 16 | (1 << 0))

/* RK3399 registers */
#define RK3399_GRF_SOC_CON5		0xc214
#define  RK3399_GMAC_PHY_INTF_SEL_RGMII	((0x7 << 9) << 16 | (0x1 << 9))
#define  RK3399_GMAC_PHY_INTF_SEL_RMII	((0x7 << 9) << 16 | (0x4 << 9))
#define  RK3399_RMII_MODE_RMII		((1 << 6) << 16 | (1 << 6))
#define  RK3399_RMII_MODE_MII		((1 << 6) << 16 | (0 << 6))
#define  RK3399_GMAC_CLK_SEL_125	((0x3 << 4) << 16 | (0x0 << 4))
#define  RK3399_GMAC_CLK_SEL_25		((0x3 << 4) << 16 | (0x3 << 4))
#define  RK3399_GMAC_CLK_SEL_2_5	((0x3 << 4) << 16 | (0x2 << 4))
#define RK3399_GRF_SOC_CON6		0xc218
#define  RK3399_GMAC_RXCLK_DLY_ENA	((1 << 15) << 16 | (1 << 15))
#define  RK3399_GMAC_CLK_RX_DL_CFG(val)	((0x7f << 8) << 16 | ((val) << 8))
#define  RK3399_GMAC_TXCLK_DLY_ENA	((1 << 7) << 16 | (1 << 7))
#define  RK3399_GMAC_CLK_TX_DL_CFG(val)	((0x7f << 0) << 16 | ((val) << 0))

void	dwge_mii_statchg_rockchip(struct device *);

void
dwge_setup_rockchip(struct dwge_softc *sc)
{
	struct regmap *rm;
	uint32_t grf;
	int tx_delay, rx_delay;

	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
	rm = regmap_byphandle(grf);
	if (rm == NULL)
		return;

	clock_set_assigned(sc->sc_node);
	clock_enable(sc->sc_node, "mac_clk_rx");
	clock_enable(sc->sc_node, "mac_clk_tx");
	clock_enable(sc->sc_node, "aclk_mac");
	clock_enable(sc->sc_node, "pclk_mac");

	tx_delay = OF_getpropint(sc->sc_node, "tx_delay", 0x30);
	rx_delay = OF_getpropint(sc->sc_node, "rx_delay", 0x10);

	if (OF_is_compatible(sc->sc_node, "rockchip,rk3288-gmac")) {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3288_GRF_SOC_CON1,
		    RK3288_GMAC_PHY_INTF_SEL_RGMII | RK3288_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3288_GRF_SOC_CON3,
		    RK3288_GMAC_TXCLK_DLY_ENA | RK3288_GMAC_RXCLK_DLY_ENA |
		    RK3288_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3288_GMAC_CLK_RX_DL_CFG(rx_delay));

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3288_GRF_SOC_CON1;
		sc->sc_clk_sel_2_5 = RK3288_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3288_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3288_GMAC_CLK_SEL_125;
	} else if (OF_is_compatible(sc->sc_node, "rockchip,rk3328-gmac")) {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3328_GRF_MAC_CON1,
		    RK3328_GMAC_PHY_INTF_SEL_RGMII | RK3328_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3328_GRF_MAC_CON0,
		    RK3328_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3328_GMAC_CLK_RX_DL_CFG(rx_delay));
		regmap_write_4(rm, RK3328_GRF_MAC_CON1,
		    RK3328_GMAC_TXCLK_DLY_ENA | RK3328_GMAC_RXCLK_DLY_ENA);

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3328_GRF_MAC_CON1;
		sc->sc_clk_sel_2_5 = RK3328_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3328_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3328_GMAC_CLK_SEL_125;
	} else {
		/* Use RGMII interface. */
		regmap_write_4(rm, RK3399_GRF_SOC_CON5,
		    RK3399_GMAC_PHY_INTF_SEL_RGMII | RK3399_RMII_MODE_MII);

		/* Program clock delay lines. */
		regmap_write_4(rm, RK3399_GRF_SOC_CON6,
		    RK3399_GMAC_TXCLK_DLY_ENA | RK3399_GMAC_RXCLK_DLY_ENA |
		    RK3399_GMAC_CLK_TX_DL_CFG(tx_delay) |
		    RK3399_GMAC_CLK_RX_DL_CFG(rx_delay));

		/* Clock speed bits. */
		sc->sc_clk_sel = RK3399_GRF_SOC_CON5;
		sc->sc_clk_sel_2_5 = RK3399_GMAC_CLK_SEL_2_5;
		sc->sc_clk_sel_25 = RK3399_GMAC_CLK_SEL_25;
		sc->sc_clk_sel_125 = RK3399_GMAC_CLK_SEL_125;
	}

	sc->sc_mii.mii_statchg = dwge_mii_statchg_rockchip;
}

void
dwge_mii_statchg_rockchip(struct device *self)
{
	struct dwge_softc *sc = (void *)self;
	struct regmap *rm;
	uint32_t grf;
	uint32_t gmac_clk_sel = 0;

	dwge_mii_statchg(self);

	grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
	rm = regmap_byphandle(grf);
	if (rm == NULL)
		return;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10_T:
		gmac_clk_sel = sc->sc_clk_sel_2_5;
		break;
	case IFM_100_TX:
		gmac_clk_sel = sc->sc_clk_sel_25;
		break;
	case IFM_1000_T:
		gmac_clk_sel = sc->sc_clk_sel_125;
		break;
	}

	regmap_write_4(rm, sc->sc_clk_sel, gmac_clk_sel);
}